• 免费好用的星瞳AI云服务上线!简单标注,云端训练,支持OpenMV H7和OpenMV H7 Plus。可以替代edge impulse。 https://forum.singtown.com/topic/9519
  • 我们只解决官方正版的OpenMV的问题(STM32),其他的分支有很多兼容问题,我们无法解决。
  • 如果有产品硬件故障问题,比如无法开机,论坛很难解决。可以直接找售后维修
  • 发帖子之前,请确认看过所有的视频教程,https://singtown.com/learn/ 和所有的上手教程http://book.openmv.cc/
  • 每一个新的提问,单独发一个新帖子
  • 帖子需要目的,你要做什么?
  • 如果涉及代码,需要报错提示全部代码文本,请注意不要贴代码图片
  • 必看:玩转星瞳论坛了解一下图片上传,代码格式等问题。
  • 请问一下如果想把摄像头的原始数据通过wifi发送出去怎么操作,而不是仅仅发送一张jpg图片?



    • buffer=[]        
      # Sample every pixel of the 160x120 frame one at a time via a 1x1 ROI.
      for x in range(0,160):
          for y in range(0,120):
              ROI=(x,y,1,1)
              # get_statistics() on a 1x1 ROI just reads that single pixel.
              statistics=img.get_statistics(roi=ROI)
              # Convert the 0-255 grayscale reading to degrees Celsius.
              buffer.append( map_g_to_temp(statistics.max()))
      
      print(buffer)
      # NOTE(review): publish() expects bytes/str, not a Python list; also a
      # list of 19200 boxed floats is far larger than the raw frame and can
      # exhaust the 256KB MicroPython heap — presumably the reported
      # MemoryError. Serialize to a compact form first — confirm on device.
      client.publish("openmv/test", buffer)
      

      但是会爆出MemoryError: memory allocation failed, allocating 16824 bytes 错误,是不是内存溢出了,那么如果想把160*120原始数据通过mqtt传出去用什么办法呢

      谢谢用的是openmv4 lepton3.5



    • 我感觉应该是内存定义的问题。。。。stm32h7 应该有1m的内存,目前我只是处理160*120的红外图像,不知道有经验的人可以分享一下么



    • 你得提供全部的代码我才能测试。。。



    • @kidswong999请问一下如果想把摄像头的原始数据通过wifi发送出去怎么操作,而不是仅仅发送一张jpg图片? 中说:

      得提供全部的代码我才能测试。。。

      # NOTE(review): this paste lost its formatting — the leading 'i' of
      # 'import', the '#' on every comment line, and all block indentation.
      # Restored to valid Python below; runtime strings are unchanged.
      import sensor, image, time, math, network, usocket, sys, json, gc

      from mqtt import MQTTClient

      SSID='abut' # Network SSID
      KEY='tuba1abut' # Network key

      # Color Tracking Thresholds (Grayscale Min, Grayscale Max)
      threshold_list = [(200, 255)]

      # Set the target temp range here
      min_temp_in_celsius = 20.0
      max_temp_in_celsius = 40.0

      print("Resetting Lepton...")

      # These settings are applied on reset
      sensor.reset()
      sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
      sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
      print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                                    sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
      print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))

      sensor.set_pixformat(sensor.GRAYSCALE)
      sensor.set_framesize(sensor.QQVGA)
      sensor.skip_frames(100)

      # Init wlan module and connect to network
      print("Trying to connect... (may take a while)...")

      wlan = network.WINC()
      wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)

      # We should have a valid IP now via DHCP
      print(wlan.ifconfig())

      client = MQTTClient("abut", "192.168.101.100", port=1883,user="admin",password="ddd")
      client.connect()

      clock = time.clock()

      # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
      # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
      # camera resolution. "merge=True" merges all overlapping blobs in the image.

      def map_g_to_temp(g):
          # Linear map of a 0-255 grayscale value onto the measurement range.
          return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

      while (True):
          clock.tick() # Track elapsed milliseconds between snapshots().

          img = sensor.snapshot()
          blob_stats = []
          blobs = img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True)
          # Collect stats into a list of tuples
          for blob in blobs:
              blob_stats.append((blob.x(), blob.y(), map_g_to_temp(img.get_statistics(thresholds=threshold_list,
                                                                                      roi=blob.rect()).max())))
          img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW) # color it
          # Draw stuff on the colored image
          for blob in blobs:
              img.draw_rectangle(blob.rect())
              #img.draw_cross(blob.cx(), blob.cy())
          for blob_stat in blob_stats:
              img.draw_string(blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False)

          # Sample the frame pixel-by-pixel via 1x1 ROIs and convert to Celsius.
          # NOTE(review): only 80 of the 120 QQVGA rows are read here — confirm intent.
          buffer=[]
          for x in range(0,160):
              for y in range(0,80):
                  ROI=(x,y,1,1)
                  statistics=img.get_statistics(roi=ROI)
                  buffer.append( map_g_to_temp(statistics.max()))

          print(buffer)


    • # Lepton Get Object Temp Example
      
      
      import sensor, image, time, math, network, usocket, sys, json, gc

      from mqtt import MQTTClient

      SSID='' # Network SSID
      KEY=''  # Network key

      # Color Tracking Thresholds (Grayscale Min, Grayscale Max)
      threshold_list = [(200, 255)]

      # Set the target temp range here
      min_temp_in_celsius = 20.0
      max_temp_in_celsius = 40.0

      print("Resetting Lepton...")
      # These settings are applied on reset
      sensor.reset()
      sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
      sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
      print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                                    sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
      print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))

      sensor.set_pixformat(sensor.GRAYSCALE)
      sensor.set_framesize(sensor.QQVGA)
      sensor.skip_frames(100)

      # Init wlan module and connect to network
      print("Trying to connect... (may take a while)...")

      wlan = network.WINC()
      wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)

      # We should have a valid IP now via DHCP
      print(wlan.ifconfig())

      client = MQTTClient("abut", "192.168.101.100", port=1883,user="admin",password="ddd")
      client.connect()

      clock = time.clock()


      # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
      # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
      # camera resolution. "merge=True" merges all overlapping blobs in the image.
      def map_g_to_temp(g):
          # Linear map of a 0-255 grayscale value onto the measurement range.
          return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

      while (True):
          clock.tick() # Track elapsed milliseconds between snapshots().

          img = sensor.snapshot()
          blob_stats = []
          blobs = img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True)
          # Collect stats into a list of tuples
          for blob in blobs:
              blob_stats.append((blob.x(), blob.y(), map_g_to_temp(img.get_statistics(thresholds=threshold_list,
                                                                                      roi=blob.rect()).max())))

          # FIX: sample temperatures BEFORE to_rainbow() replaces the frame
          # with palette colors — afterwards pixel values are no longer the
          # raw grayscale temperature data. get_pixel() reads one pixel
          # directly, much cheaper than a 1x1-ROI get_statistics() call.
          # NOTE(review): only 80 of the 120 QQVGA rows are sampled here, as
          # in the original post — confirm whether the full frame was intended.
          buffer = []
          for x in range(0, 160):
              for y in range(0, 80):
                  buffer.append(map_g_to_temp(img.get_pixel(x, y)))

          img.to_rainbow(color_palette=sensor.PALETTE_IRONBOW) # color it
          # Draw stuff on the colored image
          for blob in blobs:
              img.draw_rectangle(blob.rect())
              #img.draw_cross(blob.cx(), blob.cy())
          for blob_stat in blob_stats:
              img.draw_string(blob_stat[0], blob_stat[1] - 10, "%.2f C" % blob_stat[2], mono_space=False)

          print(buffer)



          print("FPS %f - Lepton Temp: %f C" % (clock.fps(), sensor.ioctl(sensor.IOCTL_LEPTON_GET_FPA_TEMPERATURE)))

          # FIX: json.dumps(img) raised TypeError (an Image object is not JSON
          # serializable) and Image has no .array() method, so publish()
          # crashed. Serialize the temperature list and publish that string.
          cframe = json.dumps(buffer)

          #cframe=img.compressed(quality=35)

          client.publish("openmv/test", cframe)

          # Release the large objects before the next frame so the 256KB
          # MicroPython heap does not fragment (the reported MemoryError).
          buffer = None
          cframe = None
          gc.collect()
          print(gc.mem_free())

          # FIX: MicroPython time.sleep() takes SECONDS — sleep(1000) would
          # stall ~17 minutes; one second was presumably intended.
          time.sleep(1)

      漏了点



    • 应该是堆和栈的问题,就是如何定义一个大数组在堆里,160*120 总应该可以的吧



    • https://singtown.com/product/50302/openmv-cam-h7/

      你可以看参数里面RAM Layout,256KB .DATA/.BSS/Heap/Stack。所以是不能直接放一个很大的数组的。



    • 256KB .DATA/.BSS/Heap/Stack
      512KB Frame Buffer/Stack
      256 KB DMA Buffers
      (1MB Total)

      把数组定义在512kb内可以么,还是其实资源基本已经用完了,红外只有160*120



    • micropython里面能操作的只有256KB。