导航

    • 登录
    • 搜索
    • 版块
    • 产品
    • 教程
    • 论坛
    • 淘宝
    1. 主页
    2. 搜索

    高级搜索

    搜索子版块
    保存设置 清除设置
    共 499 条结果匹配 "sensor"(耗时 0.03 秒)

    串口通讯失败

    # Track red blobs and report each blob's centroid over UART3.
    import sensor, image, time
    from pyb import UART
    uart = UART(3, 19200)               # UART3; baud rate must match the receiver.

    sensor.reset()                      # Reset and initialize the sensor.
    sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
    sensor.skip_frames(time = 2000)     # Wait for settings take effect.
    clock = time.clock()                # Create a clock object to track the FPS.
    # LAB color threshold tuple: (L_min, L_max, A_min, A_max, B_min, B_max).
    red_threshold_01 = (91, 100, 127, -9, -20, 20)

    while(True):
        clock.tick()                    # Update the FPS clock.
        img = sensor.snapshot()         # Take a picture and return the image.
        blobs = img.find_blobs([red_threshold_01], area_threshold=150)
        if blobs:
            for b in blobs:
                img.draw_rectangle(b[0:4])  # b[0:4] is the blob's bounding rect (x, y, w, h).
                img.draw_cross(b[5], b[6])  # b[5], b[6] are the centroid cx, cy.
                m = [b[5], b[6]]
                print(m)
                # FIX: UART.write() needs a string or a buffer object, not a
                # Python list -- writing the list raises TypeError, which is
                # why the serial link appears to fail.  Send the centroid as
                # a parseable text line instead.
                uart.write("%d,%d\n" % (m[0], m[1]))
        print(clock.fps())              # Note: OpenMV Cam runs about half as fast when connected
                                        # to the IDE. The FPS should increase once disconnected.
    

    0_1525939737111_6752PPZP32PTPXV_L@6B%C4.png

    1
    发布在 OpenMV Cam

    请问一下,openmv图像识别的例程,如何根据四种不同的识别结果驱动舵机旋转不同的角度啊。想知道怎么增加或者修改程序

    # Edge Impulse - OpenMV Image Classification Example

    import sensor, image, time, os, tf

    # Camera bring-up: RGB565, QVGA, cropped to the 240x240 window the model expects.
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((240, 240))
    sensor.skip_frames(time=2000)

    net = "trained.tflite"
    # One human-readable class name per line of labels.txt.
    labels = [line.rstrip('\n') for line in open("labels.txt")]

    clock = time.clock()
    while(True):
        clock.tick()

        img = sensor.snapshot()

        # default settings just do one detection... change them to search the image...
        for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
            print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
            img.draw_rectangle(obj.rect())
            # Pair every label with its confidence score.
            predictions_list = list(zip(labels, obj.output()))

            for name, score in predictions_list:
                print("%s = %f" % (name, score))

        print(clock.fps(), "fps")
    
    

    E
    发布在 OpenMV Cam

    垃圾分类中,如何使舵机在识别相应的垃圾种类后转动相应的角度?

    # Edge Impulse - OpenMV Image Classification Example

    import sensor, image, time, os, tf

    # Configure the camera for the classifier: RGB565 QVGA, 240x240 window.
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((240, 240))
    sensor.skip_frames(time=2000)

    net = "trained.tflite"
    labels = [line.rstrip('\n') for line in open("labels.txt")]  # one class per line

    clock = time.clock()
    while(True):
        clock.tick()

        img = sensor.snapshot()

        # default settings just do one detection... change them to search the image...
        detections = tf.classify(net, img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5)
        for obj in detections:
            rect = obj.rect()
            print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % rect)
            img.draw_rectangle(rect)
            # (label, confidence) tuples, one per class.
            predictions_list = list(zip(labels, obj.output()))

            for pair in predictions_list:
                print("%s = %f" % pair)

        print(clock.fps(), "fps")
    

    Q
    发布在 OpenMV Cam

    如何让定时器中断打开,那个里面运行识别部分的程序作为一个后台,然后舵机的部分接受它的输出?最大值

    # Edge Impulse - OpenMV Image Classification Example

    import sensor, image, time, os, tf

    # Sensor setup: RGB565 at QVGA, windowed down to the model's 240x240 input.
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((240, 240))
    sensor.skip_frames(time=2000)

    net = "trained.tflite"
    labels = [line.rstrip('\n') for line in open("labels.txt")]

    clock = time.clock()
    while(True):
        clock.tick()

        img = sensor.snapshot()

        # default settings just do one detection... change them to search the image...
        for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
            print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
            img.draw_rectangle(obj.rect())
            # Zip labels with the model's confidence outputs.
            predictions_list = list(zip(labels, obj.output()))

            for k in range(len(predictions_list)):
                print("%s = %f" % predictions_list[k])

        print(clock.fps(), "fps")
    

    Q
    发布在 OpenMV Cam

    怎么改这个程序·,让它只输出最大可能性的垃圾类型?而不是把每种垃圾的可能性都列出来?

    # Edge Impulse - OpenMV Image Classification Example
    # NOTE(review): this listing lost its indentation when pasted -- the loop
    # body below is re-indented so the script parses instead of raising an
    # IndentationError on the line after "while(True):".

    import sensor, image, time, os, tf

    sensor.reset()                         # Reset and initialize the sensor.
    sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
    sensor.set_windowing((240, 240))       # Set 240x240 window.
    sensor.skip_frames(time=2000)          # Let the camera adjust.

    net = "trained.tflite"                 # TFLite model exported from Edge Impulse.
    labels = [line.rstrip('\n') for line in open("labels.txt")]  # one label per line

    clock = time.clock()
    while(True):
        clock.tick()

        img = sensor.snapshot()

        # default settings just do one detection... change them to search the image...
        for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
            print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
            img.draw_rectangle(obj.rect())
            # This combines the labels and confidence values into a list of tuples
            predictions_list = list(zip(labels, obj.output()))

            for i in range(len(predictions_list)):
                print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))

        print(clock.fps(), "fps")

    Q
    发布在 OpenMV Cam

    如何比较这里列表里元素的大小,把概率最大对应的的元素提取出来?

    # Edge Impulse - OpenMV Image Classification Example
    # NOTE(review): this listing lost its indentation when pasted -- the loop
    # body below is re-indented so the script parses instead of raising an
    # IndentationError on the line after "while(True):".

    import sensor, image, time, os, tf

    sensor.reset()                         # Reset and initialize the sensor.
    sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
    sensor.set_windowing((240, 240))       # Set 240x240 window.
    sensor.skip_frames(time=2000)          # Let the camera adjust.

    net = "trained.tflite"                 # TFLite model exported from Edge Impulse.
    labels = [line.rstrip('\n') for line in open("labels.txt")]  # one label per line

    clock = time.clock()
    while(True):
        clock.tick()

        img = sensor.snapshot()

        # default settings just do one detection... change them to search the image...
        for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
            print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
            img.draw_rectangle(obj.rect())
            # This combines the labels and confidence values into a list of tuples
            predictions_list = list(zip(labels, obj.output()))

            # NOTE(review): the per-label print loop was deliberately commented
            # out by the author; left as-is.
            #for i in range(len(predictions_list)):
            #    print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))

        print(clock.fps(), "fps")

    Q
    发布在 OpenMV Cam

    如何解决Memory Error:out of fast frame buffer stack memory?加了sd卡

    # Edge Impulse - OpenMV Image Classification Example
    # NOTE(review): this listing lost its indentation when pasted -- the loop
    # body below is re-indented so the script parses instead of raising an
    # IndentationError on the line after "while(True):".

    import sensor, image, time, os, tf

    sensor.reset()                         # Reset and initialize the sensor.
    sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
    sensor.set_windowing((240, 240))       # Set 240x240 window.
    sensor.skip_frames(time=2000)          # Let the camera adjust.

    net = "trained.tflite"                 # TFLite model exported from Edge Impulse.
    labels = [line.rstrip('\n') for line in open("labels.txt")]  # one label per line

    clock = time.clock()
    while(True):
        clock.tick()

        img = sensor.snapshot()

        # default settings just do one detection... change them to search the image...
        for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
            print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
            img.draw_rectangle(obj.rect())
            # This combines the labels and confidence values into a list of tuples
            predictions_list = list(zip(labels, obj.output()))

            for i in range(len(predictions_list)):
                print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))

        print(clock.fps(), "fps")

    2
    发布在 OpenMV Cam

    怎么helloworld也不行呀?显示Sensor Timeout?

    # Hello World Example
    #
    # Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script!

    import sensor, image, time

    # Bring the camera up in RGB565 QVGA and give the settings time to settle.
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time = 2000)

    clock = time.clock()    # FPS tracker.

    while(True):
        clock.tick()                # Mark the start of a new frame.
        img = sensor.snapshot()     # Grab a frame.
        # FPS roughly doubles once the camera is disconnected from the IDE.
        print(clock.fps())
    
    

    RuntimeError:Sensor Timeout
    之前还可以显示的,不知道是不是动了什么东西,就突然timeout了😭 而且连接的时候闪了3下红灯

    R
    发布在 OpenMV Cam

    拍的图片不对,内容有错位现象

    # QR-code snapshot script: detects QR codes in one frame and saves the
    # 5th/10th/15th detection frames to storage.
    import cpufreq
    import pyb
    import sensor,image, time,math
    from pyb import LED,Timer,UART
    
    sensor.reset()                      # Reset and initialize the sensor.
    sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)
    sensor.skip_frames(time = 2000)     # Skip frames so the sensor settles after the settings change.
    sensor.set_auto_gain(False)
    sensor.set_auto_whitebal(False)
    clock = time.clock()                # Create a clock object to track the FPS.
    sensor.set_auto_exposure(True, exposure_us=5000) # Configure auto exposure. NOTE(review): exposure_us normally applies with auto exposure *disabled* -- confirm intent.
    # Running count of QR detections; selects which snapshot file gets written.
    qrcodes_flag=0
    def opv_find_qrcodes():
        """Grab one frame, detect every QR code in it, and handle each code.

        Draws a rectangle around each code, bumps the global ``qrcodes_flag``
        counter, saves the frame on the 5th/10th/15th detection, and prints
        the decoded code object.
        """
        #sensor.skip_frames(30)
        global qrcodes_flag
        sensor.set_auto_gain(False)
        img = sensor.snapshot()
        img.lens_corr(1.5) # strength of 1.8 is good for the 2.8mm lens.
        for code in img.find_qrcodes():
            clock.tick()
            img.draw_rectangle(code[0:4])   # code[0:4] is the code's bounding rect (x, y, w, h).
            qrcodes_flag += 1
            if(qrcodes_flag==5 ):# save the frame of the 5th detection
                img.save("/example1.jpg")
            if(qrcodes_flag==10):# save the frame of the 10th detection
                img.save("/example2.jpg")
            if(qrcodes_flag==15):# save the frame of the 15th detection
                img.save("/example3.jpg")
            print(code)
    
    # NOTE(review): called exactly once, not in a loop -- only a single frame
    # is ever processed per run.
    opv_find_qrcodes()
    
    

    4
    发布在 OpenMV Cam

    MemoryError: Out of fast Frame Buffer Stack Memory!

    # Edge Impulse - OpenMV Image Classification Example
    # NOTE(review): this listing lost its indentation when pasted -- the loop
    # body below is re-indented so the script parses instead of raising an
    # IndentationError on the line after "while(True):".

    import sensor, image, time, os, tf

    sensor.reset()                         # Reset and initialize the sensor.
    sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
    # FIX: re-enabled the 240x240 window.  With windowing commented out the
    # classifier is fed the full 320x240 frame, which is the likely cause of
    # the reported "MemoryError: Out of fast Frame Buffer Stack Memory!".
    sensor.set_windowing((240, 240))       # Set 240x240 window.
    sensor.skip_frames(time=2000)          # Let the camera adjust.

    net = "trained.tflite"                 # TFLite model exported from Edge Impulse.
    labels = [line.rstrip('\n') for line in open("labels.txt")]  # one label per line

    clock = time.clock()
    while(True):
        clock.tick()

        img = sensor.snapshot()

        # default settings just do one detection... change them to search the image...
        for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
            print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
            img.draw_rectangle(obj.rect())
            # This combines the labels and confidence values into a list of tuples
            predictions_list = list(zip(labels, obj.output()))

            for i in range(len(predictions_list)):
                print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))

        print(clock.fps(), "fps")

    L
    发布在 OpenMV Cam
    • 1
    • 2
    • 34
    • 35
    • 36
    • 37
    • 38
    • 49
    • 50
    • 36 / 50