Seems like it. So it can't go above six?
Post by dymb
-
I ran into this problem while using the FOMO neural network:
Exception: Failed to load "trained.tflite", did you copy the .tflite and labels.txt file onto the mass-storage device? (Arena size is too small for all buffers. Needed 225840 but only 219920 was available.
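The arena error means the MicroPython heap does not have enough free memory left for the model's tensor arena. A common workaround (hedged, since whether it is enough depends on the model size) is to force the model file into the frame buffer so the heap stays free for the arena; failing that, the model itself has to be shrunk in Edge Impulse (smaller input size, fewer layers). A minimal sketch of the workaround:

import tf

# Minimal sketch: always load the model into the frame buffer instead of
# letting the size heuristic decide, freeing heap space for the tensor arena.
net = tf.load("trained.tflite", load_to_fb=True)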
-
When using the FOMO neural network, I get the error "list index out of range".
# Edge Impulse - OpenMV Object Detection Example

import sensor, image, time, os, tf, math, uos, gc

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))       # Set 240x240 window.
sensor.skip_frames(time=2000)          # Let the camera adjust.

net = None
labels = None
min_confidence = 0.5

try:
    # load the model, alloc the model file on the heap if we have at least 64K free after loading
    net = tf.load("trained.tflite", load_to_fb=uos.stat('trained.tflite')[6] > (gc.mem_free() - (64*1024)))
except Exception as e:
    raise Exception('Failed to load "trained.tflite", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

try:
    labels = [line.rstrip('\n') for line in open("labels.txt")]
except Exception as e:
    raise Exception('Failed to load "labels.txt", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
    (255,   0,   0),
    (  0, 255,   0),
    (255, 255,   0),
    (  0,   0, 255),
    (255,   0, 255),
    (  0, 255, 255),
    (255, 255, 255),
]

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # detect() returns all objects found in the image (split out per class already)
    # we skip class index 0, as that is the background, and then draw circles of the center
    # of our objects
    for i, detection_list in enumerate(net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])):
        if (i == 0): continue # background class
        if (len(detection_list) == 0): continue # no detections for this class?

        print("********** %s **********" % labels[i])
        for d in detection_list:
            [x, y, w, h] = d.rect()
            center_x = math.floor(x + (w / 2))
            center_y = math.floor(y + (h / 2))
            print('x %d\ty %d' % (center_x, center_y))
            img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2)

    print(clock.fps(), "fps", end="\n\n")
The line img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2) is the one that throws the error.
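The "list index out of range" on that line is almost certainly colors[i], not draw_circle() itself: the script defines only 7 colors, and since index 0 is the background class, a model with more than 6 foreground classes overflows the list (which matches the "can't go above six" reply above). A minimal sketch of the usual fix, either append more color tuples or wrap the index:

img.draw_circle((center_x, center_y, 12),
                color=colors[i % len(colors)],  # reuse colors cyclically instead of overflowing
                thickness=2)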
-
Does anyone know how to make a neural network model generated on Edge Impulse recognize multiple objects?
That is, when three objects appear in the frame at the same time, I want to determine which of them are the ones I need. If that's not possible, could I split the frame into three parts and run recognition on each part separately? If so, could someone walk me through the code? Many thanks!
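If the model is trained as a FOMO object-detection model (like the script earlier in this thread), net.detect() already returns every object in the frame, grouped per class, so there is no need to split the image into three parts. A minimal sketch that keeps only the classes you need; the label names in wanted are placeholders, not from the post:

wanted = {"part_a", "part_b"}  # assumed names; use the real entries from labels.txt

for i, detection_list in enumerate(net.detect(img, thresholds=[(128, 255)])):
    if i == 0 or labels[i] not in wanted:
        continue  # skip the background class and classes we don't need
    for d in detection_list:
        print("found %s at" % labels[i], d.rect())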
-
The neural network model generated on Edge Impulse only classifies the whole frame. How can I mark the recognized object with a smaller rectangle, the way template matching does?
Here is the generated code:

# Edge Impulse - OpenMV Image Classification Example
import sensor, image, time, os, tf, uos, gc
flag=0
a=0
b=0
pre=0
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240)) # Set 240x240 window.
sensor.skip_frames(time=2000) # Let the camera adjust.
from pyb import UART
net = None
labels = None
uart = UART(3, 115200)
while(True):
    #a=uart.read()
    #if a==b'o' :
    try:
        # load the model, alloc the model file on the heap if we have at least 64K free after loading
        net = tf.load("trained.tflite", load_to_fb=uos.stat('trained.tflite')[6] > (gc.mem_free() - (64*1024)))
    except Exception as e:
        print(e)
        raise Exception('Failed to load "trained.tflite", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

    try:
        labels = [line.rstrip('\n') for line in open("labels.txt")]
    except Exception as e:
        raise Exception('Failed to load "labels.txt", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

    clock = time.clock()
    while(True):
        clock.tick()

        img = sensor.snapshot()

        # default settings just do one detection... change them to search the image...
        for obj in net.classify(img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
            print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
            img.draw_rectangle(obj.rect()) #, roi=(10, 0, 60, 60))
            # This combines the labels and confidence values into a list of tuples
            predictions_list = list(zip(labels, obj.output()))

            for i in range(len(predictions_list)):
                print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))

            if predictions_list[0][1] > 0.85:
                flag = 1
            elif predictions_list[1][1] > 0.85:
                flag = 2
            elif predictions_list[0][1] < 0.85 and predictions_list[1][1] < 0.85:
                flag = 3

            if pre != flag:
                b = 0
                pre = flag
            if flag == 1:
                if b <= 2:
                    uart.write("z1")
                    b += 1
            if flag == 2:
                if b <= 2:
                    uart.write("z2")
                    b += 1
            if flag == 3:
                if b <= 2:
                    uart.write("g")
                    b += 1

        print(clock.fps(), "fps")
Hoping someone can explain. Thanks!
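An image classification model labels whatever window it is given, so short of retraining it as a FOMO object-detection model (whose net.detect() returns boxes directly, as in the first script in this thread), one workaround is to classify sub-windows and draw a rectangle only where the wanted class scores high. A minimal sketch under those assumptions; TARGET_INDEX and the three-strip grid are made up for illustration:

TARGET_INDEX = 0   # assumed: index of the wanted label in labels.txt
W = 240 // 3       # split the 240x240 window into three 80-pixel-wide strips

for col in range(3):
    roi = (col * W, 0, W, 240)
    for obj in net.classify(img, roi=roi):
        # Draw the strip's rectangle only when the target class is confident there.
        if obj.output()[TARGET_INDEX] > 0.85:
            img.draw_rectangle(roi)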
-
Template matching on OpenMV is extremely laggy.
How should I fix this?
import time, sensor, image
from image import SEARCH_EX, SEARCH_DS
from pyb import UART

# Reset sensor
sensor.reset()

# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
# You can set windowing to reduce the search image.
#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
sensor.set_pixformat(sensor.GRAYSCALE)

uart = UART(3, 115200)

# Load templates.
# A template should be a small (e.g. 32x32 pixels) grayscale image.
template1 = image.Image("/1.pgm")
template2 = image.Image("/2.pgm")
template3 = image.Image("/3.pgm")
template4 = image.Image("/4.pgm")
template5 = image.Image("/5.pgm")
template6 = image.Image("/6.pgm")
template7 = image.Image("/7.pgm")
template8 = image.Image("/8.pgm")
template9 = image.Image("/9.pgm")
template14 = image.Image("/m.pgm")
template18 = image.Image("/1L2.pgm")
template19 = image.Image("/1L3.pgm")
template21 = image.Image("/1R2.pgm")
template22 = image.Image("/1R3.pgm")
template25 = image.Image("/2L2.pgm")
template26 = image.Image("/2L3.pgm")
template29 = image.Image("/2R2.pgm")
template30 = image.Image("/3L1.pgm")
template31 = image.Image("/3L2.pgm")
template33 = image.Image("/3R1.pgm")
template34 = image.Image("/3R2.pgm")
template37 = image.Image("/4L1.pgm")
template38 = image.Image("/4L2.pgm")
template40 = image.Image("/4R1.pgm")
template41 = image.Image("/4R1.pgm") # note: same file as template40; perhaps "/4R2.pgm" was intended

flag = 0
flag1 = 0

clock = time.clock()

# Run template matching
while (True):
    clock.tick()
    img = sensor.snapshot()

    # find_template(template, threshold, [roi, step, search])
    # ROI: The region of interest tuple (x, y, w, h).
    # Step: The loop step used (y+=step, x+=step) use a bigger step to make it faster.
    # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search
    #
    # Note1: ROI has to be smaller than the image and bigger than the template.
    # Note2: In diamond search, step and ROI are both ignored.
    r1 = img.find_template(template1, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
    r2 = img.find_template(template2, 0.70, step=4, search=SEARCH_EX)
    r3 = img.find_template(template3, 0.70, step=4, search=SEARCH_EX)
    r4 = img.find_template(template4, 0.70, step=4, search=SEARCH_EX)
    r5 = img.find_template(template5, 0.70, step=4, search=SEARCH_EX)
    r6 = img.find_template(template6, 0.70, step=4, search=SEARCH_EX)
    r7 = img.find_template(template7, 0.70, step=4, search=SEARCH_EX)
    r8 = img.find_template(template8, 0.70, step=4, search=SEARCH_EX)
    r9 = img.find_template(template9, 0.70, step=4, search=SEARCH_EX)
    r14 = img.find_template(template14, 0.70, step=4, search=SEARCH_EX)
    r18 = img.find_template(template18, 0.70, step=4, search=SEARCH_EX)
    r19 = img.find_template(template19, 0.70, step=4, search=SEARCH_EX)
    r22 = img.find_template(template22, 0.70, step=4, search=SEARCH_EX)
    r25 = img.find_template(template25, 0.70, step=4, search=SEARCH_EX)
    r26 = img.find_template(template26, 0.70, step=4, search=SEARCH_EX)
    r29 = img.find_template(template29, 0.70, step=4, search=SEARCH_EX)
    r30 = img.find_template(template30, 0.70, step=4, search=SEARCH_EX)
    r31 = img.find_template(template31, 0.70, step=4, search=SEARCH_EX)
    r33 = img.find_template(template33, 0.70, step=4, search=SEARCH_EX)
    r37 = img.find_template(template37, 0.70, step=4, search=SEARCH_EX)
    r38 = img.find_template(template38, 0.70, step=4, search=SEARCH_EX)
    r40 = img.find_template(template40, 0.70, step=4, search=SEARCH_EX)
    r41 = img.find_template(template41, 0.70, step=4, search=SEARCH_EX)

    if flag1 == 0:
        if r14:
            uart.write("s")
            flag1 = 1

    if flag == 0:
        if r1:
            img.draw_rectangle(r1)
            flag = 1
            uart.write("1")
        if r2:
            img.draw_rectangle(r2)
            flag = 2
            uart.write("2")
        if r3:
            img.draw_rectangle(r3)
            flag = 3
            uart.write("3")
        if r4:
            img.draw_rectangle(r4)
            flag = 4
            uart.write("4")
        if r5:
            img.draw_rectangle(r5)
            uart.write("5")
        if r6:
            img.draw_rectangle(r6)
            uart.write("6")
        if r7:
            img.draw_rectangle(r7)
            uart.write("7")
        if r8:
            img.draw_rectangle(r8)
            uart.write("8")
        if r9:
            img.draw_rectangle(r9)
            uart.write("9")

    if flag == 1:
        if r14:
            if r18 or r19:
                uart.write("l")
            if r22:
                uart.write("r")
            else:
                uart.write("m")
            time.sleep(2) # time is already imported at the top; no need to re-import here
            flag1 = 0

    if flag == 2:
        if r14:
            if r25 or r26:
                uart.write("l") # was img.draw_rectangle("l") in the original, which is a bug
            if r29:
                uart.write("r")
            else:
                uart.write("m")
            time.sleep(2)
            flag1 = 0

    if flag == 3:
        if r14:
            if r30 or r31:
                uart.write("l") # was img.draw_rectangle("l") in the original, which is a bug
            if r33:
                uart.write("r")
            else:
                uart.write("m")
            time.sleep(2)
            flag1 = 0

    if flag == 4:
        if r14:
            if r37 or r38:
                uart.write("l")
            if r40 or r41:
                uart.write("r")
            else:
                uart.write("m")
            time.sleep(2)
            flag1 = 0

    print(clock.fps())
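A hedged suggestion on the lag: every find_template() call with SEARCH_EX is an exhaustive scan of the whole QQVGA frame, and the loop above runs 23 of them on every frame, so the frame rate collapses. Matching only the templates the current state actually needs, and stopping at the first hit, usually helps a lot; SEARCH_DS or a roi= restricting the scan area are further options. A minimal sketch of the state-gated idea, reusing the templates, flag, and uart from the script above:

    if flag == 0:
        # While no digit has been found yet, only the nine digit templates matter.
        for n, t in enumerate([template1, template2, template3, template4, template5,
                               template6, template7, template8, template9]):
            r = img.find_template(t, 0.70, step=4, search=SEARCH_EX)
            if r:
                img.draw_rectangle(r)
                flag = n + 1
                uart.write(str(n + 1))
                break  # one match is enough; skip the remaining scans this frame
    else:
        # After a digit is found, scan only the templates that its branch uses.
        r14 = img.find_template(template14, 0.70, step=4, search=SEARCH_EX)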
-
RE: Send "L" when a digit is recognized to the left of the red line, and "R" when it is to the right
@kidswong999

# Template Matching Example - Normalized Cross Correlation (NCC)
#
# This example shows off how to use the NCC feature of your OpenMV Cam to match
# image patches to parts of an image... except for extremely controlled environments
# NCC is not all that useful.
#
# WARNING: NCC support needs to be reworked! As of right now this feature needs
# a lot of work to be made into something useful. This script will remain to show
# that the functionality exists, but, in its current state, is inadequate.

import time, sensor, image
from image import SEARCH_EX, SEARCH_DS
from pyb import UART

# Reset sensor
sensor.reset()

# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
# Max resolution for template matching with SEARCH_EX is QQVGA
sensor.set_framesize(sensor.QQVGA)
# You can set windowing to reduce the search image.
#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
sensor.set_pixformat(sensor.GRAYSCALE)

uart = UART(3, 115200)

# Load templates.
# A template should be a small (e.g. 32x32 pixels) grayscale image.
template1 = image.Image("/1.pgm")
template2 = image.Image("/2.pgm")

clock = time.clock()
flag = 0

# Run template matching
while (True):
    clock.tick()
    img = sensor.snapshot()

    # find_template(template, threshold, [roi, step, search])
    # ROI: The region of interest tuple (x, y, w, h).
    # Step: The loop step used (y+=step, x+=step) use a bigger step to make it faster.
    # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search
    #
    # Note1: ROI has to be smaller than the image and bigger than the template.
    # Note2: In diamond search, step and ROI are both ignored.
    if flag == 0:
        r1 = img.find_template(template1, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
        r2 = img.find_template(template2, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
        if r1:
            img.draw_rectangle(r1)
            flag = 1
        if r2:
            img.draw_rectangle(r2)
            flag = 2
        print(clock.fps())

    if flag == 1:
        uart.write("1")
        flag = 999
    if flag == 2:
        uart.write("2")
        flag = 998

    r1 = img.find_template(template1, 0.70, step=4, search=SEARCH_EX)
    r2 = img.find_template(template2, 0.70, step=4, search=SEARCH_EX)
    if flag == 999:
        if r1:
            img.draw_rectangle(r1)
            # (the original post breaks off here mid-statement)
-
OSError: Region of interest is smaller than template
How do I fix "OSError: Region of interest is smaller than template"?
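find_template() raises this OSError when the search region, either the roi you pass or the whole frame if you pass none, is narrower or shorter than the template. So enlarge the roi, use a higher resolution, or shrink the template image. A minimal sketch of a size guard, assuming a roi like the commented-out one in the scripts above:

roi = (10, 0, 60, 60)  # must be at least as wide and as tall as the template
if roi[2] >= template1.width() and roi[3] >= template1.height():
    r = img.find_template(template1, 0.70, roi=roi, step=4, search=SEARCH_EX)
else:
    print("roi %dx%d is smaller than template %dx%d" %
          (roi[2], roi[3], template1.width(), template1.height()))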