import sensor, image, time, os, tf, uos, gc
from pyb import UART
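# Serial link to the external controller board. Each recognized digit is sent
# as a 4-byte frame: header 0xFE 0xBC, the digit's label index, trailer 0xEF
# (the frame is assembled near the end of the main loop below).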
uart = UART(3, 9600)
net = None
labels = None
sensor.reset()                          # initialize the camera sensor
sensor.set_pixformat(sensor.GRAYSCALE)  # set the pixel format to grayscale
sensor.set_framesize(sensor.QVGA)       # set the frame size to QVGA (320x240)
sensor.set_windowing((240, 240))        # crop the window to 240x240
sensor.skip_frames(10)                  # skip a few frames so the settings above take effect
try:
    # load the model; place it in the frame buffer if it will not fit in the heap with 64 KB to spare
    net = tf.load("trained.tflite", load_to_fb=uos.stat('trained.tflite')[6] > (gc.mem_free() - (64*1024)))
except Exception as e:
    print(e)
    raise Exception('Failed to load "trained.tflite", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')
try:
    # load the label list, one label per line
    labels = [line.rstrip('\n') for line in open("labels.txt")]
except Exception as e:
    raise Exception('Failed to load "labels.txt", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')
clock = time.clock()                    # create a clock object to measure FPS
while True:
    clock.tick()                        # update the FPS clock
    img = sensor.snapshot()             # take a picture and return the image
    # default settings just do one detection... change them to search the image...
    for obj in net.classify(img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
        #print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        img.draw_rectangle(obj.rect())  # mark the classified region on the image
        # pair each label with its confidence score as a list of tuples
        predictions_list = list(zip(labels, obj.output()))
        # optionally print each label and its confidence for the detected object
        #for i in range(len(predictions_list)):
        #    print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))
        scores = obj.output()
        if max(scores) > 0.7:           # accept the result only when the top confidence exceeds 0.7
            max_idx = scores.index(max(scores))
            num = labels[max_idx]       # label of the recognized digit
            print('Recognized digit: %s' % num)
            #uart.write(num)            # earlier debug version: send the label text directly
            # send a framed packet over UART: header 0xFE 0xBC, digit index, trailer 0xEF
            packet = bytearray([0xFE, 0xBC, max_idx, 0xEF])
            uart.write(packet)
            time.sleep_ms(100)          # brief pause so the receiver is not flooded
    #print(clock.fps(), "fps")          # print the frame rate
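# ----------------------------------------------------------------------
# Reference only, not executed by this script: a minimal sketch of how the
# receiving side could parse the 4-byte frame 0xFE 0xBC <index> 0xEF.
# It assumes a host PC running pyserial; the port name '/dev/ttyUSB0' is a
# placeholder, adapt it (and the code) to whatever MCU or host actually
# listens on the other end of the UART link.
#
# import serial
# ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
# buf = bytearray()
# while True:
#     buf += ser.read(1)                      # accumulate incoming bytes
#     while len(buf) >= 4:
#         if buf[0] == 0xFE and buf[1] == 0xBC and buf[3] == 0xEF:
#             print('digit index:', buf[2])   # third byte carries the label index
#             del buf[:4]                     # drop the consumed frame
#         else:
#             del buf[:1]                     # resync: discard one byte and retry
# ----------------------------------------------------------------------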