After detecting the target with the FOMO model, I want to use the detected center coordinates to drive a pan-tilt gimbal, but the gimbal just turns down to its lowest position and then stops responding. My full script is below.

# Edge Impulse - OpenMV Object Detection Example
import sensor, image, time, os, tf, math, uos, gc, tv
from pid import PID
from pyb import Servo
pan_servo = Servo(1)
tilt_servo = Servo(2)
pan_servo.calibration(500, 2500, 500)
tilt_servo.calibration(500, 2500, 500)

pan_pid = PID(p=0.07, i=0, imax=90)  # Use this PID when running standalone or with image streaming disabled
tilt_pid = PID(p=0.05, i=0, imax=90) # Use this PID when running standalone or with image streaming disabled
#pan_pid = PID(p=0.1, i=0, imax=90)  # Use this PID when debugging online (IDE connected)
#tilt_pid = PID(p=0.1, i=0, imax=90) # Use this PID when debugging online (IDE connected)
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240)) # Set 240x240 window.
sensor.skip_frames(time=2000) # Let the camera adjust.
net = None
labels = None
min_confidence = 0.5
try:
    # Load the model; allocate the model file on the heap if we have at least 64K free after loading.
    net = tf.load("trained.tflite", load_to_fb=uos.stat('trained.tflite')[6] > (gc.mem_free() - (64*1024)))
except Exception as e:
    raise Exception('Failed to load "trained.tflite", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

try:
    labels = [line.rstrip('\n') for line in open("labels.txt")]
except Exception as e:
    raise Exception('Failed to load "labels.txt", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')
colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
    (255,   0,   0),
    (  0, 255,   0),
    (255, 255,   0),
    (  0,   0, 255),
    (255,   0, 255),
    (  0, 255, 255),
    (255, 255, 255),
]
clock = time.clock()
tv.init(triple_buffer=False)  # Initialize the TV output
tv.channel(8)                 # Channel used by the wireless video transmitter expansion board
while(True):
    clock.tick()

    img = sensor.snapshot()

    # detect() returns all objects found in the image (split out per class already).
    # We skip class index 0, as that is the background, and then draw circles at the
    # center of our objects.
    result = net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])
    print(result)
    for i, detection_list in enumerate(result):
        if (i == 0): continue # background class
        if (len(detection_list) == 0): continue # no detections for this class?

        print("********** %s **********" % labels[i])
        for d in detection_list:
            [x, y, w, h] = d.rect()
            center_x = math.floor(x + (w / 2))
            center_y = math.floor(y + (h / 2))
            print('x %d\ty %d' % (center_x, center_y))
            img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2)
            # Pixel error fed to the PID controllers, then nudge each servo.
            pan_error = center_x - img.width()
            tilt_error = center_y - img.height()
            print(pan_error, tilt_error)
            pan_output = pan_pid.get_pid(pan_error, 1)
            tilt_output = tilt_pid.get_pid(tilt_error, 1)
            print(pan_output, tilt_output)
            pan_servo.angle(pan_servo.angle() + pan_output)
            tilt_servo.angle(tilt_servo.angle() - tilt_output)
    tv.display(sensor.snapshot())  # Take a new snapshot and send it to the wireless display

    print(clock.fps(), "fps", end="\n\n")
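
For reference, a minimal sketch of the centering step this script seems to be aiming for, assuming the goal is to hold the detection in the middle of the 240x240 window and reusing the pan_pid, tilt_pid, pan_servo and tilt_servo objects defined above. It measures the pixel error from the image centre (img.width()/2, img.height()/2) rather than from the full width/height: with pan_error = center_x - img.width() as written above, the error can never be positive (center_x stays below 240 while img.width() is 240), so the PID output keeps pushing each servo toward one end of its travel, which would match the gimbal dropping to its lowest position and staying there. The clamp() helper, the track_to_center() name and the ±90° limits below are illustrative assumptions, not part of any OpenMV API, so adjust them to your mechanism.

def clamp(value, lo, hi):
    # Keep the commanded angle inside the servo's usable travel.
    return max(lo, min(hi, value))

def track_to_center(img, center_x, center_y):
    # Measure the error from the image centre so it is signed around zero
    # (for the 240x240 window the centre is at pixel 120, 120).
    pan_error = center_x - img.width() / 2
    tilt_error = center_y - img.height() / 2

    pan_output = pan_pid.get_pid(pan_error, 1)
    tilt_output = tilt_pid.get_pid(tilt_error, 1)

    # Clamp so a persistent error cannot drive a servo past its travel limits.
    pan_servo.angle(clamp(pan_servo.angle() + pan_output, -90, 90))
    tilt_servo.angle(clamp(tilt_servo.angle() - tilt_output, -90, 90))

This would be called as track_to_center(img, center_x, center_y) in place of the pan_error/tilt_error block inside the inner for loop. Whether the PID output is added to or subtracted from each servo angle depends on how the servos are mounted, so the signs may still need flipping.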