TypeError: can't convert float to int — a red-line-following program raises this error on the roi argument
-
# Black Grayscale Line Following Example
#
# Making a line following robot requires a lot of effort. This example script
# shows how to do the machine vision part of the line following robot. You
# can use the output from this script to drive a differential drive robot to
# follow a line. This script just generates a single turn value that tells
# your robot to go left or right.
#
# For this script to work properly you should point the camera at a line at a
# 45 or so degree angle. Please make sure that only the line is within the
# camera's field of view.

import sensor, image, time, math  # module imports
from pyb import UART
import json

# Tracks a black line. Use [(128, 255)] for tracking a white line.
# GRAYSCALE_THRESHOLD = [(0, 50)]
#GRAYSCALE_THRESHOLD = [(0, 80)]
#RED_THRESHOLD = [( 30, 80, 0, 45, 10, 30)]
#RED_THRESHOLD = [(44, 75, 8, 77, -44, 21)]
RED_THRESHOLD = [(20, 34, -19, 72, 16, 37)]
#frame_size_w = 320
frame_size_w = 160
# Threshold setup: for a black line, GRAYSCALE_THRESHOLD = [(0, 64)];
# for a white line, GRAYSCALE_THRESHOLD = [(128, 255)]
#GRAYSCALE_THRESHOLD = [(190,255)]

# Each roi is (x, y, w, h). The line detection algorithm will try to find the
# centroid of the largest blob in each roi. The x position of the centroids
# will then be averaged with different weights where the most weight is assigned
# to the roi near the bottom of the image and less to the next roi and so on.
# ROIS = [ # [ROI, weight]
#         (0, 40, 80, 15, 0.7), # You'll need to tweak the weights for your app
#         (0, 20, 80, 15, 0.3), # depending on how your robot is set up.
#         (0, 00, 80, 15, 0.1)
#        ]

if frame_size_w == 160:
    #print(frame_size_w)
    width = frame_size_w / 2
    frame_size_h = frame_size_w * 3 / 4
    height = (frame_size_h / 10) // 3 * 10
    roi_x = (frame_size_w - width) / 2
    roi_y_base = height
    roi_h = height * 3 / 5
    roi_y_base_two = height * 2
    print("height")
    print(height)
    print("test")
    print(roi_x)
    print(roi_y_base)
    print(roi_h)
    print(roi_y_base_two)
    ROIS = [ # [ROI, weight]
            (roi_x, roi_y_base_two, width, roi_h, 0.7), # You'll need to tweak the weights for your app
            # adjust the height to match the line width
            # the far ROI gets the larger weight
            (roi_x, roi_y_base, width, roi_h, 0.3), # depending on how your robot is set up.
            # the weights still need tuning later
            # the full width of 160 (the maximum for this resolution) seems to be required,
            # otherwise turns apparently go undetected
            (0, 000, frame_size_w, roi_h, 0)
           ]
    #ROIS = [ # [ROI, weight]
    #        (20, 40, 40, 15, 0.1), # You'll need to tweak the weights for your app
    #        (20, 20, 40, 15, 0.3), # depending on how your robot is set up.
    #        (20, 00, 40, 15, 0.7)
    #       ]

if frame_size_w == 320:
    #print(frame_size_w)
    width = frame_size_w / 2
    frame_size_h = frame_size_w * 3 / 4
    height = (frame_size_h / 10) // 3 * 10
    print(height)
    ROIS = [ # [ROI, weight]
            ((frame_size_w - width) / 2, height * 2, width, height * 3 / 5, 0.7), # You'll need to tweak the weights for your app
            # adjust the height to match the line width
            # the far ROI gets the larger weight
            ((frame_size_w - width) / 2, height, width, height * 3 / 5, 0.3), # depending on how your robot is set up.
            # the weights still need tuning later
            # the full width of 160 (the maximum for this resolution) seems to be required,
            # otherwise turns apparently go undetected
            (0, 000, frame_size_w, height * 3 / 5, 0)
           ]
    #ROIS = [ # [ROI, weight]
    #        (20, 40, 40, 15, 0.1), # You'll need to tweak the weights for your app
    #        (20, 20, 40, 15, 0.3), # depending on how your robot is set up.
    #        (20, 00, 40, 15, 0.7)
    #       ]

# Each roi is a sampling region (x, y, w, h, weight): a rectangle whose top-left corner is
# (x, y) with width w and height h; weight is that rectangle's weight. Note this example
# uses a 160x120 QQVGA image, so the rois split the image into three horizontal strips.
# The three rectangles must be tuned to the actual scene; the rectangle closest to the
# robot's view should get the largest weight, e.g. the bottom rectangle (0, 100, 160, 20, 0.7).

# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
weight_sum = 0  # initialize the weight sum
for r in ROIS:
    weight_sum += r[4] # r[4] is the roi weight.
print("weight_sum_max")
print(weight_sum)
#weight_sum = 0  # initialize the weight sum
current_x = current_y = 0
# Compute the weight sum by iterating over the rectangles above; r[4] is each rectangle's weight.

# Camera setup...
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # use RGB565.
#sensor.set_pixformat(sensor.GRAYSCALE) # use grayscale.
if frame_size_w == 320:
    sensor.set_framesize(sensor.QVGA) # use QVGA.
if frame_size_w == 160:
    sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.skip_frames(300) # Let new settings take effect. Don't make this too small, or the image stutters.
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking (disable white balance)
clock = time.clock() # Tracks FPS.
uart = UART(3, 9600) # the baud rate must be set the same on both ends

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    # uart = UART(3,19200)
    # uart.init(19200,bits=8,parity=None,stop=1) # init with given parameters
    centroid_sum = centroid_sum_y = 0
    flag = 0
    #print("i")
    #print(i)
    # use color tracking to look for the line segment inside each rectangle
    #weight_sum = 1  # initialize the weight sum
    for r in ROIS:
        find_blobs_num = 0
        error_x = 0 # - left; + right
        #blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=False)
        print(r[0])
        for i in range(4):
            print(r[i])
        print("test")
        print(r[0:4])
        blobs = img.find_blobs(RED_THRESHOLD, roi=r[0:4], merge=False)
        #blobs = img.find_blobs(RED_THRESHOLD, roi=[40, 80, 80, 24], merge=False)
        ##blobs = img.find_blobs(RED_THRESHOLD, roi=(40.0, 40.0, 80.0, 24.0), merge=False)
        #blobs = img.find_blobs(RED_THRESHOLD, roi=[0, 0, 160, 24.0], merge=False)
        # blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is roi tuple.
        # find the line in view; merge=True merges the found regions into one
        # the statements below are executed for every ROI
        if blobs: # a line was found in the target region
            find_blobs_num += 1
            flag += 1 # use flag, not i
            print("flag ")
            print(flag)
            #if flag == 1:
            #    weight_sum = 0
            # Find the index of the blob with the most pixels.
            most_pixels = 0
            largest_blob = 0
            for i in range(len(blobs)):
                # the target region may contain more than one color blob (line segment);
                # take the largest one as this region's target line
                if blobs[i].pixels() > most_pixels:
                    most_pixels = blobs[i].pixels()
                    # if this blob's pixel count exceeds most_pixels, record it as the
                    # largest blob and update most_pixels and largest_blob
                    largest_blob = i
            #print("most_pixels")
            #print(most_pixels)
            # Draw a rect around the blob.
            img.draw_rectangle(blobs[largest_blob].rect())
            #img.draw_rectangle((0,0,20, 20))
            # mark this region's largest blob with a rectangle and a cross
            img.draw_cross(blobs[largest_blob].cx(), blobs[largest_blob].cy())
            if frame_size_w == 320:
                print(find_blobs_num)
                error_x += blobs[largest_blob].cx() - frame_size_w / 2
                print(error_x)
            if frame_size_w == 160:
                print(find_blobs_num)
                error_x += blobs[largest_blob].cx() - frame_size_w / 2
                print(error_x)
            # if no turn angle is needed, everything down to (1) can stay commented out
            #centroid_sum += blobs[largest_blob].cx() * r[4] # r[4] is the roi weight.
            ## centroid_sum is the sum of each region's largest blob's center x times that region's weight
            #centroid_sum_y += blobs[largest_blob].cy() * r[4] # r[4] is the roi weight.
            #print("centroid_x_proportion")
            #print(centroid_sum)
            #weight_sum += r[4]
            #print("weight_sum_temp")
            #print(weight_sum)
            #if(r[4]==0.1):
            #    ## the near ROI has the smaller weight; compute the current position
            #    current_x = blobs[largest_blob].cx()
            #    current_y = blobs[largest_blob].cy()
            #print("current_x")
            #print(current_x)
            #print("current_y")
            #print(current_y)
            #for temp in blobs:
            #    img.draw_rectangle(temp.rect())
    #print("weight ")
    #print(weight_sum)
    ## print("s")
    #center_pos = (centroid_sum / weight_sum) # Determine center of line.
    #center_pos_y = (centroid_sum_y / weight_sum) ## intermediate formula
    #print("center_pos_x ")
    #print(center_pos)
    #print("center_pos_y ")
    #print(center_pos_y)
    # (1) end of the optional turn-angle block
    print("error_x")
    print(error_x)
    if int(error_x) > 0:
        error_x = int(error_x) * 10
    else:
        error_x = abs(int(error_x)) * 10 + 1
    error_x_str = str(error_x)
    error_x_str_out = json.dumps(set(error_x_str)) # convert the data to JSON
    uart.write('?' + error_x_str_out + '!') # write to the buffer for the Arduino to read
    # uart.write('?'+data+';'+data+'!') # write to the buffer for the Arduino to read
    #print('you send:', data_out) # echo to the serial monitor so you can see the data
    time.sleep(0.1) # delay
    print("clock ")
    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
Before the modification there was no bug.
The only change to the script above was this block:
#ROIS = [ # [ROI, weight]
#        (roi_x, roi_y_base_two, width, roi_h, 0.7), # You'll need to tweak the weights for your app
#        # adjust the height to match the line width
#        # the far ROI gets the larger weight
#        (roi_x, roi_y_base, width, roi_h, 0.3), # depending on how your robot is set up.
#        # the weights still need tuning later
#        # the full width of 160 (the maximum for this resolution) seems to be required,
#        # otherwise turns apparently go undetected
#        (0, 000, frame_size_w, roi_h, 0)
#       ]
ROIS = [ # [ROI, weight]
        (20, 40, 40, 15, 0.1), # You'll need to tweak the weights for your app
        (20, 20, 40, 15, 0.3), # depending on how your robot is set up.
        (20, 00, 40, 15, 0.7)
       ]
The error is raised here:
blobs = img.find_blobs(RED_THRESHOLD, roi=r[0:4], merge=False)
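In MicroPython, as in Python 3, the / operator always returns a float, so every ROI member computed in the frame_size_w == 160 branch is a float, and find_blobs() evidently requires integer roi coordinates, hence the TypeError. A minimal illustration of the values involved:

# "/" is true division, so the computed ROI members are floats even when
# the result is a whole number:
frame_size_w = 160
width = frame_size_w / 2              # 80.0, not 80
roi_x = (frame_size_w - width) / 2    # 40.0
# r[0:4] is then something like (40.0, 80.0, 80.0, 24.0), which is what makes
# find_blobs() raise "TypeError: can't convert float to int".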
-
Lines 67 to 75 and lines 90 to 96: convert all the members to integers by wrapping them in int().
For example, change roi_x to int(roi_x). Note: I am only fixing the error you reported; the logic is yours to work out, as I can't follow it.
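A minimal sketch of that fix for the QQVGA (160-wide) branch, using the poster's own variable names; only the int() casts are new:

# Cast every computed ROI member to int before it goes into ROIS, as the
# reply suggests (roi_x -> int(roi_x), and so on). The weights can stay
# floats because find_blobs() only receives r[0:4].
frame_size_w = 160
width = int(frame_size_w / 2)               # 80
frame_size_h = int(frame_size_w * 3 / 4)    # 120
height = int(frame_size_h / 10) // 3 * 10   # 40
roi_x = int((frame_size_w - width) / 2)     # 40
roi_y_base = height
roi_h = int(height * 3 / 5)                 # 24
roi_y_base_two = height * 2                 # 80
ROIS = [ # [ROI, weight] -- every (x, y, w, h) member is now an int
        (roi_x, roi_y_base_two, width, roi_h, 0.7),
        (roi_x, roi_y_base, width, roi_h, 0.3),
        (0, 0, frame_size_w, roi_h, 0)
       ]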