Does OpenMV support machine learning with SVM or decision trees?
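The OpenMV firmware does not bundle a general machine-learning library such as scikit-learn; its built-in inference path is the tf module for TFLite models. Because the board runs MicroPython, however, you can train an SVM or decision tree on a PC, export the learned parameters, and evaluate them on-device by hand. Below is a minimal sketch of such on-device inference; the weights, bias, tree layout, and feature values are made-up placeholders, not output of any real trained model.

# Hand-rolled inference for models trained off-device (e.g. with scikit-learn).
# All numbers below are illustrative placeholders.

# Linear SVM: predict sign(w . x + b).
SVM_W = [0.42, -1.3, 0.07]   # weights exported from the trained model
SVM_B = -0.5                 # bias term

def svm_predict(x):
    s = SVM_B
    for wi, xi in zip(SVM_W, x):
        s += wi * xi
    return 1 if s > 0 else 0

# Decision tree as nested (feature_index, threshold, left, right) tuples;
# leaves are plain class labels.
TREE = (0, 0.5,
        (1, 10.0, "A", "B"),   # taken when x[0] <= 0.5
        "C")                   # taken when x[0] >  0.5

def tree_predict(node, x):
    while isinstance(node, tuple):
        f, thr, left, right = node
        node = left if x[f] <= thr else right
    return node

features = [0.3, 12.0, -1.0]   # e.g. statistics computed from find_blobs()
print(svm_predict(features), tree_predict(TREE, features))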
Posts by 4fi5
-
Can template matching be combined with filtering and edge detection?
Can template matching be combined with filtering and edge detection? After I added median filtering to my multi-template-matching script, it runs without errors, but template matching no longer finds anything. Why is that?
import time, sensor, image
from image import SEARCH_EX, SEARCH_DS

# Initialize the sensor.
sensor.reset()
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Template files on the flash filesystem.
templates = ["/0.pgm", "/1.pgm", "/2.pgm", "/3.pgm", "/4.pgm"]

# One search ROI per template.
roi_list = [(10, 0, 40, 40)] * len(templates)

sensor.skip_frames(time=2000)  # Let the new settings take effect.
clock = time.clock()           # Track the FPS.

while True:
    clock.tick()               # Track milliseconds between snapshots.
    img = sensor.snapshot()    # Take a picture and return the image.

    # Median-filter preprocessing. The first argument is the kernel size:
    # n gives a ((n*2)+1) x ((n*2)+1) kernel, so 1 == 3x3, 2 == 5x5, etc.
    # "percentile" picks which percentile of the NxN neighborhood to keep:
    # 0.5 is the median, 0.25 the lower quartile, 0.75 the upper quartile.
    # NOTE: do NOT pass threshold=True here as in the original post; that
    # turns the filter into adaptive thresholding and binarizes the image,
    # which breaks the grayscale correlation that find_template() relies on.
    # (The original post also defined an apply_median_blur() helper calling
    # image.median_blur(), which does not exist in the OpenMV image API.)
    img.median(1, percentile=0.5)

    char_positions = []  # Collected (character, x position) pairs.
    for t, roi in zip(templates, roi_list):
        template = image.Image(t)
        r = img.find_template(template, 0.70, step=3, search=SEARCH_DS, roi=roi)
        if r:
            img.draw_rectangle(r)
            # Use the first character of the template filename as the label.
            char_positions.append((t[1], r[0]))

    # If at least three characters were recognized, read them left to right.
    if len(char_positions) > 2:
        char_positions.sort(key=lambda x: x[1])
        result = ''.join([char for char, _ in char_positions])
        print("Result:", result)
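On the edge-detection half of the question: find_edges() replaces the grayscale frame with a binary edge map, so a template captured as a normal grayscale image will score near zero against it. A sketch of one consistent way to combine the two, assuming a hypothetical template file "/0_edges.pgm" that was itself saved as an edge map through the same pipeline:

import sensor, image
from image import SEARCH_DS

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # find_edges() requires grayscale.
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)

# Hypothetical template that was itself saved as an edge map.
edge_template = image.Image("/0_edges.pgm")

while True:
    img = sensor.snapshot()
    # Canny edge detection; the frame becomes a binary edge image in place.
    img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
    # Match the edge template against the edge image, not a gray template.
    r = img.find_template(edge_template, 0.60, step=3, search=SEARCH_DS)
    if r:
        img.draw_rectangle(r)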
-
RE: Trained digit recognition on Edge Impulse; accuracy on the site is over 90%, but after deploying to OpenMV almost every prediction is wrong
Hi, I previously trained on the same dataset on the SingTown (星瞳) platform as well, and the accuracy after deployment was similarly poor. How should I upload the data so that you can take a look?
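A frequent cause of this train/deploy gap is that the training images do not look like the live frames (different camera, lighting, resolution, or window). One way to rule that out is to capture the training set with the OpenMV camera itself, using the same sensor settings as the inference script, and upload those files. A minimal capture sketch; the "/dataset/<label>/" folder layout, the sample count, and the delay are arbitrary choices, and the folders must already exist on the SD card:

import sensor, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((240, 240))   # Same window as the inference script.
sensor.skip_frames(time=2000)

label = "0"          # Change this for each digit class being captured.
for i in range(50):  # 50 samples of this class.
    img = sensor.snapshot()
    img.save("/dataset/%s/%03d.jpg" % (label, i))
    time.sleep_ms(200)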
-
Trained digit recognition on Edge Impulse; accuracy on the site is over 90%, but after deploying to OpenMV almost every prediction is wrong
# Edge Impulse - OpenMV Image Classification Example

import sensor, image, time, os, tf, uos, gc

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))       # Set 240x240 window.
sensor.skip_frames(time=2000)          # Let the camera adjust.

net = None
labels = None

try:
    # load the model, alloc the model file on the heap if we have at least 64K free after loading
    net = tf.load("trained.tflite", load_to_fb=uos.stat('trained.tflite')[6] > (gc.mem_free() - (64*1024)))
except Exception as e:
    print(e)
    raise Exception('Failed to load "trained.tflite", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

try:
    labels = [line.rstrip('\n') for line in open("labels.txt")]
except Exception as e:
    raise Exception('Failed to load "labels.txt", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # default settings just do one detection... change them to search the image...
    for obj in net.classify(img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
        print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        img.draw_rectangle(obj.rect())
        # This combines the labels and confidence values into a list of tuples
        predictions_list = list(zip(labels, obj.output()))

        for i in range(len(predictions_list)):
            print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))

    print(clock.fps(), "fps")
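When debugging wrong predictions, printing every class score on every frame makes failures hard to read. A small helper that keeps only a confident top-1 result can make it clearer whether the model is merely uncertain or confidently wrong; it plugs into the classify loop above, and the 0.7 cutoff is an arbitrary example value:

def top_prediction(labels, scores, min_conf=0.7):
    # Return (label, score) for the best class, or None if below min_conf.
    best = max(zip(labels, scores), key=lambda p: p[1])
    return best if best[1] >= min_conf else None

# Inside the classify loop of the example above:
#     result = top_prediction(labels, obj.output())
#     if result:
#         print("prediction:", result[0], result[1])
#     else:
#         print("low confidence, skipping frame")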