Traceback (most recent call last):
File "/home/train.py", line 356, in
main_function()
File "/home/train.py", line 267, in main_function
train_dataset, validation_dataset, samples_dataset, X_train, X_test, Y_train, Y_test, has_samples, X_samples, Y_samples = ei_tensorflow.training.get_dataset_from_folder(
File "/app/./resources/libraries/ei_tensorflow/training.py", line 238, in get_dataset_from_folder
X_train, X_test, Y_train, Y_test, X_train_raw = split_and_shuffle_data(
File "/app/./resources/libraries/ei_tensorflow/training.py", line 63, in split_and_shuffle_data
Y_file = np_load_file_auto_mmap(os.path.join(dir_path, y_train_path))
File "/app/./resources/libraries/ei_tensorflow/training.py", line 24, in np_load_file_auto_mmap
return np.load(file)
File "/app/keras/.venv/lib/python3.8/site-packages/numpy/lib/npyio.py", line 438, in load
raise ValueError("Cannot load file containing pickled data "
ValueError: Cannot load file containing pickled data when allow_pickle=False
Application exited with code 1
Job failed (see above)
N
n6l1
@n6l1
0
声望
5
楼层
206
资料浏览
0
粉丝
0
关注
n6l1 发布的帖子
-
训练transfer learning为什么报错?
-
Edge Impulse中我想训练一个“transfer learning”的模型,但是系统提示报错,应该如何解决?
Edge Impulse中我想训练一个“transfer learning”的模型,但是系统提示报错“Failed to start job: Your labeling method is set to "Bounding boxes (Object detection)", but you're trying to train a non-object detection model. Either change the labeling method (on Dashboard), or remove this learn block and add an 'Object detection' block under Create impulse.
Job failed (see above)”应该怎么解决? -
RE: 训练神经网络,为什么运行结果只有帧率,没有概率?而且代码中的net为什么是none?
 我想让运行结果为类似官方给的教程37里的运行结果“face=0.05,mask=0.94”应该怎么做
-
训练神经网络,为什么运行结果只有帧率,没有概率?而且代码中的net为什么是none?
# Edge Impulse - OpenMV Object Detection Example
#
# Loads an Edge Impulse object-detection model ("trained.tflite") on an
# OpenMV camera, then loops forever: grabs a frame, runs detection, prints
# the center coordinates of every detection per class (skipping the
# background class), draws a circle on each, and prints the frame rate.
# NOTE(review): runs on OpenMV MicroPython firmware — `sensor`, `image`,
# `tf`, and `uos` are firmware modules, not CPython stdlib.
import sensor, image, time, os, tf, math, uos, gc

sensor.reset()                       # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)    # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))     # Set 240x240 window.
sensor.skip_frames(time=2000)        # Let the camera adjust.

net = None
labels = None
min_confidence = 0.5

try:
    # load the model, alloc the model file on the heap if we have at least 64K free after loading
    net = tf.load("trained.tflite", load_to_fb=uos.stat('trained.tflite')[6] > (gc.mem_free() - (64*1024)))
except Exception as e:
    raise Exception('Failed to load "trained.tflite", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

try:
    labels = [line.rstrip('\n') for line in open("labels.txt")]
except Exception as e:
    raise Exception('Failed to load "labels.txt", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

colors = [  # Add more colors if you are detecting more than 7 types of classes at once.
    (255,   0,   0),
    (  0, 255,   0),
    (255, 255,   0),
    (  0,   0, 255),
    (255,   0, 255),
    (  0, 255, 255),
    (255, 255, 255),
]

clock = time.clock()

while(True):
    clock.tick()

    img = sensor.snapshot()

    # detect() returns all objects found in the image (splitted out per class already)
    # we skip class index 0, as that is the background, and then draw circles of the center
    # of our objects
    for i, detection_list in enumerate(net.detect(img, thresholds=[(math.ceil(min_confidence * 255), 255)])):
        if (i == 0): continue  # background class
        if (len(detection_list) == 0): continue  # no detections for this class?

        print("********** %s **********" % labels[i])
        for d in detection_list:
            [x, y, w, h] = d.rect()
            center_x = math.floor(x + (w / 2))
            center_y = math.floor(y + (h / 2))
            print('x %d\ty %d' % (center_x, center_y))
            img.draw_circle((center_x, center_y, 12), color=colors[i], thickness=2)

    print(clock.fps(), "fps", end="\n\n")