python-3.x CNN validation accuracy is high, but predictions are poor?

mzsu5hc0 posted on 2022-11-19 in Python

I am trying to build a CNN to distinguish between 3 classes: genuine faces, printed faces, and replayed faces. I prepared the data as follows:

classes = ['Genuine', 'Printed', 'Replay']

base_dir = '/Dataset'

import os
import numpy as np
import glob
import shutil

for cl in classes:
  img_path = os.path.join(base_dir, cl)
  images = glob.glob(img_path + '/*.jpg')
  print("{}: {} Images".format(cl, len(images)))
  num_train = int(round(len(images)*0.8))
  train, val = images[:num_train], images[num_train:]

  for t in train:
    if not os.path.exists(os.path.join(base_dir, 'train', cl)):
      os.makedirs(os.path.join(base_dir, 'train', cl))
    shutil.move(t, os.path.join(base_dir, 'train', cl))

  for v in val:
    if not os.path.exists(os.path.join(base_dir, 'val', cl)):
      os.makedirs(os.path.join(base_dir, 'val', cl))
    shutil.move(v, os.path.join(base_dir, 'val', cl))

from tensorflow.keras.preprocessing.image import ImageDataGenerator

image_gen_train = ImageDataGenerator(
                    rescale=1./255,
                    rotation_range=45,
                    width_shift_range=.15,
                    height_shift_range=.15,
                    horizontal_flip=True,
                    zoom_range=0.5
                    )

batch_size = 32
IMG_SHAPE = 96
train_dir = os.path.join(base_dir, 'train')
train_data_gen = image_gen_train.flow_from_directory(
                                                batch_size=batch_size,
                                                directory=train_dir,
                                                shuffle=True,
                                                target_size=(IMG_SHAPE,IMG_SHAPE),
                                                class_mode='sparse'
                                                )
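The validation generator is not shown above; it presumably just rescales the images without any augmentation, roughly like the sketch below (val_dir, image_gen_val and val_data_gen are assumed names):

val_dir = os.path.join(base_dir, 'val')

# Validation images are only rescaled, never augmented (assumed setup).
image_gen_val = ImageDataGenerator(rescale=1./255)

val_data_gen = image_gen_val.flow_from_directory(
                                                batch_size=batch_size,
                                                directory=val_dir,
                                                target_size=(IMG_SHAPE, IMG_SHAPE),
                                                class_mode='sparse'
                                                )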

I built a simple model as follows:

## Model
import tensorflow as tf
from keras import regularizers
from tensorflow.keras.layers import BatchNormalization

IMG_SHAPE = (96, 96, 3)
batch_size = 32

## Trainable classification head

aConv_layer = tf.keras.layers.Conv2D(576, (3, 3), padding="same", 
                                     activation="relu", input_shape= IMG_SHAPE)
aConv_layer = tf.keras.layers.Conv2D(144, (3, 3), padding="same", 
                                     activation="relu", input_shape= IMG_SHAPE)

gmaxPool_layer = tf.keras.layers.GlobalMaxPooling2D() #reduces input from 4D to 2D
maxPool_layer = tf.keras.layers.MaxPool2D(pool_size=(1, 1), strides=None, 
                                          padding='valid', data_format=None,
                                          )

batNor_layer = tf.keras.layers.BatchNormalization(axis=-1, momentum=0.99, 
                                                  epsilon=0.001, 
                                center=True, scale=True, 
                                beta_initializer='zeros', 
                                gamma_initializer='ones', 
                                moving_mean_initializer='zeros', 
                                moving_variance_initializer='ones', 
                                beta_regularizer=None, gamma_regularizer=None, 
                                beta_constraint=None, gamma_constraint=None)

flat_layer = tf.keras.layers.Flatten()

dense_layer = tf.keras.layers.Dense(9, activation='softmax', 
                                    kernel_regularizer=regularizers.l2(0.01))

prediction_layer = tf.keras.layers.Dense(3, activation='softmax')

model = tf.keras.Sequential([
     #base_model,
     tf.keras.layers.Conv2D(576, (3, 3), padding="same", activation="relu", input_shape= IMG_SHAPE),
     tf.keras.layers.Dense(288, activation='softmax', kernel_regularizer=regularizers.l2(0.01)),
     tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None),
     tf.keras.layers.Conv2D(144, (3, 3), padding="same", activation="relu"),
     tf.keras.layers.Dense(72, activation='softmax', kernel_regularizer=regularizers.l2(0.01)),
     tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None),
     #
     batNor_layer,
     gmaxPool_layer,
     tf.keras.layers.Flatten(),
     #tf.keras.layers.Dropout(0.5),
     prediction_layer                        
])

learning_rate = 0.001

## Compiles the model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy']
)

I trained the model, and the training/validation results looked very good to me (high validation accuracy).
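The actual fit() call is not included above; it was presumably something along these lines (a sketch only: the EPOCHS value and the val_data_gen from the earlier sketch are assumptions):

EPOCHS = 100  # assumed value, not given in the question

history = model.fit(
    train_data_gen,
    epochs=EPOCHS,
    validation_data=val_data_gen
)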

However, whenever I try to predict an image with the code below, it is almost always wrong:

import numpy as np
from google.colab import files
from keras.preprocessing import image

uploaded = files.upload()
for fn in uploaded.keys():

# predicting images
  path = fn
  img = image.load_img(path, target_size=(96, 96))
  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  images = np.vstack([x])
  classes = model.predict(images, batch_size=10)
print(fn)
print('Genuine | Printout | Replay')
print(np.argmax(classes))

How can the predictions be wrong when the validation accuracy is that high? In case it helps, here is the Colab notebook.


4uqofj5v  1#

For some reason, Keras' image generators work fine when combined with the fit() or fit_generator() functions, but fail badly when combined with predict_generator() or predict().
When using the Plaid-ML Keras backend on an AMD processor, I'd rather loop over all the test images one by one and get the prediction for each image in each iteration.

import os
from PIL import Image
import keras
import numpy

# code for creating and training the model is not included

print("Prediction result:")
dir = "/path/to/test/images"
files = os.listdir(dir)
correct = 0
total = 0
# dictionary mapping each class index to its animal label
classes = {
    0:'This is Cat',
    1:'This is Dog',
}
for file_name in files:
    total += 1
    image = Image.open(dir + "/" + file_name).convert('RGB')
    image = image.resize((100,100))
    image = numpy.expand_dims(image, axis=0)
    image = numpy.array(image)
    image = image/255
    pred = model.predict_classes([image])[0]
    animals_category = classes[pred]
    if ("cat" in file_name) and ("cat" in sign):
        print(correct,". ", file_name, animals_category)
        correct+=1
    elif ("dog" in file_name) and ("dog" in animals_category):
        print(correct,". ", file_name, animals_category)
        correct+=1
print("accuracy: ", (correct/total))

iaqfqrcu  2#

Process the images you predict on in the same way the training images were processed. In particular, rescale them the way the ImageDataGenerator does.

import numpy as np
from google.colab import files
from keras.preprocessing import image

uploaded = files.upload()
for fn in uploaded.keys():

# predicting images
  path = fn
  img = image.load_img(path, target_size=(96, 96))
  x = image.img_to_array(img)
  # Rescale image.
  x = x / 255.
  x = np.expand_dims(x, axis=0)
  images = np.vstack([x])
  classes = model.predict(images, batch_size=10)
print(fn)
print('Genuine | Printout | Replay')
print(np.argmax(classes))
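A related check, as a minimal sketch: flow_from_directory assigns class indices alphabetically by folder name, so the mapping from the predicted index back to a label is safest taken from the generator itself (train_data_gen below refers to the generator from the question):

# class_indices maps folder name -> integer index, e.g. {'Genuine': 0, 'Printed': 1, 'Replay': 2}
idx_to_label = {v: k for k, v in train_data_gen.class_indices.items()}
print(idx_to_label[int(np.argmax(classes))])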
