Keras model with per-class accuracy metrics predicts only one class in the binary case

Asked by jyztefdp on 2022-11-13

With per-class metrics, the model in the binary-classification setup only ever predicts class 0 and never class 1, yet the same model, after modifying the code for multi-class classification, works fine. Could this be related to the loss function or to the sigmoid activation that is used?
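
For context, here is a minimal sketch (not from the original post) of what the per-class metrics below actually see when the model has a single sigmoid output unit: K.argmax over the last axis of a (batch, 1) tensor is always 0, so the metrics attribute every sample to class 0.

# Illustrative only; assumes a single-unit sigmoid output as in the model below.
import tensorflow as tf
from keras import backend as K

y_true = tf.constant([[0.], [1.], [1.], [0.]])      # binary labels, shape (4, 1)
y_pred = tf.constant([[0.1], [0.9], [0.8], [0.2]])  # sigmoid outputs, shape (4, 1)

# argmax along the last axis of a (batch, 1) tensor is always index 0,
# so both labels and predictions collapse to class 0 in the metrics.
print(K.argmax(y_true, axis=-1).numpy())  # [0 0 0 0]
print(K.argmax(y_pred, axis=-1).numpy())  # [0 0 0 0]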

The training output is:
Epoch 5/5
19/19 [==============================] - 144s 8s/step - loss: 0.0232 - accuracy: 0.9884 
- acc_1_0: 1.0000 - acc_1_1: 0.0000e+00 - prec_1_0: 1.0000 - recall_1_0: 1.0000 - 
prec_1_1: 0.0000e+00 - recall_1_1: 0.0000e+00 - val_loss: 0.0057 - val_accuracy: 1.0000 
- val_acc_1_0: 1.0000 - val_acc_1_1: 0.0000e+00 - val_prec_1_0: 1.0000 - val_recall_1_0: 
1.0000 - val_prec_1_1: 0.0000e+00 - val_recall_1_1: 0.0000e+00

Code:

import tensorflow as tf
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Flatten, Dense
from keras.optimizers import Adam

# Rescale pixel values to [0, 1] for both training and validation images.
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
data_dir1 = 'Flower2'
data_dir2 = 'Flower'

train_generator = train_datagen.flow_from_directory(data_dir1, target_size=(180, 180),
                                                    batch_size=32, shuffle=True,
                                                    class_mode='binary')

val_generator = test_datagen.flow_from_directory(data_dir2, target_size=(180, 180),
                                                 batch_size=32, shuffle=True,
                                                 class_mode='binary')

x_test, y_test = next(val_generator)

interesting_class_id = 0

def single_class_accuracy(interesting_class_id):
    def acc1(y_true, y_pred):
        class_id_true = K.argmax(y_true, axis=-1)
        class_id_preds = K.argmax(y_pred, axis=-1)
        # Only count samples that were predicted as the class of interest.
        accuracy_mask = K.cast(K.equal(class_id_preds, interesting_class_id), 'int32')
        class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds), 'int32') * accuracy_mask
        class_acc = K.cast(K.sum(class_acc_tensor), 'float32') / \
            K.cast(K.maximum(K.sum(accuracy_mask), 1), 'float32')
        return class_acc
    acc1.__name__ = 'acc_1_{}'.format(interesting_class_id)
    return acc1

def single_class_precision(interesting_class_id):
    def prec(y_true, y_pred):
        class_id_true = K.argmax(y_true, axis=-1)
        class_id_pred = K.argmax(y_pred, axis=-1)
        # Samples predicted as the class of interest.
        precision_mask = K.cast(K.equal(class_id_pred, interesting_class_id), 'int32')
        class_prec_tensor = K.cast(K.equal(class_id_true, class_id_pred), 'int32') * precision_mask
        class_prec = K.cast(K.sum(class_prec_tensor), 'float32') / \
            K.cast(K.maximum(K.sum(precision_mask), 1), 'float32')
        return class_prec
    prec.__name__ = 'prec_1_{}'.format(interesting_class_id)
    return prec

def single_class_recall(interesting_class_id):
    def recall(y_true, y_pred):
        class_id_true = K.argmax(y_true, axis=-1)
        class_id_pred = K.argmax(y_pred, axis=-1)
        # Samples whose true label is the class of interest.
        recall_mask = K.cast(K.equal(class_id_true, interesting_class_id), 'int32')
        class_recall_tensor = K.cast(K.equal(class_id_true, class_id_pred), 'int32') * recall_mask
        class_recall = K.cast(K.sum(class_recall_tensor), 'float32') / \
            K.cast(K.maximum(K.sum(recall_mask), 1), 'float32')
        return class_recall
    recall.__name__ = 'recall_1_{}'.format(interesting_class_id)
    return recall

model = Sequential()

pretrained_model = tf.keras.applications.VGG16(include_top=False,
                                               input_shape=(180, 180, 3),
                                               pooling='avg', classes=2,
                                               weights='imagenet',
                                               classifier_activation='sigmoid')

for layer in pretrained_model.layers:
    layer.trainable=False

model.add(pretrained_model)


model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()

model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.01),
              metrics=['accuracy',
                       single_class_accuracy(0), single_class_accuracy(1),
                       single_class_precision(0), single_class_recall(0),
                       single_class_precision(1), single_class_recall(1)])

hist = model.fit(train_generator, validation_data=val_generator, epochs=5,
                 batch_size=32)

Answer 1 (by 3mpgtkmj):

The problem was in the activation function. I changed the code to use softmax instead of sigmoid; with sigmoid it apparently only captured the metrics for one class in the binary case.
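
A minimal sketch of the kind of change the answer describes (the exact values are illustrative, not taken from the post): a two-unit softmax head paired with one-hot labels and a categorical loss, so that K.argmax over the model output actually distinguishes the two classes. It reuses the imports, the pretrained backbone and the metric helpers defined in the question.

# Sketch only: two-output softmax head so the argmax-based per-class metrics work.
model = Sequential()
model.add(pretrained_model)
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(2, activation='softmax'))   # one output unit per class

# The labels must be one-hot as well, e.g. class_mode='categorical'
# in flow_from_directory, paired with categorical_crossentropy.
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              metrics=['accuracy',
                       single_class_accuracy(0), single_class_accuracy(1),
                       single_class_precision(0), single_class_recall(0),
                       single_class_precision(1), single_class_recall(1)])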
