I am trying to use Keras Tuner with the Fashion-MNIST dataset on Google Colab. This is my code:
!pip install keras-tuner
import tensorflow as tf
import kerastuner
import numpy as np
print("TensorFlow version: ", tf.__version__)
(x_train, y_train) , (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
def create_model(hp):
    num_hidden_layers = 1
    num_units = 8
    dropout_rate = 0.1
    learning_rate = 0.01

    if hp:
        num_hidden_layers = hp.Choice('num_hidden_layers', values=[1, 2, 3])
        num_units = hp.Choice('num_units', values=[8, 16, 32])
        dropout_rate = hp.Float('dropout_rate', min_value=0.1, max_value=0.5)
        learning_rate = hp.Float('learning_rate', min_value=0.0001, max_value=0.01)

    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
    model.add(tf.keras.layers.Lambda(lambda x: x / 255.))

    for _ in range(0, num_hidden_layers):
        model.add(tf.keras.layers.Dense(num_units, activation='relu'))
        model.add(tf.keras.layers.Dropout(dropout_rate))

    model.add(tf.keras.layers.Dense(10, activation='softmax'))

    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        metrics=['accuracy']
    )
    return model

class CustomTuner(kerastuner.tuners.BayesianOptimization):
    def run_trial(self, trial, *args, **kwargs):
        kwargs['batch_size'] = trial.hyperparameters.Int('batch_size', 32, 128, step=32)
        super(CustomTuner, self).run_trial(trial, *args, **kwargs)

tuner = CustomTuner(
    create_model,
    objective='val_accuracy',
    max_trials=20,
    directory='logs',
    project_name='fashion_minist',
    overwrite=True,
)

search = tuner.search(
    x_train, y_train,
    validation_data=(x_test, y_test),
    epochs=5,
)
I get the following error:
ValueError: Unknown metric: val_accuracy
Am I doing something wrong? I see val_accuracy values during training, but as soon as the first trial finishes, the search stops with this error.
1 Answer
It should work if you add a return statement:
return super(CustomTuner, self).run_trial(trial, *args, **kwargs)
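For reference, a minimal sketch of the corrected subclass (the rest of the question's code unchanged). The likely reason this matters: in newer keras-tuner releases, run_trial is expected to return the trial results (for example, the History object from model.fit), and the tuner uses that return value to record the objective metric, so swallowing it means val_accuracy never reaches the oracle.

class CustomTuner(kerastuner.tuners.BayesianOptimization):
    def run_trial(self, trial, *args, **kwargs):
        # Tune the batch size alongside the model hyperparameters.
        kwargs['batch_size'] = trial.hyperparameters.Int('batch_size', 32, 128, step=32)
        # Return the parent's results so the tuner can read val_accuracy.
        return super(CustomTuner, self).run_trial(trial, *args, **kwargs)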