I'm trying to create a convolutional autoencoder network in Keras, but the autoencoder.fit() call gives me an error saying that `logits` and `labels` must have the same shape. I'm not sure what "logits" refers to, and I don't think I'm passing any labels into fit? If I am, I can't see where... Can anyone spot what is causing this error?
import tensorflow as tf
import matplotlib.pyplot as plt

data = tf.keras.utils.image_dataset_from_directory('/content/gdrive/MyDrive/Skyrmion Vision/testFiles/train/',batch_size=32,image_size=(237,237),shuffle=False)
data_iterator = data.as_numpy_iterator()
# get another batch from the iterator
batch = data_iterator.next()
len(batch) # there are the images and there are the labels -- images are represented as numpy arrays
batch[0].shape
batch[1]
# different based on what the data_iterator grabs
fig, ax = plt.subplots(ncols=4, figsize=(20,20))
for idx, img in enumerate(batch[0][:4]):
    ax[idx].imshow(img.astype(int))
    ax[idx].title.set_text(batch[1][idx])
"""## **SCALE DATA**"""
# functions available to apply to pipeline, this is using the map() function: https://www.tensorflow.org/api_docs/python/tf/data/Dataset
data = data.map(lambda x,y: (x/255, y))
scaled_iterator = data.as_numpy_iterator()
batch = scaled_iterator.next()[0]
batch[0].max()
"""## **Split Data**"""
len(data) # 127 batches, each batch with 32 images
train_size = int(len(data)*.7)+2
val_size = int(len(data)*.2)
test_size = int(len(data)*.1)
print(train_size,val_size,test_size)
train_size+val_size+test_size
# from https://www.tensorflow.org/api_docs/python/tf/data/Dataset
train = data.take(train_size)
val = data.skip(train_size).take(val_size)
test = data.skip(train_size+val_size).take(test_size)
len(train)
"""## **DEEP MODEL**"""
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
data_iterator = data.as_numpy_iterator()
batch = data_iterator.next()
"""**Encoding Process**"""
input_img = Input(shape=(237, 237, 3))
# Conv1 #
x = Conv2D(filters = 32, kernel_size = (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D(pool_size = (2, 2), padding='same')(x)
# Conv2 #
x = Conv2D(filters = 16, kernel_size = (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D(pool_size = (2, 2), padding='same')(x)
# Conv 3 #
x = Conv2D(filters = 8, kernel_size = (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D(pool_size = (2, 2), padding='same')(x)
"""**Decoding Process**"""
# DeConv1
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
# DeConv2
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
# Deconv3
x = Conv2D(32, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
"""**Declare the Model**"""
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# Train the model
autoencoder.fit(batch[0], batch[0],
                epochs=20,
                shuffle=False,
                #validation_data=(x_test, x_test)
                )
This gives me the error that I have been stuck on:
Epoch 1/20
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-70-62d669270126> in <module>
2 autoencoder.fit(batch[0], batch[0],
3 epochs=20,
----> 4 shuffle=False,
5 #validation_data=(x_test, x_test)
6 )
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1932, in binary_crossentropy
backend.binary_crossentropy(y_true, y_pred, from_logits=from_logits),
File "/usr/local/lib/python3.7/dist-packages/keras/backend.py", line 5247, in binary_crossentropy
return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
ValueError: `logits` and `labels` must have the same shape, received ((None, 236, 236, 1) vs (None, 237, 237, 3)).
1 Answer
The binary_crossentropy loss function is meant for binary classification problems. Here you are comparing two images, the input image and the reconstructed output image, so you could use something like MeanSquaredError as the loss function instead.
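A minimal sketch of that change, keeping the rest of the question's code unchanged (the optimizer is left as 'adadelta' from the original compile call):

from tensorflow.keras.losses import MeanSquaredError

# Compile with a regression-style reconstruction loss instead of binary_crossentropy
autoencoder.compile(optimizer='adadelta', loss=MeanSquaredError())

# Train the autoencoder to reconstruct its own input
autoencoder.fit(batch[0], batch[0], epochs=20, shuffle=False)

Note that an elementwise loss like this still requires the model output and the target to have compatible shapes, and the traceback above reports (None, 236, 236, 1) versus (None, 237, 237, 3).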