keras 层“conv2d_transpose_4”的输入0与层不兼容:预期ndim=4,找到ndim=2,收到完整形状:(无,100)

fumotvh3  于 2023-03-18  发布在  其他
关注(0)|答案(2)|浏览(156)

我正在尝试开发一个GAN,我已经创建了生成器和鉴别器,现在我正在尝试训练它。我正在使用Mnist数据集,但我计划使用更多数据集。问题是,当我训练它时,我得到以下错误:Input 0 of layer "conv2d_transpose_4" is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: (None, 100)
我真的不知道问题是在网络中还是在用于训练GAN的数据中,有人能告诉我应该如何训练它或者问题在哪里吗?
进口:

import tensorflow
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Input, BatchNormalization, 
LeakyReLU, Reshape
from keras.layers import Conv2D, Conv2DTranspose, MaxPooling2D
from tensorflow.keras.optimizers import Adam
from keras import backend as K
from keras.utils import np_utils

from keras.datasets import mnist

import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

发电机:

def generator():
   model = Sequential()
   model.add(Conv2DTranspose(32, (3,3), strides=(2, 
   2), activation='relu', use_bias=False, 
   input_shape=img_shape))
   model.add(BatchNormalization(momentum=0.3))
   model.add(Conv2DTranspose(128,(3,3),strides= 
   (2,2), activation='relu', padding='same', 
   use_bias=False)) 
   model.add(MaxPooling2D(pool_size=(2, 2)))
   model.add(LeakyReLU(alpha=0.2))

   model.add(Conv2DTranspose(128,(3,3),strides= 
   (2,2), activation='relu', padding='same', 
   use_bias=False))
   model.add(MaxPooling2D(pool_size=(2, 2)))
   model.add(Dropout(0.5))
   model.add(BatchNormalization(momentum=0.3))
   model.add(LeakyReLU(alpha=0.2))

   model.add(Conv2DTranspose(128,(3,3),strides= 
   (2,2), activation='relu', padding='same', 
   use_bias=False))
   model.add(BatchNormalization())
   model.add(Dense(512, 
   activation=LeakyReLU(alpha=0.2)))
   model.add(BatchNormalization(momentum=0.7))

   model.build()
   model.summary()
   return model

鉴别器:

def discriminator():
   model = Sequential()
   model.add(Conv2D(32, (5,5), strides=(2, 2), 
   activation='relu', use_bias=False, 
   input_shape=img_shape))
   model.add(BatchNormalization(momentum=0.3))
   model.add(Conv2D(64,(5,5),strides=(2,2), 
   activation='relu', padding='same', 
   use_bias=False))
   model.add(MaxPooling2D(pool_size=(2, 2)))
   model.add(LeakyReLU(alpha=0.2))

   model.add(Conv2D(64,(5,5),strides=(2,2), 
   activation='relu', padding='same', 
   use_bias=False))
   model.add(Dropout(0.5))
   model.add(BatchNormalization(momentum=0.3))
   model.add(LeakyReLU(alpha=0.2))

   model.add(Dense(512, 
   activation=LeakyReLU(alpha=0.2)))
   model.add(Flatten())
   model.add(BatchNormalization(momentum=0.7))
   model.add(Dense(1, activation='sigmoid'))

   model.build()
   model.summary()

   return model

列车功能:

def train(epochs, batch_size, save_interval):

(x_train, _), (_, _) = mnist.load_data()

x_train = (x_train.astype(np.float32) - 127.5) / 127.5
x_train = np.expand_dims(x_train, axis=3) 
half_batch = int(batch_size / 2)

for epoch in range(epochs):
    idx = np.random.randint(0, x_train.shape[0], half_batch)
    imgs = x_train[idx]

    noise = np.random.normal(0, 1, (half_batch, 100))

    gen_imgs = generator.predict(noise)

    d_loss_real = discriminator.train_on_batch(imgs, np.ones((half_batch, 1)))
    d_loss_fake = discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))

    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) 
    noise = np.random.normal(0, 1, (batch_size, 100)) 
    valid_y = np.array([1] * batch_size)
    g_loss = combined.train_on_batch(noise, valid_y)

    print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
    if epoch % save_interval == 0:
        save_imgs(epoch)

所用数据:

img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)

optimizer = Adam(0.0002, 0.5)
discriminator = discriminator()
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])

generator = generator()
generator.compile(loss='binary_crossentropy', 
optimizer=optimizer)

z = Input(shape=(100,))
img = generator(z) #error

discriminator.trainable = False  

valid = discriminator(img)

combined = Model(z, valid)
combined.compile(loss='binary_crossentropy', 
optimizer=optimizer)

train(epochs=100000, batch_size=32, 
save_interval=10)

generator.save('generator_model.h5')
798qvoo8

798qvoo81#

问题出在Discriminator模型中的第一个Flatten层,该层正在将n维Tensor转换为1DTensor。由于MaxPooling2D层无法处理1DTensor,因此您会看到该错误。如果将其删除,它应该可以工作:

def discriminator():
   model = Sequential()
   model.add(Conv2D(32, (5,5), strides=(2, 2), 
   activation='relu', use_bias=False, 
   input_shape=img_shape))
   model.add(BatchNormalization(momentum=0.3))
   model.add(Conv2D(64,(5,5),strides=(2,2), 
   activation='relu', padding='same', 
   use_bias=False))
   model.add(MaxPooling2D(pool_size=(2, 2)))
   model.add(LeakyReLU(alpha=0.2))

   model.add(Conv2D(64,(5,5),strides=(2,2), 
   activation='relu', padding='same', 
   use_bias=False))
   model.add(Dropout(0.5))
   model.add(BatchNormalization(momentum=0.3))
   model.add(LeakyReLU(alpha=0.2))

   model.add(Flatten())
   model.add(Dense(512, 
   activation=LeakyReLU(alpha=0.2)))
   model.add(BatchNormalization(momentum=0.7))
   model.add(Dense(1, activation='sigmoid'))

   model.build()
   model.summary()

   return model

更新1:尝试像这样重写您的Generator模型:

def generator():
  model = Sequential()

  model = tf.keras.Sequential()
  model.add(Dense(7*7*256, use_bias=False, input_shape=(100,)))
  model.add(BatchNormalization())
  model.add(LeakyReLU())

  model.add(Reshape((7, 7, 256)))
  assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size

  model.add(Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
  assert model.output_shape == (None, 7, 7, 128)
  model.add(BatchNormalization())
  model.add(LeakyReLU())

  model.add(Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
  assert model.output_shape == (None, 14, 14, 64)
  model.add(BatchNormalization())
  model.add(LeakyReLU())

  model.add(Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
  assert model.output_shape == (None, 28, 28, 1)

  model.summary()
  return model

然后它应该工作,但你一定要通过这个tutorial来了解一切。

vktxenjb

vktxenjb2#

首先鉴别器是错误的,您期望发生器的输入噪声具有相同的维度,而这是交叉存储的相似之处,噪声可以生成多个级别,其中创建了批次!

当你看每一层,你会看到多少层识别需要!
只需测量输入/输出,您无需删除或编辑模型的含义。

这不是发生器失效当您输入错误时,请尝试查看鉴别器层,它们正在批量大小和生成的输入图像上进行训练。(这是噪声相似性交叉熵)
当您以正确的方式使用输入时,模型层和形状不会更改

### name='conv2d_transpose_input'), name='conv2d_transpose_input', description="created by layer 'conv2d_transpose_input'"), 
### but it was called on an input with incompatible shape (None, 100).
model = Sequential()
model.add(Conv2DTranspose(32, (3,3), strides=(2, 
2), activation='relu', use_bias=False, 
input_shape=(28, 28, 1)))
model.add(BatchNormalization(momentum=0.3))
model.add(Conv2DTranspose(128,(3,3),strides= 
(2,2), activation='relu', padding='same', 
use_bias=False)) 
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(LeakyReLU(alpha=0.2))

########################
def discriminator():
model = Sequential()

### Input 0 of layer "conv2d" is incompatible with the layer: expected axis -1 of input shape to have value 1, but received input with shape (None, 114, 114, 512)
### model.add(tf.keras.layers.Reshape((1, 100), input_shape=img_shape))

model.add(Conv2D(32, (5,5), strides=(2, 2), 
activation='relu', use_bias=False, 
input_shape=( 28, 28, 1)))  ### img_shape
model.add(BatchNormalization(momentum=0.3))
model.add(Conv2D(64,(5,5),strides=(2,2), 
activation='relu', padding='same', 
use_bias=False))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(LeakyReLU(alpha=0.2))

My sample is gradients values from motions of the Heriken kicks !

相关问题