keras ValueError:密集图层的输入0与图层不兼容:预期轴-1的值为8,但收到的输入的形状为[None,1]

m1m5dgzv  于 2022-11-13  发布在  其他
关注(0)|答案(1)|浏览(159)

我正在为OpenAI lunarLander-v2环境训练一个模型。我已经使用Sequential模型成功地完成了这个任务,但是当我尝试使用函数模型时,我得到了一些Tensor形状不兼容的错误。这是Agent类的代码,我认为这个问题与done_list和next_states的形状不兼容有关,但是我不知道如何重新塑造这些Tensor使它工作。

class DQAgent(Agent):
# NOTE(review): the method defs below are at column 0 — the class-body
# indentation was apparently lost when the code was pasted; confirm against
# the original file.
def __init__(self, env, config):
    """Deep-Q agent: replay memory plus a Keras Q-network."""
    Agent.__init__(self, env, config)
    
    # Bounded replay buffer; deque(maxlen=...) evicts the oldest transition.
    self.memory = deque(maxlen=self.config.memory_size)
    # Functional-API Q-network, built and compiled in initialize().
    self.model = self.initialize()

def initialize(self):
    """Build and compile the functional-API Q-network.

    The input is the 8-dimensional LunarLander-v2 observation; the output
    is one linear Q-value per action.
    """
    obs_in = Input(shape=(8,))

    width = self.config.layer_size * self.config.input_layer_mult
    hidden = Dense(width, activation=relu)(obs_in)
    hidden = Dense(self.config.layer_size, activation=relu)(hidden)
    q_values = layers.Dense(self.action_space_size, activation=linear)(hidden)

    model = keras.Model(inputs=obs_in, outputs=q_values, name=self.name)
    model.compile(loss=mean_squared_error,
                  optimizer=Adam(lr=self.config.learning_rate))
    model.summary()
    return model

def policyAct(self, state):
    """Greedy policy: return the index of the highest predicted Q-value."""
    q_values = self.model.predict(state)
    best_action = np.argmax(q_values[0])
    return best_action

def addToMemory(self, state, action, reward, next_state, done):
    """Append one (state, action, reward, next_state, done) transition.

    Bug fix: the original appended ``(self, state, action, reward,
    next_state, done)`` — the stray leading ``self`` shifted every field
    by one, so extractFromSample's ``i[0]`` yielded the agent object
    instead of the state. That is what produced the reported
    "expected (None, 8), got (None, 1)" shape error at predict time.
    """
    self.memory.append((state, action, reward, next_state, done))
    
def sampleFromMemory(self):
    """Draw batch_size transitions uniformly, without replacement.

    Bug fix: ``np.random.sample`` is an alias of ``numpy.random.random_sample``
    (uniform floats in [0, 1)) and cannot draw from a population, so the
    original call raises/returns garbage. ``random.sample`` is the correct
    API for sampling from the replay deque.
    """
    import random  # local import: the file's import block is outside this view
    return random.sample(self.memory, self.config.batch_size)

def extractFromSample(self, sample):
    """Unpack a batch of transitions into parallel numpy arrays.

    Returns (states, actions, rewards, next_states, done_flags); the two
    state arrays are squeezed so a stack of (1, obs_dim) rows collapses
    to (batch, obs_dim).
    """
    columns = [np.array(col) for col in zip(*sample)]
    states, actions, rewards, next_states, done_flags = columns

    states = np.squeeze(states)
    next_states = np.squeeze(next_states)

    return states, actions, rewards, next_states, done_flags
    
def updateReplayCount(self):
    """Advance the replay counter, wrapping at replay_step_size.

    Bug fix: the original incremented ``self.config.replay_counter`` but
    then read ``self.replay_counter`` — an attribute that is never set on
    the agent in the visible code — for the modulo step. The counter
    lives on ``self.config`` throughout the rest of the class.
    """
    self.config.replay_counter = (
        self.config.replay_counter + 1
    ) % self.config.replay_step_size

def learnFromMemory(self):
    """One DQN replay step: sample a batch and fit the model to the
    Bellman targets.

    Skips training until the buffer holds a full batch and only runs on
    replay steps where the counter has wrapped to 0; also stops once the
    10-episode mean reward exceeds 100 (treated as solved).
    """
    if len(self.memory) < self.config.batch_size or self.config.replay_counter != 0:
        return
    if np.mean(self.training_episode_rewards[-10:]) > 100:
        return
    sample = self.sampleFromMemory()

    states, actions, rewards, next_states, done_list = self.extractFromSample(sample)
    # Bellman target: r + gamma * max_a' Q(s', a'); the (1 - done) factor
    # zeroes the bootstrap term for terminal transitions.
    targets = rewards + self.config.gamma * (np.amax(self.model.predict_on_batch(next_states), 
                                                     axis=1)) * (1 - (done_list))
    
    # Start from the current predictions and overwrite only the Q-value of
    # the action actually taken in each sampled transition.
    target_vec = self.model.predict_on_batch(states)
    indexes = np.array([i for i in range(self.config.batch_size)])
    target_vec[[indexes], [actions]] = targets
    self.model.fit(states, target_vec, epochs=1, verbose=0)
    
def save(self, name):
    """Persist the Q-network to *name* (delegates to keras Model.save)."""
    self.model.save(name)

当使用Sequential API而不是函数式API创建模型时,类似的代码可以很好地工作。我对这一点非常陌生,对SO也是如此,任何帮助都非常感谢。
警告:tensorflow:模型是使用输入Tensor("input_10:0", shape=(None, 8), dtype=float32)的形状(None, 8)构建的,但在形状不兼容(None, 1)的输入上调用了该模型。ValueError:层dense_72的输入0与该层不兼容:输入shape的轴-1的值应为8,但收到的输入的shape为[None, 1]
来自顺序实现的模型,运行时没有问题(其余代码相同)

def initialize_model(self):
    """Sequential-API counterpart of initialize(): build and compile the Q-net."""
    net = Sequential()

    # First layer: width scaled by input_layer_mult, fed the raw observation.
    first_width = self.config.layer_size * self.config.input_layer_mult
    net.add(Dense(first_width,
                  input_dim=self.observation_space_dim,
                  activation=relu))

    # Hidden stack of identical relu layers.
    for _ in range(self.config.deep_layers):
        net.add(Dense(self.config.layer_size, activation=relu))

    # One linear Q-value per action.
    net.add(Dense(self.action_space_dim, activation=linear))

    net.compile(loss=mean_squared_error,
                optimizer=Adam(lr=self.config.learning_rate))
    print(net.summary())

    return net

相关问题