Epoch 1/10
TypeError Traceback(most recent call last)in〈cell line:2〉()1 # Train model ----〉2model.fit(X_train,y_train,epochs=10,batch_size=16,validation_data=(X_val,y_val))
1 frames /usr/local/lib/python3.9/dist-packages/keras/engine/training.pyin tf__train_function(iterator)13 try:14 do_return = True ---〉15 retval_ = ag__.converted_call(ag__.ld(step_function),(ag__.ld(self),ag__.ld(iterator)),None,fscope)16除了:17 do_return = False
TypeError:在用户代码中:
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1284, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1268, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1249, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1051, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1109, in compute_loss
return self.compiled_loss(
File "/usr/local/lib/python3.9/dist-packages/keras/engine/compile_utils.py", line 317, in __call__
self._total_loss_mean.update_state(
File "/usr/local/lib/python3.9/dist-packages/keras/utils/metrics_utils.py", line 77, in decorated
update_op = update_state_fn(*args, **kwargs)
File "/usr/local/lib/python3.9/dist-packages/keras/metrics/base_metric.py", line 140, in update_state_fn
return ag_update_state(*args, **kwargs)
File "/usr/local/lib/python3.9/dist-packages/keras/metrics/base_metric.py", line 477, in update_state **
sample_weight = tf.__internal__.ops.broadcast_weights(
File "/usr/local/lib/python3.9/dist-packages/keras/engine/keras_tensor.py", line 283, in __array__
raise TypeError(
TypeError: You are passing KerasTensor(type_spec=TensorSpec(shape=(), dtype=tf.float32, name=None), name='Placeholder:0', description="created by layer 'tf.cast_2'"), an intermediate Keras symbolic input/output, to a TF API that does not allow registering custom dispatchers, such as `tf.cond`, `tf.function`, gradient tapes, or `tf.map_fn`. Keras Functional model construction only supports TF API calls that *do* support dispatching, such as `tf.math.add` or `tf.reshape`. Other APIs cannot be called directly on symbolic Kerasinputs/outputs. You can work around this limitation by putting the operation in a custom Keras layer `call` and calling that layer on this symbolic input/output.
这是密码
# Set path to directory containing images
path_to_images = '/content/drive/MyDrive/Heatsource504'
# Define regular expression pattern to extract coordinates
pattern = r'x_(\d+)y_(\d+)\.jpg'
# Initialize empty lists to store image data and corresponding coordinates
images = []
heatmaps = []
coordinates = []
# Loop through images and extract data and coordinates
for filename in os.listdir(path_to_images):
match = re.search(pattern, filename)
if match:
x_coord = int(match.group(1))
y_coord = int(match.group(2))
img = Image.open(os.path.join(path_to_images, filename))
img = img.resize((200, 200)) # resize image to desired dimensions
img_array = np.array(img) # convert image to numpy array
# Convert image to grayscale if it is not already grayscale
if len(img_array.shape) == 3 and img_array.shape[2] == 3:
img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
# Check if image is of type uint8, and convert it to uint8 if necessary
if img_array.dtype != 'uint8':
img_array = img_array.astype('uint8')
# Normalize pixel values of input image to be between 0 and 1
img_array = img_array / 255.0
# Add input image and corresponding output image to lists
heatmaps.append(img_array)
images.append(cv2.imread(os.path.join(path_to_images, filename)))
coordinates.append([x_coord, y_coord])
# Convert lists to numpy arrays
images = np.array(images)
heatmaps = np.array(heatmaps)
coordinates = np.array(coordinates)
# Split data into training and validation sets
X_train, X_val, y_train, y_val, coords_train, coords_val = train_test_split(heatmaps, images, coordinates, test_size=0.2, random_state=42)
return X_train, y_train, coords_train, X_val, y_val, coords_val
input_shape = (200, 200, 1)
# Encoder
inputs = Input(shape=input_shape)
x = Conv2D(filters=16, kernel_size=3, padding='valid', activation='relu')(inputs)
x = Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu')(x)
x = Conv2D(filters=64, kernel_size=3, padding='valid', activation='relu')(x)
x = Conv2D(filters=128, kernel_size=3, padding='valid', activation='relu')(x)
x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu')(x)
x = Flatten()(x)
x = Dense(units=128, activation='relu')(x)
# Define latent variables
latent_dim = 10
mu = Dense(units=latent_dim)(x)
log_var = Dense(units=latent_dim)(x)
# Reparameterization trick
def sampling(args):
mu, log_var = args
epsilon = K.random_normal(shape=K.shape(mu))
return mu + K.exp(log_var / 2) * epsilon
# Sample latent variables
z = Lambda(sampling)([mu, log_var])
# Decoder
x = Dense(units=128, activation='relu')(z)
x = Dense(units=8 * 8 * 128, activation='relu')(x)
x = Reshape(target_shape=(8, 8, 128))(x)
x = Conv2DTranspose(filters=128, kernel_size=3, padding='same', activation='relu')(x)
x = Conv2DTranspose(filters=64, kernel_size=4, padding='valid', activation='relu')(x)
x = Conv2DTranspose(filters=32, kernel_size=2, padding='valid', activation='relu')(x)
x = Conv2DTranspose(filters=16, kernel_size=3, padding='valid', activation='relu')(x)
x = Conv2DTranspose(filters=1, kernel_size=2, padding='valid', activation='sigmoid')(x)
# Define VAE loss function
def vae_loss(y_true, y_pred):
reconstruction_loss = binary_crossentropy(K.flatten(y_true), K.flatten(y_pred))
reconstruction_loss *= input_shape[0] * input_shape[1] * input_shape[2]
kl_loss = 1 + log_var - K.square(mu) - K.exp(log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
return vae_loss
model = Model(inputs=inputs, outputs=x)
model.compile(optimizer='adam', loss=vae_loss)
# Load data
X_train, y_train, coords_train, X_val, y_val, coords_val = load_data4()
model.fit(X_train, y_train, epochs=10, batch_size=16, validation_data=(X_val, y_val))```
1条答案
按热度按时间iq0todco1#
我记得我也遇到过这种情况,好像tensorflow不再支持这样的vae_loss函数了。
我有两个解决方案,我将在这里粘贴简短的一个。而不是创建一个vae_loss函数,你需要添加损失像这样:
我还有另一个解决这个问题的方法,那就是使用自定义模型。你可以在这里找到这个解决方案:
https://keras.io/examples/generative/vae/