Hi everyone, I'm doing semantic segmentation with the DeeplabV3Plus architecture in TensorFlow (Keras). It worked fine on another dataset, but now I want to train on my own dataset, and at the very first data-loading step it throws a strange error. The function is
tf.data.Dataset.from_tensor_slices
and the error is:
ValueError                                Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_20192\306109049.py in <module>
     57
     58 train_dataset = data_generator(train_images, train_masks)
---> 59 val_dataset = data_generator(val_images, val_masks)
     60
     61 print("Train Dataset:", train_dataset)

~\AppData\Local\Temp\ipykernel_20192\306109049.py in data_generator(image_list, mask_list)
     50
     51 def data_generator(image_list, mask_list):
---> 52     dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
     53     dataset = dataset.map(load_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
     54     dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py in from_tensor_slices(tensors, name)
    812       Dataset: A `Dataset`.
    813     """
--> 814     return TensorSliceDataset(tensors, name=name)
    815
    816   class _GeneratorState(object):

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py in __init__(self, element, is_files, name)
   4720       batch_dim.assert_is_compatible_with(
   4721           tensor_shape.Dimension(
-> 4722               tensor_shape.dimension_value(t.get_shape()[0])))
   4723
   4724     variant_tensor = gen_dataset_ops.tensor_slice_dataset(

~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\framework\tensor_shape.py in assert_is_compatible_with(self, other)
    298     if not self.is_compatible_with(other):
    299       raise ValueError("Dimensions %s and %s are not compatible" %
--> 300                        (self, other))
    301
    302   def merge_with(self, other):

ValueError: Dimensions 37 and 50 are not compatible
The error is "Dimensions 37 and 50 are not compatible". I searched for it but couldn't find a solution. The code:
import os
import cv2
import numpy as np
from glob import glob
from scipy.io import loadmat
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
IMAGE_SIZE = 512
BATCH_SIZE = 4
NUM_CLASSES = 20
DATA_DIR = r'C:/Users/Joshi/Desktop/CARLA_0.9.13/WindowsNoEditor/PythonAPI/examples/out'
NUM_TRAIN_IMAGES = 250
NUM_VAL_IMAGES = 50
train_images = sorted(glob(os.path.join(DATA_DIR, "out/*")))[:NUM_TRAIN_IMAGES]
train_masks = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))[:NUM_TRAIN_IMAGES]
val_images = sorted(glob(os.path.join(DATA_DIR, "out/*")))[
    NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
val_masks = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))[
    NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]

def read_image(image_path, mask=False):
    image = tf.io.read_file(image_path)
    if mask:
        # Masks are single-channel PNGs holding integer class labels.
        image = tf.image.decode_png(image, channels=1)
        image.set_shape([None, None, 1])
        image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
    else:
        image = tf.image.decode_png(image, channels=3)
        image.set_shape([None, None, 3])
        image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
        image = image / 127.5 - 1  # scale RGB from [0, 255] to [-1, 1]
    return image

def load_data(image_list, mask_list):
    image = read_image(image_list)
    mask = read_image(mask_list, mask=True)
    return image, mask

def data_generator(image_list, mask_list):
    dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
    dataset = dataset.map(load_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
    return dataset
train_dataset = data_generator(train_images, train_masks)
val_dataset = data_generator(val_images, val_masks)
print("Train Dataset:", train_dataset)
print("Val Dataset:", val_dataset)
1 Answer
It's simply that the sizes don't match. tf.data.Dataset.from_tensor_slices slices every tensor in the tuple along its first dimension, so image_list and mask_list must have the same length. Here one of the two validation lists has 50 path entries while the other has only 37, which is exactly what "Dimensions 37 and 50 are not compatible" reports: one of the folders contains fewer files than the [250:300] slice expects. Make sure the out/ and Seman/ folders hold the same number of files and that every image has a corresponding mask.
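As a quick check, here is a minimal diagnostic sketch; the stem-matching step assumes masks share the same file-name stem as their images, which the question does not state:

import os
from glob import glob

DATA_DIR = r'C:/Users/Joshi/Desktop/CARLA_0.9.13/WindowsNoEditor/PythonAPI/examples/out'

image_paths = sorted(glob(os.path.join(DATA_DIR, "out/*")))
mask_paths = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))

# Both folders must yield the same count, or the [250:300] validation
# slice produces lists of different lengths.
print("images:", len(image_paths), "masks:", len(mask_paths))

# Keep only images whose file-name stem has a matching mask (assumed
# naming convention), guaranteeing equal-length, correctly paired lists.
mask_stems = {os.path.splitext(os.path.basename(p))[0] for p in mask_paths}
paired_images = [
    p for p in image_paths
    if os.path.splitext(os.path.basename(p))[0] in mask_stems
]
print("paired:", len(paired_images))

Matching by stem, rather than just truncating the longer list, also avoids silently pairing an image with the wrong mask once the two sorted lists drift out of step.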