Memory problem with tensorflow.keras in a machine learning project

jvlzgdj9  posted on 2023-10-19 in Other

The problem appears after the first batch of 150 training images has been processed. The model starts on the next training batch, prints 1-3 images (for debugging), and then RAM usage jumps sharply from 7 GB to 36 GB, which crashes the program. The model never reaches the validation set. Some of the log messages I see:

Allocation of 5910240000 exceeds 10% of free system memory.
This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
You must feed a value for placeholder tensor 'Placeholder/_0' with dtype int32.
TF-TRT Warning: Could not find TensorRT

I assume the problem is in my data_generator. I have edited it, but nothing helped. Do you know why this is happening?
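To see where the jump happens, I can print the process's resident memory from inside the generator loop (a rough sketch; it assumes psutil is available, as it is in Colab by default):

import os
import psutil

def log_rss(tag=""):
    # Print the resident set size (RSS) of the current process in GB
    rss_gb = psutil.Process(os.getpid()).memory_info().rss / 1e9
    print(f"[{tag}] RSS: {rss_gb:.2f} GB")

Calling log_rss(img_id) next to the existing debug print shows whether memory grows while the batch arrays are being filled or only once model.fit starts consuming them.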

My data_generator:

!pip install opencv-python
!pip install pydicom

import numpy as np
import pydicom
import cv2

def conditional_resize(img, img_id, target_height=3518, target_width=2800):
    # Get the original image dimensions
    original_height, original_width = img.shape[:2]

    # Initialize the output image as the input image
    output_img = img

    # Case 1: If the original image matches the target dimensions, leave it as it is
    if original_height == target_height and original_width == target_width:
        output_img = img

    else:
        # Case 2: If the original image is smaller than the target dimensions, pad it
        if original_height < target_height and original_width < target_width:
            pad_h = target_height - original_height
            pad_w = target_width - original_width
            output_img = cv2.copyMakeBorder(img, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=0)

        # Case 3: If the original image is larger than the target dimensions, crop it
        elif original_height > target_height and original_width > target_width:
            crop_h = (original_height - target_height) // 2
            crop_w = (original_width - target_width) // 2
            output_img = img[crop_h:crop_h + target_height, crop_w:crop_w + target_width]

        # Case 4: If original image HEIGHT is larger than target AND WIDTH is smaller than target
        elif original_height > target_height and original_width < target_width:
            # Crop the height
            crop_h = (original_height - target_height) // 2
            cropped_img = img[crop_h:crop_h + target_height, :]

            # Pad the width
            pad_w = target_width - original_width
            output_img = cv2.copyMakeBorder(cropped_img, 0, 0, 0, pad_w, cv2.BORDER_CONSTANT, value=0)

        # Case 5: If original image HEIGHT is smaller than target AND WIDTH is larger than target
        elif original_height < target_height and original_width > target_width:
            # Pad the height
            pad_h = target_height - original_height
            padded_img = cv2.copyMakeBorder(img, 0, pad_h, 0, 0, cv2.BORDER_CONSTANT, value=0)

            # Crop the width
            crop_w = (original_width - target_width) // 2
            output_img = padded_img[:, crop_w:crop_w + target_width]

    print("\n IMAGE:", img_id, original_height, original_width, output_img.shape)
    return output_img

def my_data_generator(df, batch_size, sample_weights_birads, sample_weights_density):
    mapping_dict_density = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
    mapping_dict_laterality = {'left': 0, 'right': 1}
    mapping_dict_view_position = {'CC': 0, 'MLO': 1}

    while True:
        # Select files (IDs) and labels for the batch
        batch_indices = np.random.choice(a=len(df), size=batch_size)
        batch_df = df.iloc[batch_indices]

        height = 3518  # The height of your images
        width = 2800  # The width of your images
        num_features = 2  # Number of feature columns, here 'laterality' and 'view_position'

        # Initialize your arrays
        batch_images = np.zeros((batch_size, height, width), dtype=np.float32)  # Adjust dtype as needed
        batch_labels_birads = np.zeros((batch_size, 1), dtype=np.int32)  # Assuming birads labels are integers
        batch_labels_density = np.zeros((batch_size, 1), dtype=np.int32)  # Assuming density labels are integers
        batch_features = np.zeros((batch_size, num_features), dtype=np.int32)  # Assuming features are integers
        batch_weights_birads = np.zeros((batch_size,), dtype=np.float32)  # Assuming weights are float numbers
        batch_weights_density = np.zeros((batch_size,), dtype=np.float32)  # Assuming weights are float numbers

        for i, original_idx in enumerate(batch_indices):
            row = batch_df.iloc[i]

            img_id = row['image_id']
            study_id = row['study_id']
            img_path = f"/content/drive/MyDrive/Colab/vindr-mammo-a-large-scale-benchmark-dataset-for-computer-aided-detection-and-diagnosis-in-full-field-digital-mammography-1.0.0/images/{study_id}/{img_id}.dicom"
            img = pydicom.dcmread(img_path).pixel_array

            img = conditional_resize(img, img_id)
            batch_images[i] = img
            batch_labels_birads[i, 0] = row['breast_birads'] - 1
            batch_labels_density[i, 0] = mapping_dict_density.get(row['breast_density'], -1)
            batch_features[i] = [mapping_dict_laterality.get(row['laterality'], -1),
                                 mapping_dict_view_position.get(row['view_position'], -1)]
            batch_weights_birads[i] = sample_weights_birads[original_idx]
            batch_weights_density[i] = sample_weights_density[original_idx]



        yield {'image_input': batch_images, 'feature_input': batch_features}, \
            {'birads_output': batch_labels_birads, 'density_output': batch_labels_density}, \
            {'birads_output': batch_weights_birads, 'density_output': batch_weights_density}

Model.fit

history = model.fit(
    my_data_generator(
        training_data,
        batch_size=150,
        # birads_output =
        sample_weights_birads=sample_weights_birads_train,
        sample_weights_density=sample_weights_density_train,
    ),
    # steps_per_epoch=len(training_data) // 150,
    epochs=2,
    validation_data=my_data_generator(
        validation_data,
        batch_size=32,
        sample_weights_birads=sample_weights_birads_val,
        sample_weights_density=sample_weights_density_val,
    ),
    # validation_steps=len(validation_data) // 32,
    callbacks=[checkpoint]
)
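One more note on the commented-out arguments: since my_data_generator loops forever with while True and never raises StopIteration, Keras only knows when an epoch ends if steps_per_epoch and validation_steps are given. A sketch of what that would look like (train_gen and val_gen are placeholder names for the two generators created above):

train_steps = len(training_data) // 150   # training batches per epoch
val_steps = len(validation_data) // 32    # validation batches per pass

history = model.fit(
    train_gen,
    steps_per_epoch=train_steps,
    epochs=2,
    validation_data=val_gen,
    validation_steps=val_steps,
    callbacks=[checkpoint],
)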

I have already changed the arrays created in each loop to np.arrays, but that did not help.

6za6bjd01#

The images you are working with are very large (3518 x 2800) and the batch size is 150. If I am not mistaken, a single batch at dtype=np.float32 takes about 150 * 3518 * 2800 * 4 bytes ≈ 5.9 GB, which is exactly the "Allocation of 5910240000" line in your logs. After the # Initialize your arrays section you then allocate even more memory for the intermediate arrays.
I believe you should consider using smaller images and/or a smaller batch size to solve your problem.
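A minimal sketch of that idea (illustrative values only; the working resolution and batch size below are hypothetical, not taken from the dataset): downscale each image inside the generator before writing it into the batch buffer, so the preallocated array stays small.

import cv2
import numpy as np

# Hypothetical, much smaller working resolution and batch size
TARGET_HEIGHT, TARGET_WIDTH = 880, 700   # roughly the original size divided by 4
BATCH_SIZE = 16

def downscale(img):
    # cv2.resize takes dsize in (width, height) order
    return cv2.resize(img, (TARGET_WIDTH, TARGET_HEIGHT), interpolation=cv2.INTER_AREA)

# The batch buffer then needs about 16 * 880 * 700 * 4 bytes ≈ 39 MB instead of ≈ 5.9 GB
batch_images = np.zeros((BATCH_SIZE, TARGET_HEIGHT, TARGET_WIDTH), dtype=np.float32)

The same downscale call could replace (or run before) conditional_resize in the generator, with the model's input shape adjusted to match.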
