Detecting volume/overlap regions in OCT image registration (PyCharm)

gblwokeq · posted 2022-11-23 in PyCharm

I am working on image registration of OCT data. I want to locate the regions/areas in the registered target image where registration has actually changed the image relative to the source. I am using Python. Can anyone tell me which techniques are available for this?
Any suggestions on how to approach the problem are also welcome. I initially ran a trial registration on two images; the goal is to register a large dataset.
My code is below:

# importing libraries
import cv2
import numpy as np
# compare_ssim was removed from skimage.measure; the current API is:
from skimage.metrics import structural_similarity as ssim
import matplotlib.pyplot as plt

def imageRegistration():
    # open the image files
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')

    # converting to greyscale
    img1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    height, width = img2.shape

    # Create ORB detector with 5000 features.
    orb_detector = cv2.ORB_create(5000)

    # Find keypoints and descriptors.
    # The first arg is the image, second arg is the mask
    #  (which is not required in this case).
    kp1, d1 = orb_detector.detectAndCompute(img1, None)
    kp2, d2 = orb_detector.detectAndCompute(img2, None)

    # Match features between the two images.
    # We create a Brute Force matcher with
    # Hamming distance as measurement mode.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Match the two sets of descriptors.
    matches = matcher.match(d1, d2)

    # Sort matches by Hamming distance. match() returns a tuple in
    # recent OpenCV versions, so use sorted() instead of list.sort().
    matches = sorted(matches, key=lambda x: x.distance)

    # Take the top 90 % of matches forward.
    matches = matches[:int(len(matches) * 0.9)]
    no_of_matches = len(matches)

    # Define empty matrices of shape no_of_matches * 2.
    p1 = np.zeros((no_of_matches, 2))
    p2 = np.zeros((no_of_matches, 2))

    for i in range(len(matches)):
        p1[i, :] = kp1[matches[i].queryIdx].pt
        p2[i, :] = kp2[matches[i].trainIdx].pt

    # Find the homography matrix.
    homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)

    # Use this matrix to transform the
    # colored image wrt the reference image.
    transformed_img = cv2.warpPerspective(image1,
                                          homography, (width, height))

    # Save the output.
    cv2.imwrite('output.jpg', transformed_img)

# The following code measures the difference between the source image,
# the target image, and the registered image.
# An MSE of 0 means perfect similarity (no difference); the larger the
# value, the larger the difference between the images.
def findingDifferenceMSE():
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')
    image3 = cv2.imread('D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/output.jpg')
    err = np.sum((image1.astype("float") - image3.astype("float")) ** 2)
    err /= float(image1.shape[0] * image1.shape[1])
    print("MSE:")
    print('Image 104 and output image:', err)
    err1 = np.sum((image2.astype("float") - image3.astype("float")) ** 2)
    err1 /= float(image2.shape[0] * image2.shape[1])
    print('Image 0 and output image:', err1)

def findingDifferenceSSIM():
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')
    image3 = cv2.imread('D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/output.jpg')
    # channel_axis is needed for colour images (use multichannel=True on scikit-image < 0.19)
    result1 = ssim(image1, image3, channel_axis=-1)
    print(result1)

# calling the functions
imageRegistration()
findingDifferenceMSE()
#findingDifferenceSSIM()
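
To see which part of the target frame the warped source actually covers, one idea is to warp an all-white mask with the same homography: the non-zero pixels of the warped mask mark the overlap region. A minimal sketch of that idea follows (findOverlapRegion is a made-up name, and it assumes homography, width and height are made available from imageRegistration(), which is not the case in the code above):

def findOverlapRegion(image1, homography, width, height):
    # Warp an all-white mask with the same homography applied to image1;
    # non-zero pixels mark where the registered source lands in the target frame.
    mask = np.full(image1.shape[:2], 255, dtype=np.uint8)
    warped_mask = cv2.warpPerspective(mask, homography, (width, height))
    overlap = warped_mask > 0
    print('Overlap covers', round(100.0 * overlap.mean(), 2), '% of the target frame')
    return warped_mask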

This is the registered image:

This is the first reference image:

This is the second reference image:

ohfgkhjo1#

Use image differencing: compare the registered image against the reference image to find the regions that differ. The differing regions show where registration has actually changed the image; see the sketch below.
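
A minimal sketch of that approach, assuming the reference image 0.png and the registered output.jpg produced by the code in the question (the threshold value of 30 is an arbitrary choice for illustration):

import cv2

# Load the reference image and the registered image in greyscale.
reference = cv2.imread('0.png', cv2.IMREAD_GRAYSCALE)
registered = cv2.imread('output.jpg', cv2.IMREAD_GRAYSCALE)

# Per-pixel absolute difference between the two images.
diff = cv2.absdiff(reference, registered)

# Binary mask of the regions that changed during registration.
_, change_mask = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)

# Outline the changed regions on the registered image for inspection.
contours, _ = cv2.findContours(change_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
vis = cv2.cvtColor(registered, cv2.COLOR_GRAY2BGR)
cv2.drawContours(vis, contours, -1, (0, 0, 255), 2)
cv2.imwrite('registration_changes.jpg', vis)

cv2.absdiff is the simplest differencing choice; if the intensity levels of the two scans do not match exactly, a per-pixel structural-similarity map (structural_similarity(..., full=True) from scikit-image) is a more robust alternative.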
