I am developing an image processing pipeline in OpenCV (Python) to segment a flower from a dataset of plant images and to evaluate the masks it produces against the ground truth images. However, whenever the flower in a ground truth image touches the border of the image, the evaluation goes wrong and produces a very small similarity score. Any solution to this?
Code for evaluation
import os

import cv2
import numpy as np

# Function to find the largest contour, which is assumed to be the flower
def find_largest_contour(binary_image):
    # Find contours in the binary image
    contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        return max(contours, key=cv2.contourArea)
    else:
        return None

# Function to calculate the Intersection over Union of two contours
def calculate_iou(contourA, contourB, shape):
    # Rasterize each contour into a filled uint8 mask
    maskA = np.zeros(shape, dtype=np.uint8)
    maskB = np.zeros(shape, dtype=np.uint8)
    cv2.drawContours(maskA, [contourA], -1, color=255, thickness=cv2.FILLED)
    cv2.drawContours(maskB, [contourB], -1, color=255, thickness=cv2.FILLED)
    intersection = np.logical_and(maskA, maskB)
    union = np.logical_or(maskA, maskB)
    iou_score = np.sum(intersection) / np.sum(union)
    return iou_score

# Function to display the similarity percentage based on IoU
def display_similarity(image_name, iou_score):
    similarity_percentage = round(iou_score, 2)
    print(f"Similarity for {image_name}: {similarity_percentage}%")

# Apply the processing and calculate the IoU for each image
ious = []
for input_path, ground_truth_path in zip(image_paths, ground_truth_image_paths):
    image_name = os.path.basename(input_path)
    original_image = cv2.imread(input_path)  # Read the original image again for visualization
    processed_image = process_image(input_path, image_name)
    show_binary_image(processed_image, window_name=f"Binary: {image_name}")

    ground_truth_image = cv2.imread(ground_truth_path, cv2.IMREAD_GRAYSCALE)
    show_binary_image(ground_truth_image, window_name=f"Ground truth: {image_name}")

    # Make sure both masks have the same size before comparing them
    if processed_image.shape != ground_truth_image.shape:
        ground_truth_image = cv2.resize(ground_truth_image, (processed_image.shape[1], processed_image.shape[0]))

    # Find the largest contour in each mask
    contour_processed = find_largest_contour(processed_image)
    contours_ground_truth = process_red_edges(ground_truth_path)  # Pass the path instead of the image
    # Keep the largest contour among the ones found in the ground truth
    contour_ground_truth = max(contours_ground_truth, key=cv2.contourArea)

    # Calculate the IoU as a percentage
    iou_score = calculate_iou(contour_processed, contour_ground_truth, ground_truth_image.shape) * 100
    ious.append(iou_score)
Image produced by the pipeline, to compare with the ground truth image:

As seen, the top and bottom of the flower foreground in the ground truth image touch the border of the image, and running the code on it gives a similarity of only 1.58%, while other images whose flowers do not touch the border score >90% similarity.

It's not quite "intersection over union". You care that the entire positive ground truth (GT) is also called positive by the model, and the same for the negative GT, so that requires "intersection over GT".
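A minimal sketch of that computation, assuming pred_mask and gt_mask are single-channel binary masks of the same size (these names are placeholders, not taken from your code):

import numpy as np

def intersection_over_gt(pred_mask, gt_mask):
    # Boolean masks: True where the pixel is labelled "flower"
    pred = pred_mask > 0
    gt = gt_mask > 0
    # How much of the positive GT the model also calls positive
    pos = np.count_nonzero(pred & gt) / np.count_nonzero(gt)
    # Same idea for the negative (background) GT
    neg = np.count_nonzero(~pred & ~gt) / np.count_nonzero(~gt)
    return pos, neg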
So there: near-perfect scores. They're not perfect at least partly because the screenshots you gave us contain debris around the edges of the image.
All those masking operations can also be accomplished with plain NumPy boolean arrays instead of uint8 arrays containing 0 and 255.
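For instance, the IoU itself can be computed straight from the two binary images, with no contour extraction or drawContours step in between (a sketch, assuming processed_image and ground_truth_image from your loop are binary images of the same size):

pred = processed_image > 0      # boolean mask from the pipeline output
gt = ground_truth_image > 0     # boolean mask from the ground truth

iou = np.count_nonzero(pred & gt) / np.count_nonzero(pred | gt)
print(f"IoU: {iou * 100:.2f}%")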