Computing mAP in MaskRCNN fails

45 Views Asked by At

I am trying to detect an object using a custom dataset. The images in my dataset are of size 3060x4080. I have created a COCO-like dataset using makesense.ai. I am trying to verify the complete training and testing pipeline of my model before fine-tuning the hyperparameters. The code currently runs on CPU. I am able to train my model with the code shown below, but when I run the evaluate_model function, I get the ValueError shown at the end of the post.

import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import mrcnn
from PIL import Image, ImageDraw

from mrcnn.visualize import display_instances, display_top_masks
from mrcnn.utils import extract_bboxes
from mrcnn.utils import Dataset
from matplotlib import pyplot as plt
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from mrcnn import model as modellib, utils

class CocoLikeDataset(utils.Dataset):
    def load_data(self, annotation_json, images_dir):  # (function body omitted for brevity; it loads a COCO-style dataset from a JSON file)
                
    def load_mask(self, image_id):

def _load_split(annotation_json, images_dir):
    # Build and prepare one dataset split from its COCO-style JSON file.
    split = CocoLikeDataset()
    split.load_data(annotation_json, images_dir)
    split.prepare()
    return split


dataset_train = _load_split('switch_dataset/train/labels/Limit_Switch_Dataset.json', 'switch_dataset/train')
dataset_val = _load_split('switch_dataset/val/labels/Limit_Switch_Dataset_val.json', 'switch_dataset/val')

# Sanity pass: confirm every training image and its masks load without error.
dataset = dataset_train
image_ids = dataset.image_ids
for image_id in image_ids:
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)

# Inspect the first training sample and derive its bounding boxes.
image_id = 0
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
bbox = extract_bboxes(mask)

class LimitSwitchConfig(Config):
    """Training configuration for the limit-switch detector."""
    NAME = "limit_switch_config_coco"
    NUM_CLASSES = 1 + 1  # background + limit switch
    # Tiny epoch for a quick end-to-end smoke test of the pipeline.
    STEPS_PER_EPOCH = 2
    # DETECTION_MIN_CONFIDENCE = 0.9 would skip detections below 90% confidence.


config = LimitSwitchConfig()
config.display()

# Build a training-mode model and warm-start from COCO weights. The head
# layers are excluded because NUM_CLASSES differs from COCO's class count,
# so those layers must be re-learned for this dataset.
model = MaskRCNN(mode='training', model_dir='./', config=config)
model.load_weights(
    filepath='mask_rcnn_coco.h5',
    by_name=True,
    exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"],
)
# BUG FIX: the original passed dataset_train as val_dataset even though a
# separate validation split (dataset_val) was loaded above, so validation
# metrics were computed on the training data.
model.train(
    train_dataset=dataset_train,
    val_dataset=dataset_val,
    learning_rate=config.LEARNING_RATE,
    epochs=1,
    layers='heads',
)

model_path = 'Limit_Switch_mask_rcnn_trained.h5'
model.keras_model.save_weights(model_path)
print("training completed")

class PredictionConfig(Config):
    """Inference-time configuration: evaluate one image at a time."""
    NAME = "limit_switch_config_coco"
    NUM_CLASSES = 1 + 1  # background + limit switch
    # Single-image batches on a single (virtual) GPU for evaluation.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
 
def evaluate_model(dataset, model, cfg):
    """Compute the mean Average Precision (mAP) of `model` on `dataset`.

    dataset: a prepared mrcnn Dataset (e.g. CocoLikeDataset).
    model:   a MaskRCNN instance built in 'inference' mode.
    cfg:     the inference Config used to mold/resize images; it should use
             the same image-resize settings as the training config.

    Returns the mAP over all images, or 0.0 for an empty dataset.
    Raises ValueError when GT and predicted mask shapes disagree.
    """
    # BUG FIX: load_image_gt/mold_image/compute_ap/expand_dims/mean were
    # undefined in the original; qualify them through the modules that this
    # file already imports (modellib, utils, np).
    APs = list()
    for image_id in dataset.image_ids:
        # Load the image plus ground truth, resized according to cfg.
        # use_mini_mask=False keeps GT masks at full (resized) image size.
        image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset, cfg, image_id, use_mini_mask=False)
        # Subtract the configured mean pixel, then add a batch dimension.
        scaled_image = modellib.mold_image(image, cfg)
        sample = np.expand_dims(scaled_image, 0)
        yhat = model.detect(sample, verbose=0)
        r = yhat[0]
        # GT and predicted masks must share the same H x W; otherwise
        # compute_ap's mask-overlap dot product fails with the cryptic
        # "shapes not aligned" ValueError reported below. Fail early with a
        # clear message instead.
        if r['masks'].size and r['masks'].shape[:2] != gt_mask.shape[:2]:
            raise ValueError(
                "GT mask shape %s does not match predicted mask shape %s; "
                "make sure cfg uses the same image resize settings as training"
                % (gt_mask.shape[:2], r['masks'].shape[:2]))
        # compute_ap returns (AP, precisions, recalls, overlaps).
        AP, _, _, _ = utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                       r["rois"], r["class_ids"], r["scores"],
                                       r['masks'])
        APs.append(AP)
    # Guard the empty-dataset case (np.mean([]) is nan with a warning).
    return float(np.mean(APs)) if APs else 0.0
 

cfg = PredictionConfig()
# Fresh model in inference mode; weights come from the training run above.
model = MaskRCNN(mode='inference', model_dir='logs', config=cfg)
weights_path = 'Limit_Switch_mask_rcnn_trained.h5'
model.load_weights(weights_path, by_name=True)
# mAP on the training split — a sanity check; expect a high value if
# training succeeded.
train_mAP = evaluate_model(dataset_train, model, cfg)
print("Train mAP: %.3f" % train_mAP)

I get the following error:

File "<__array_function__ internals>", line 6, in dot
ValueError: shapes (100,1048576) and (983040,1) not aligned: 1048576 (dim 1) != 983040 (dim 0)

My virtual environment is as follows:

python - 3.7.0
numpy==1.20.3
scipy==1.4.1
Pillow==8.4.0
cython==0.29.24
matplotlib
scikit-image==0.16.2
tensorflow==2.2.0
keras==2.3.1
opencv-python==4.5.4.60
h5py==2.10.0
imgaug==0.4.0
IPython[all]
0

There are 0 best solutions below