OpenCV StereoCalibrate not returning expected values

I am using the OpenCV library in C++ to calibrate two Azure Kinect cameras with the cv::stereoCalibrate function, and I use the resulting calibration to align the two point clouds I generate from the cameras. The calibration yields a rotation matrix and a translation vector that should map one camera's point cloud onto the other's. The code I'm using is the green screen example from the Microsoft Azure Kinect Sensor SDK GitHub: link. I use it to take a depth and color image from both cameras; the only change I make is transforming each depth image into the geometry of its corresponding color camera, like so:

// Transform the main camera's depth image into the geometry of its color
// camera, so depth and color share the same viewpoint and resolution.
k4a::image transform_depth_to_color_master(k4a_device_t capturer, k4a_device_configuration_t main_config, k4a::capture image) {
    k4a_transformation_t transformation = NULL;
    k4a_calibration_t calibration = get_calib(capturer, main_config);
    transformation = k4a_transformation_create(&calibration);
    // Allocate a DEPTH16 image at the color camera's resolution; a stride of 0
    // lets the SDK pick the minimum stride for the format.
    k4a_image_t transformed = NULL;
    k4a_image_create(K4A_IMAGE_FORMAT_DEPTH16, k4a_image_get_width_pixels(image.get_color_image().handle()), k4a_image_get_height_pixels(image.get_color_image().handle()), 0, &transformed);
    k4a_transformation_depth_image_to_color_camera(transformation, image.get_depth_image().handle(), transformed);
    k4a_transformation_destroy(transformation);
    return k4a::image(transformed);
}

// Same as above, but for the subordinate camera and its configuration.
k4a::image transform_depth_to_color_slave(k4a_device_t capturer, k4a_device_configuration_t secondary_config, k4a::capture image) {
    k4a_transformation_t transformation = NULL;
    k4a_calibration_t calibration = get_calib(capturer, secondary_config);
    transformation = k4a_transformation_create(&calibration);
    k4a_image_t transformed = NULL;
    k4a_image_create(K4A_IMAGE_FORMAT_DEPTH16, k4a_image_get_width_pixels(image.get_color_image().handle()), k4a_image_get_height_pixels(image.get_color_image().handle()), 0, &transformed);
    k4a_transformation_depth_image_to_color_camera(transformation, image.get_depth_image().handle(), transformed);
    k4a_transformation_destroy(transformation);
    return k4a::image(transformed);
}
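
The two functions are identical apart from the configuration they pass to get_calib, so a single merged helper would behave the same; a minimal sketch (the name transform_depth_to_color is mine):

// Sketch: the two functions above merged, parameterized on the configuration.
k4a::image transform_depth_to_color(k4a_device_t capturer, k4a_device_configuration_t config, k4a::capture image) {
    k4a_calibration_t calibration = get_calib(capturer, config);
    k4a_transformation_t transformation = k4a_transformation_create(&calibration);
    k4a_image_t transformed = NULL;
    k4a_image_create(K4A_IMAGE_FORMAT_DEPTH16,
                     k4a_image_get_width_pixels(image.get_color_image().handle()),
                     k4a_image_get_height_pixels(image.get_color_image().handle()),
                     0, &transformed);
    k4a_transformation_depth_image_to_color_camera(transformation, image.get_depth_image().handle(), transformed);
    k4a_transformation_destroy(transformation);
    return k4a::image(transformed);
}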

I save the color images unchanged and the depth images after running the functions above. I also save the camera intrinsics and the stereo extrinsics:

// Write the stereo extrinsics as JSON. R is the 3x3 rotation matrix
// (row-major) and t the translation vector; .channels gives the element count.
ofstream exfile("extrinsic.json");
if (exfile.is_open()) {
    exfile << "{\"rotation_matrix\" : [";
    for (int count = 0; count < tr_secondary_color_to_main_color.R.channels; count++) {
        if (count == (tr_secondary_color_to_main_color.R.channels - 1)) {
            exfile << tr_secondary_color_to_main_color.R.val[count] << "], ";
            continue;
        }
        exfile << tr_secondary_color_to_main_color.R.val[count] << ", ";
    }
    exfile << "\"translation_matrix\" : [";
    for (int count = 0; count < tr_secondary_color_to_main_color.t.channels; count++) {
        if (count == (tr_secondary_color_to_main_color.t.channels - 1)) {
            exfile << tr_secondary_color_to_main_color.t.val[count] << "]}";
            continue;
        }
        exfile << tr_secondary_color_to_main_color.t.val[count] << ", ";
    }
    exfile.close();
}

// Write each device's depth-camera intrinsics as a flat JSON array.
ofstream myfile("intrinsic1.json");
if (myfile.is_open()) {
    myfile << "{\"intrinsic_matrix\" : [";
    for (int count = 0; count < main_calibration.depth_camera_calibration.intrinsics.parameter_count; count++) {
        if (count == (main_calibration.depth_camera_calibration.intrinsics.parameter_count - 1)) {
            myfile << main_calibration.depth_camera_calibration.intrinsics.parameters.v[count] << "]}";
            continue;
        }
        myfile << main_calibration.depth_camera_calibration.intrinsics.parameters.v[count] << ", ";
    }
    myfile.close();
}

ofstream myfile2("intrinsic2.json");
if (myfile2.is_open()) {
    myfile2 << "{\"intrinsic_matrix\" : [";
    for (int count = 0; count < secondary_calibration.depth_camera_calibration.intrinsics.parameter_count; count++) {
        if (count == (secondary_calibration.depth_camera_calibration.intrinsics.parameter_count - 1)) {
            myfile2 << secondary_calibration.depth_camera_calibration.intrinsics.parameters.v[count] << "]}";
            continue;
        }
        myfile2 << secondary_calibration.depth_camera_calibration.intrinsics.parameters.v[count] << ", ";
    }
    myfile2.close();
}
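
Since OpenCV is already in use, cv::FileStorage could write the same data with less hand-rolled string formatting. A sketch, assuming the Transformation type from the green screen example (cv::Matx33d R, cv::Vec3d t) and OpenCV >= 3.2 for JSON output:

#include <opencv2/core.hpp>

// Sketch: serialize the extrinsics via cv::FileStorage. Note OpenCV stores
// each matrix as an object with "rows", "cols", "dt" and "data" fields, so a
// reader would index e.g. extrinsic_json["rotation_matrix"]["data"].
void save_extrinsics(const cv::Matx33d &R, const cv::Vec3d &t) {
    cv::FileStorage fs("extrinsic.json", cv::FileStorage::WRITE);
    fs << "rotation_matrix" << cv::Mat(R);
    fs << "translation_matrix" << cv::Mat(t);
    fs.release();
}

This would be called as save_extrinsics(tr_secondary_color_to_main_color.R, tr_secondary_color_to_main_color.t).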

I then use this data in a Python script with the Open3D library:

import json
import open3d

with open('intrinsic1.json') as f:
    intrinsic_json_1 = json.load(f)
with open('intrinsic2.json') as f2:
    intrinsic_json_2 = json.load(f2)
with open('extrinsic.json') as f3:
    extrinsic_json = json.load(f3)

col_img_1 = open3d.io.read_image("color1.jpg")
dep_img_1 = open3d.io.read_image("depth1.png")
col_img_2 = open3d.io.read_image("color2.jpg")
dep_img_2 = open3d.io.read_image("depth2.png")
rgbd1 = open3d.geometry.RGBDImage.create_from_color_and_depth(col_img_1, dep_img_1, convert_rgb_to_intensity=False)
rgbd2 = open3d.geometry.RGBDImage.create_from_color_and_depth(col_img_2, dep_img_2, convert_rgb_to_intensity=False)

# Build the 3x3 pinhole matrices; the saved Azure Kinect parameters are
# ordered cx, cy, fx, fy, ..., so index 2 is fx, 3 is fy, 0 is cx, 1 is cy.
phc = open3d.camera.PinholeCameraIntrinsic()
phc2 = open3d.camera.PinholeCameraIntrinsic()
phc.intrinsic_matrix = [intrinsic_json_1["intrinsic_matrix"][2], 0, intrinsic_json_1["intrinsic_matrix"][0]], [0, intrinsic_json_1["intrinsic_matrix"][3], intrinsic_json_1["intrinsic_matrix"][1]], [0, 0, 1]
phc2.intrinsic_matrix = [intrinsic_json_2["intrinsic_matrix"][2], 0, intrinsic_json_2["intrinsic_matrix"][0]], [0, intrinsic_json_2["intrinsic_matrix"][3], intrinsic_json_2["intrinsic_matrix"][1]], [0, 0, 1]

pcd = open3d.geometry.PointCloud.create_from_rgbd_image(rgbd1, phc)
pcd2 = open3d.geometry.PointCloud.create_from_rgbd_image(rgbd2, phc2)

# Apply the stereo extrinsics to the second cloud: rotate by R (row-major, 9
# values), then translate by t (converted from mm to m), and merge the clouds.
pcd2.rotate([extrinsic_json["rotation_matrix"][0:3], extrinsic_json["rotation_matrix"][3:6], extrinsic_json["rotation_matrix"][6:9]])
trans_list = [x / 1000 for x in extrinsic_json["translation_matrix"][0:3]]
pcd2.translate(trans_list)
pcd += pcd2
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])  # flip so the cloud is shown upright
open3d.visualization.draw_geometries([pcd])

I divide the values in the extrinsic translation vector by 1000 because the green screen example works in millimeters, while Open3D appears to use meters. However, this does not give the desired result (screenshots: view1, view2, view3): the point clouds do not align, and I do not know why. The intrinsics and extrinsics are as follows:

intrinsics main cam:

{"intrinsic_matrix" : [511.402, 508.682, 504.847, 505.014, 0.717922, 0.328064, 0.018138, 1.0548, 0.509733, 0.0934842, 0, 0, -2.10705e-05, 0.000120733]}

intrinsics sub cam:

{"intrinsic_matrix" : [516.703, 519.074, 505.402, 505.532, 21.7393, 13.1051, 0.602955, 22.0502, 20.5187, 3.35786, 0, 0, 6.67588e-05, -5.81172e-05]}

extrinsics:

{"rotation_matrix" : [0.736823, 0.001164, -0.676084, 0.018739, 0.999579, 0.0221434, 0.675826, -0.0289849, 0.736491], "translation_matrix" : [304.393, 6.84392, 130.422]}

Does anyone know what is going wrong here?

I measured the squares on the chessboard used for the calibration multiple times, so I am quite certain I am using the right measurements (in mm).
