How to detect squares in video with OpenCV?

I combined the squares.cpp sample with the cvBoundingRect.cpp code to detect squares in video. To do that I had to convert from IplImage to Mat (using the cvarrToMat function) so that the findSquares and drawSquares functions could run. Unfortunately, after a successful compilation I get this error at runtime:

OpenCV Error: Assertion failed (j < nsrcs && src[j].depth() == depth) in mixChannels, file /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp, line 1205
libc++abi.dylib: terminating with uncaught exception of type cv::Exception: /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp:1205: error: (-215) j < nsrcs && src[j].depth() == depth in function mixChannels
Abort trap: 6

Here's the code:

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <math.h>
#include <string.h>

using namespace cv;
using namespace std;

int thresh = 50, N = 11;
const char* wndname = "Square Detection Demo";

// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

// returns sequence of squares detected on the image.
// the sequence is stored in the specified vector
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();

Mat pyr, timg, gray0(image.size(), CV_8U), gray;

// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;

// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
    int ch[] = {c, 0};
    mixChannels(&timg, 1, &gray0, 1, ch, 1);

    // try several threshold levels
    for( int l = 0; l < N; l++ )
    {
        // hack: use Canny instead of zero threshold level.
        // Canny helps to catch squares with gradient shading
        if( l == 0 )
        {
            // apply Canny. Take the upper threshold from slider
            // and set the lower to 0 (which forces edges merging)
            Canny(gray0, gray, 0, thresh, 5);
            // dilate canny output to remove potential
            // holes between edge segments
            dilate(gray, gray, Mat(), Point(-1,-1));
        }
        else
        {
            // apply threshold if l!=0:
            //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
            gray = gray0 >= (l+1)*255/N;
        }

        // find contours and store them all as a list
        findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

        vector<Point> approx;

        // test each contour
        for( size_t i = 0; i < contours.size(); i++ )
        {
            // approximate contour with accuracy proportional
            // to the contour perimeter
            approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

            // square contours should have 4 vertices after approximation
            // relatively large area (to filter out noisy contours)
            // and be convex.
            // Note: absolute value of an area is used because
            // area may be positive or negative - in accordance with the
            // contour orientation
            if( approx.size() == 4 &&
                fabs(contourArea(Mat(approx))) > 1000 &&
                isContourConvex(Mat(approx)) )
            {
                double maxCosine = 0;

                for( int j = 2; j < 5; j++ )
                {
                    // find the maximum cosine of the angle between joint edges
                    double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                    maxCosine = MAX(maxCosine, cosine);
                }

                // if cosines of all angles are small
                // (all angles are ~90 degrees) then write quadrangle
                // vertices to resultant sequence
                if( maxCosine < 0.3 )
                    squares.push_back(approx);
            }
        }
    }
}
}


// the function draws all the squares in the image
static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 0; i < squares.size(); i++ )
{
    const Point* p = &squares[i][0];
    int n = (int)squares[i].size();
    polylines(image, &p, &n, 1, true, Scalar(255,0,0), 3, LINE_AA);
}

imshow(wndname, image);
}

CvRect rect;
CvSeq* contours = 0;
CvMemStorage* storage = NULL;
CvCapture *cam;
IplImage *currentFrame, *currentFrame_grey, *differenceImg, *oldFrame_grey;

bool first = true;


int main(int argc, char* argv[])
{
//Create a new movie capture object.
   cam = cvCaptureFromCAM(0);

   //create storage for contours
   storage = cvCreateMemStorage(0);

   //capture current frame from webcam
   currentFrame = cvQueryFrame(cam);

   //Size of the image.
   CvSize imgSize;
   imgSize.width = currentFrame->width;
   imgSize.height = currentFrame->height;

   //Images to use in the program.
   currentFrame_grey = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);                           

namedWindow( wndname, 1 );
    vector<vector<Point> > squares;

while(1)
   {
          currentFrame = cvQueryFrame( cam );
          if( !currentFrame ) break;

          //Convert the image to grayscale.
          cvCvtColor(currentFrame,currentFrame_grey,CV_RGB2GRAY);

          if(first) //Capturing Background for the first time
          {
                 differenceImg = cvCloneImage(currentFrame_grey);
                 oldFrame_grey = cvCloneImage(currentFrame_grey);
                 cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
                 first = false;
                 continue;
          }

          //Compute the absolute difference between the stored background and the current frame.
          cvAbsDiff(oldFrame_grey,currentFrame_grey,differenceImg);

          //blurring the difference image
          cvSmooth(differenceImg, differenceImg, CV_BLUR);             

          //apply threshold to discard small unwanted movements
          cvThreshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);

          //convert frames to cv::Mat so findSquares/drawSquares can run
          cv::Mat diffImg = cv::cvarrToMat(differenceImg);
          cv::Mat currFrame = cv::cvarrToMat(currentFrame);

          findSquares(diffImg, squares);

          //draw the detected squares on the colour frame
          drawSquares(currFrame, squares);

          //display colour image with bounding box
          cvShowImage("Output Image", currentFrame);

          //display threshold image
          cvShowImage("Difference image", differenceImg);

          //New Background
          cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);

          //clear memory and contours
          cvClearMemStorage( storage );
          contours = 0;

          //press Esc to exit
          char c = cvWaitKey(33);
          if( c == 27 ) break;

   }

// Destroy the image & movie objects
   cvReleaseImage(&oldFrame_grey);
   cvReleaseImage(&differenceImg);
   cvReleaseImage(&currentFrame);
   cvReleaseImage(&currentFrame_grey);


return 0;
}

Accepted Answer

As the error message says, your problem is in cv::mixChannels(). See the documentation. findSquares() pulls channels 0, 1 and 2 out of its input with mixChannels(), but the difference image you pass it is single-channel, so source channels 1 and 2 do not exist and the assertion fails.
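
To make the contract concrete, here is a minimal, self-contained sketch (not the code above; the matrix sizes and values are made up) of when mixChannels() succeeds and when it trips exactly this assertion:

#include <opencv2/core/core.hpp>

int main()
{
    cv::Mat bgr(4, 4, CV_8UC3, cv::Scalar(1, 2, 3));
    cv::Mat gray(4, 4, CV_8UC1);

    // fromTo holds (source channel, destination channel) pairs; every source
    // channel index must exist somewhere in the input matrices.
    int fromTo[] = {2, 0};                          // copy channel 2 of bgr into gray
    cv::mixChannels(&bgr, 1, &gray, 1, fromTo, 1);  // fine: bgr has 3 channels

    // With a single-channel source the same pair would ask for channel 2 of a
    // 1-channel image, which triggers the "j < nsrcs" assertion you are seeing:
    // cv::mixChannels(&gray, 1, &gray, 1, fromTo, 1);
    return 0;
}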

Or you could simply do something like

cv::Mat channels[3];
cv::split(multiChannelImage, channels);

and then access each channel using

cv::Mat currChannel = channels[channelNumber];
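
Put together, a rough, self-contained sketch of the split() approach (multiChannelImage, channelNumber and the matrix contents are placeholders, not taken from your program):

#include <opencv2/core/core.hpp>

int main()
{
    cv::Mat multiChannelImage(4, 4, CV_8UC3, cv::Scalar(10, 20, 30));

    cv::Mat channels[3];
    cv::split(multiChannelImage, channels);     // each channels[i] is CV_8UC1

    int channelNumber = 1;                      // pick whichever plane you need
    cv::Mat currChannel = channels[channelNumber];

    // currChannel can now be thresholded or passed to Canny the same way
    // gray0 is used inside findSquares().
    return 0;
}

Either way, whatever you feed to findSquares() has to actually contain the three channels it loops over, so it is worth checking image.channels() before calling it.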