How to create a GUI app in C# for code written in OpenCV C++?

I have written face recognition code in C++ using OpenCV.

I use my webcam for live video and display the recognized faces in the video in a debugging window.

Now I want to create an app in C# using Visual Studio, take the output of the C++ OpenCV code, and show it in a window in the C# app.

So my problems are:

1) How do I use the OpenCV C++ code in C#? (The sketch below shows the kind of wrapper I have in mind.)

2) How do I put the output of my code in a window in the C# app?
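
What I have in mind (but don't know how to do properly) is to build the OpenCV code into a native DLL and call it from C# with P/Invoke. Below is a rough sketch of the C++ side as I imagine it; the file name and the function names (fr_init, fr_grab_frame, fr_release) are just placeholders I made up, not part of any existing library. I left out the EigenFace training/prediction part to keep the sketch short; in the real DLL I would do the same detection + prediction as in my code below.

// face_bridge.cpp -- hypothetical wrapper around my OpenCV code.
// Built as a DLL so a C# app can import these functions with [DllImport].

#include <opencv2/opencv.hpp>
#include <cstring>
#include <vector>

#ifdef _WIN32
#define FR_API extern "C" __declspec(dllexport)
#else
#define FR_API extern "C"
#endif

static cv::VideoCapture g_capture;       // webcam handle kept inside the DLL
static cv::CascadeClassifier g_cascade;  // Haar cascade for face detection

// Open the webcam and load the cascade; returns 0 on success, -1 on failure.
FR_API int fr_init(const char* cascade_path)
{
    if (!g_cascade.load(cascade_path))
        return -1;
    g_capture.open(0);
    return g_capture.isOpened() ? 0 : -1;
}

// Grab one frame, draw rectangles around detected faces, and copy the BGR
// pixels into a buffer allocated by the caller (width * height * 3 bytes).
FR_API int fr_grab_frame(unsigned char* buffer, int width, int height)
{
    cv::Mat frame, gray;
    if (!g_capture.read(frame) || frame.empty())
        return -1;

    cv::cvtColor(frame, gray, CV_BGR2GRAY);

    std::vector<cv::Rect> faces;
    g_cascade.detectMultiScale(gray, faces, 1.1, 10);
    for (size_t i = 0; i < faces.size(); i++)
        cv::rectangle(frame, faces[i], CV_RGB(0, 255, 0), 1);

    // resize to the size the caller asked for, then hand back the raw pixels
    cv::resize(frame, frame, cv::Size(width, height));
    std::memcpy(buffer, frame.data, (size_t)width * height * 3);
    return 0;
}

// Release the webcam.
FR_API void fr_release()
{
    g_capture.release();
}

On the C# side I am assuming I would declare these functions with [DllImport("face_bridge.dll")], call fr_grab_frame on a timer, wrap the returned BGR bytes in a System.Drawing.Bitmap and assign it to a PictureBox. Is that a reasonable approach, or would something like C++/CLI or Emgu CV be better?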

My code:

#include <opencv2/opencv.hpp>               // core OpenCV headers (2.4.x; also pulls in the contrib FaceRecognizer)
#include <opencv2/highgui/highgui.hpp>      // window and video I/O functions
#include <fstream>                          // reading images.txt
#include <sstream>                          // stringstream used to split each line
#include <iostream>                         // cout for error messages
#include <cstdlib>                          // atoi


using namespace std;
using namespace cv;


static void read_data(vector<Mat>& images, vector<int>& labels)
{
    ifstream file("images.txt");   // images.txt contains image paths and labels separated by a space
    if (!file)
    {
        cout << "ERROR opening images.txt" << endl;
        return;
    }

    string line;
    string a[2];

    while (getline(file, line))    // read images.txt line by line
    {
        int i = 0;
        stringstream iss(line);
        while (iss.good() && i < 2)
        {
            iss >> a[i];
            ++i;
        }
        images.push_back(imread(a[0], CV_LOAD_IMAGE_GRAYSCALE)); // a[0] = path of the image
        labels.push_back(atoi(a[1].c_str()));                    // a[1] = its label
    }

    file.close();
}

int main()  
{  
    vector<Mat> images;    // training images, loaded as grayscale Mats
    vector<int> labels;    // the corresponding labels

    // load the training images and labels listed in images.txt
    read_data(images, labels);

    if (images.empty())
    {
        cout << "ERROR: no training images loaded" << endl;
        return 1;
    }

    // take the size of the sample images
    int im_width = images[0].cols;
    int im_height = images[0].rows;

    // threshold: maximum distance to a known face; predictions above it are treated as unknown
    double threshold = 5000.0;

    // create an EigenFaceRecognizer that keeps 10 principal components
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer(10, threshold);

    // train the face recognizer using the sample images
    model->train(images,labels);         

    // load the Haar cascade used to detect faces
    CascadeClassifier face_cascade;
    if (!face_cascade.load("c:\\haar\\haarcascade_frontalface_default.xml"))
    {
        cout << "ERROR loading cascade file" << endl;
        return 1;
    }

    // capture video input from the default webcam
    VideoCapture capture(CV_CAP_ANY);

    if (!capture.isOpened())
    {
        cout << "ERROR opening camera" << endl;
        return 1;
    }

    // ask the camera for 320x240 frames (the request may not always be honoured)
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

    // the VideoWriter frame size must match the frames that are actually captured
    Size frameSize((int)capture.get(CV_CAP_PROP_FRAME_WIDTH),
                   (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));

    // initialize the VideoWriter object (MPEG-1 codec, 20 fps)
    VideoWriter oVideoWriter("MyVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true);

    Mat cap_img, gray_img;

    //store the detected faces
    vector<Rect> faces;   
    while (true)
    {
        // capture the next frame into cap_img; stop if the camera returns nothing
        capture >> cap_img;
        if (cap_img.empty())
            break;

        // convert the frame from colour to grayscale
        cvtColor(cap_img, gray_img, CV_BGR2GRAY);

        //Histogram Equalization to increase contrast by stretching intensity ranges
        equalizeHist(gray_img, gray_img);


        // detects faces in the frame
        //CV_HAAR_SCALE_IMAGE to scale the size of the detect face 
        //CV_HAAR_DO_CANNY_PRUNING to increase speed as it skips image regions that are unlikely to contain a face
        face_cascade.detectMultiScale(gray_img,faces,1.1,10,CV_HAAR_SCALE_IMAGE | CV_HAAR_DO_CANNY_PRUNING, Size(0,0),Size(300,300));  


        //Loop over the detected faces
        for (size_t i = 0; i < faces.size(); i++)
        {
            Rect face_i = faces[i];
            Mat face = gray_img(face_i);

            Mat face_resized;

            //resize the detected face to the size of sample images
            resize(face,face_resized, Size(im_width,im_height),1.0,1.0,INTER_CUBIC);  

            // predict which person the face belongs to; returns -1 when no known face is close enough
            int predicted_label = model->predict(face_resized);

            // Draws a rectangle around the faces
            rectangle(cap_img,face_i, CV_RGB(0,255,0),1);   

            // text to draw next to the face; unknown faces are labelled "new"
            string box_text = "new";

            // change the text based on the predicted label
            switch (predicted_label)
            {
                case 0: box_text = "keanu";  break;
                case 1: box_text = "selena"; break;
            }


            // calculate the coordinates to put the text based on the postion of the face 
            int pos_x = max(face_i.tl().x - 10, 0);
            int pos_y = max(face_i.tl().y - 10, 0);

            // put text on the output screen
            putText(cap_img, box_text , Point(pos_x,pos_y), FONT_HERSHEY_PLAIN,0.8, CV_RGB(0,255,0), 1,CV_AA);  

            if (box_text=="keanu" || box_text=="selena");
            else        
               oVideoWriter.write(cap_img); //writer the frame into the file
        }

        // show the frame on the result window
        imshow("Result",cap_img); 

        // exit when the ESC key is pressed
        char c = (char)waitKey(10);
        if (c == 27)
            break;
    }

    return 0;
}  