2016-05-08 2 views
0

Ich versuche, die Position (x, y) des verfolgten Objekts in eine Textdatei zu schreiben. Ich benutze OpenCV und C++ in Visual Studio 2010. Bis jetzt kann ich Daten speichern, aber die Datei enthält nur die ursprüngliche Position, ständig wiederholt. Ich möchte, dass stattdessen bei jedem Frame die aktuelle Position gespeichert wird. Wie schreibe ich die Position (x, y) des verfolgten Objekts in eine Textdatei?

Kurz gesagt: Wie kann ich genau die Daten, die putText() auf den Bildschirm schreibt, stattdessen in eine Datei schreiben? Hier ist, was putText() auf den Bildschirm schreibt:

//write the position of the object to the screen 
putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2); 

ich glaube, das Problem in diesem Abschnitt ist:

//save position 
//NOTE(review): this snippet is the bug being asked about — it reopens
//"position.txt" in truncate mode (wiping whatever the previous frame wrote)
//and then writes the SAME (x, y) pair 10001 times (count runs -1..9999),
//which is why the file only ever contains one position, repeated.
ofstream file_; 
file_.open("position.txt"); 
file_ <<"these are the position pattern made by the foreground object \n"; 
for(int count = -1; count < 10000; count++) 
{ 
    file_ <<"X:"<<intToString(x)<<" "<<"Y:"<<intToString(y)<<"\n"; 


} 
file_.close(); 

der vollständige Code ist dies:

#include < opencv2/opencv.hpp> 
#include < opencv2/core/core.hpp> 
#include < opencv2/highgui/highgui.hpp> 
#include < opencv2/video/background_segm.hpp> 
#include < opencv2/imgproc/imgproc.hpp> 
#include < opencv2/video/video.hpp> 
//#include < opencv2/videoio.hpp> 
//#include < opencv2/imgcodecs.hpp> 
//C 
#include <stdio.h> 
//C++ 
#include <iostream> 
#include <sstream> 
#include <fstream> 

using namespace cv; 
using namespace std; 

//global variables 
cv::Mat fg_mask; 
cv::Mat frame; 
cv::Mat binaryImage; 
cv::Mat ContourImg; 
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor 
int keyboard; //input from keyboard 
//we'll have just one object to search for 
//and keep track of its position. 
int theObject[2] = {0,0}; 
//bounding rectangle of the object, we will use the center of this as its position. 
Rect objectBoundingRectangle = Rect(0,0,0,0); 
//our sensitivity value to be used 
const static int SENSITIVITY_VALUE = 50; 

//Formats an integer as its decimal string representation.
//@param number value to convert (any int, including negatives)
//@return the decimal digits of number as a std::string
std::string intToString(int number){
    //an output-only string stream is all that is needed for formatting
    std::ostringstream out;
    out << number;
    return out.str();
}
//Searches the binary (thresholded) image for contours, estimates the tracked
//object's position as the centre of the largest contour's bounding box,
//draws crosshairs plus a text label onto framein, and appends the position
//to "position.txt" (one line per call, i.e. one line per frame).
//@param binaryImage thresholded foreground mask (taken by value; copied internally anyway)
//@param framein     frame to draw the overlay on — passed by reference so the
//                   caller sees the drawing.
void searchForMovement(Mat binaryImage, Mat &framein){
    bool objectDetected = false;
    //findContours modifies its input, so work on a scratch copy
    Mat temp;
    binaryImage.copyTo(temp);
    //these two vectors needed for output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    //find external contours of the filtered image
    findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);

    //if contours vector is not empty, we have found some objects
    objectDetected = !contours.empty();

    if(objectDetected){
        //NOTE(review): this assumes the biggest contour is last in the vector,
        //which findContours does not guarantee — kept as-is from the original.
        vector< vector<Point> > largestContourVec;
        largestContourVec.push_back(contours.at(contours.size()-1));
        //make a bounding rectangle around the largest contour then find its centroid
        //this will be the object's final estimated position.
        objectBoundingRectangle = boundingRect(largestContourVec.at(0));
        int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2;
        int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2;

        //update the object's position (globals read by subsequent frames too,
        //so the last known position is kept when nothing is detected)
        theObject[0] = xpos;
        theObject[1] = ypos;
    }
    //make some temp x and y variables so we dont have to type out so much
    int x = theObject[0];
    int y = theObject[1];

    //draw some crosshairs around the object
    circle(framein,Point(x,y),20,Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x,y+25),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x+25,y),Scalar(0,255,0),2);

    //write the position of the object to the screen
    putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);

    //save position.
    //BUGFIX: the original opened the file in truncate mode on every frame and
    //wrote the same (x, y) 10001 times, so the file only ever contained the
    //last frame's position, repeated. Opening with ios::app and writing a
    //single line per call records the position history across frames.
    //(Note: the log also persists across program runs; delete it to restart.)
    ofstream file_("position.txt", ios::app);
    file_ <<"X:"<<intToString(x)<<" "<<"Y:"<<intToString(y)<<"\n";
}

    //Cleans up the thresholded mask in place: erodes twice to remove speckle
    //noise, then dilates twice to restore the surviving blobs.
    //@param thresh binary mask, modified in place.
    void morphOps(Mat &thresh){ 

    //create structuring elements used to "erode" and "dilate" the image. 
    //NOTE(review): the original comments claimed 3x3 and 8x8; the code
    //actually uses 2x2 and 1x1 (a 1x1 element makes dilate a no-op). 

    Mat erodeElement = getStructuringElement(MORPH_RECT,Size(2,2)); //2x2 
    //dilate element intended to make the object nicely visible 
    Mat dilateElement = getStructuringElement(MORPH_RECT,Size(1,1)); //1x1 

    erode(thresh,thresh,erodeElement); 
    erode(thresh,thresh,erodeElement); 


    dilate(thresh,thresh,dilateElement); 
    dilate(thresh,thresh,dilateElement); 

    } 
//Program entry point: endlessly re-opens the video file, runs MOG background
//subtraction on every frame, tracks the largest moving blob and draws the
//result. Keys: ESC quits, 't' toggles tracking, 'd' toggles the debug
//windows, 'p' pauses/resumes.
int main(int, char**) 
{ 
//some boolean variables for added functionality 
//NOTE(review): objectDetected is never read in main — candidate for removal. 
bool objectDetected = false; 
//these two can be toggled by pressing 'd' or 't' 
bool debugMode = true; 
bool trackingEnabled = true; 
//pause and resume code 
bool pause = false; 
//video capture object. 
VideoCapture capture; 
while(1){ 

    //we can loop the video by re-opening the capture every time the video reaches its last frame 

    capture.open("Video_003.avi"); 
    //capture.open(0); 

    //bail out if the video source could not be opened 
    if(!capture.isOpened()){ 
     cout<<"ERROR ACQUIRING VIDEO FEED\n"; 
     getchar(); 
     return -1; 
    } 
    double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video 
    cout << "Frame per seconds : " << fps << endl; 

//fresh background model for every pass over the video (Ptr<> frees the old one) 
pMOG = new BackgroundSubtractorMOG(); 

//morphology element (7x7 rectangle, anchored at its centre) 
Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3)); 

//frame counter; pre-incremented below, so the first frame is number 0 
int count = -1; 

//check if the video has reach its last frame. 
    //we add '-1' because we are reading two frames from the video at a time. 
    //if this is not included, we get a memory error! 
    while(capture.get(CV_CAP_PROP_POS_FRAMES) <capture.get(CV_CAP_PROP_FRAME_COUNT)-1){ 
    // Get frame from camera 
     capture.read(frame); 
    // Update counter 
    ++count; 
    //Resize to half size (speeds up all later processing) 
resize(frame, frame, Size(frame.size().width/2, frame.size().height/2)); 
    //Blur to suppress pixel noise before background subtraction 
    blur(frame, frame, Size(5,5)); 

    // Background subtraction (0.05 = learning rate of the MOG model) 
    pMOG->operator()(frame, fg_mask,0.05); 

    //////// 
    //pre-processing: morphological close fills small holes in the mask 
    morphologyEx(fg_mask, binaryImage, CV_MOP_CLOSE, element); 

    // threshold 
    //threshold intensity image at a given sensitivity value 
      cv::threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY); 
     morphOps(binaryImage); 

    if(debugMode==true){ 

    imshow("frame", frame); 
    imshow("fg_mask", fg_mask); 
    imshow("final", binaryImage); 
    }else{ 
      //if not in debug mode, destroy the windows so we don't see them anymore 
      cv::destroyWindow("frame"); 
      cv::destroyWindow("fg_mask"); 
      cv::destroyWindow("final"); 
    } 

    //if tracking enabled, search for contours in our thresholded image 
     if(trackingEnabled){ 

      //updates the global position, draws the overlay and (re)writes position.txt 
      searchForMovement(binaryImage,frame); 

    //Find contour 
    ContourImg = binaryImage.clone(); 
    //less blob delete 
    vector< vector< Point> > contours; 

    findContours(ContourImg, 
     contours, // a vector of contours 
     CV_RETR_EXTERNAL, // retrieve the external contours 
     CV_CHAIN_APPROX_NONE); // all pixels of each contours 


//NOTE(review): 'output' is never used — candidate for removal. 
vector<Rect> output; 
    vector< vector< Point> >::iterator itc= contours.begin(); 
    while (itc!=contours.end()) { 

//Create bounding rect of object 
//rect draw on origin image 
Rect mr= boundingRect(Mat(*itc)); 
rectangle(frame, mr, CV_RGB(255,0,0)); 
++itc; 
    } 
     } 
     //shown unconditionally, so the "frame" window reappears even after 
     //debug mode destroyed it above 
     imshow("frame", frame); 
    // Save foreground mask (cast needed: VS2010 lacks to_string(int)) 
    string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png"; 
    imwrite("D:\\SO\\temp\\" + name, fg_mask); 

    switch(waitKey(10)){ 

     case 27: //'esc' key has been pressed, exit program. 
      return 0; 
     case 116: //'t' has been pressed. this will toggle tracking 
      trackingEnabled = !trackingEnabled; 
      if(trackingEnabled == false) cout<<"Tracking disabled."<<endl; 
      else cout<<"Tracking enabled."<<endl; 
      break; 
     case 100: //'d' has been pressed. this will toggle debug mode 
      debugMode = !debugMode; 
      if(debugMode == true) cout<<"Debug mode enabled."<<endl; 
      else cout<<"Debug mode disabled."<<endl; 
      break; 
     case 112: //'p' has been pressed. this will pause/resume the code. 
      pause = !pause; 
      if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl; 
      while (pause == true){ 
       //stay in this loop until 'p' is pressed again 
       switch (waitKey()){ 
        //a switch statement inside a switch statement? Mind blown. 
       case 112: 
        //change pause back to false 
        pause = false; 
        cout<<"Code Resumed"<<endl; 
        break; 
       } 

} 
// the camera will be deinitialized automatically in VideoCapture destructor 

      } 
    } 
    } 
    //release the capture before re-opening and looping again. 
    capture.release(); 
    } 
    return 0; 
    } 
+0

Warum Sie zweimal die Erosion und Dilatation anwenden? – black

+0

Nun, ich folge nur einige Vorschläge aus dem Internet, um das Erscheinungsbild des Vordergrundobjekts zu verbessern. Übrigens, haben Sie einen Vorschlag, wie das Rauschen zu reduzieren ist Ich habe immer noch viel Lärm – Zakarya

+0

Aufruf zweimal hintereinander ist viel langsamer als nur die Kernel-Größe zu erhöhen. Verwenden Sie zum Reduzieren von Rauschen am Anfang Gaußsche Unschärfe. Für jede andere Frage, bitte öffnen Sie eine neue Frage. – black

Antwort

2

OK, ich sehe einige seltsame Dinge in Ihrem Code. Aber um Ihre Frage zu beantworten:

In Ihrem Code öffnen Sie für jeden Frame die Datei, geben 10000-mal dieselben Werte für x und y aus und schließen die Datei wieder. Stattdessen sollten Sie die Datei einmal am Anfang öffnen, pro Frame nur ein Koordinatenpaar ausgeben und die Datei erst ganz am Ende schließen.

Beispielcode:

Bevor die Hauptschleife beginnt:

ofstream file_; 
file_.open("position.txt"); 
file_ <<"these are the position pattern made by the foreground object \n"; 

In Hauptschleife

file_ <<"X:"<<intToString(x)<<" "<<"Y:"<<intToString(y)<<"\n"; 

Nachdem die Hauptschleife endet:

file_.close(); 

EDIT: Vollständigen Code so hinzugefügt, wie ich es gemeint habe:

#include < opencv2/opencv.hpp> 
#include < opencv2/core/core.hpp> 
#include < opencv2/highgui/highgui.hpp> 
#include < opencv2/video/background_segm.hpp> 
#include < opencv2/imgproc/imgproc.hpp> 
#include < opencv2/video/video.hpp> 
//#include < opencv2/videoio.hpp> 
//#include < opencv2/imgcodecs.hpp> 
//C 
#include <stdio.h> 
//C++ 
#include <iostream> 
#include <sstream> 
#include <fstream> 

using namespace cv; 
using namespace std; 

//position log shared by main() (opens it once) and searchForMovement() (appends to it) 
ofstream file_; 

//global variables 
cv::Mat fg_mask;  //raw foreground mask produced by the MOG background subtractor 
cv::Mat frame;  //current (resized, blurred) video frame 
cv::Mat binaryImage;  //cleaned, thresholded foreground mask used for contour search 
cv::Mat ContourImg;  //scratch copy of binaryImage consumed by findContours in main 
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor 
int keyboard; //input from keyboard (NOTE(review): never used) 
//we'll have just one object to search for 
//and keep track of its position (x, y). 
int theObject[2] = {0,0}; 
//bounding rectangle of the object, we will use the center of this as its position. 
Rect objectBoundingRectangle = Rect(0,0,0,0); 
//our sensitivity value to be used by the binary threshold 
const static int SENSITIVITY_VALUE = 50; 

//Converts an int to its decimal string form.
//@param number the value to format
//@return decimal representation of number
std::string intToString(int number){
    std::ostringstream buf;
    buf << number;
    return buf.str();
}
//Finds the external contours in binaryImage, treats the bounding-box centre of
//the last (assumed largest) contour as the tracked object's position, draws a
//crosshair plus a text label onto framein, and logs one "X:.. Y:.." line per
//call to the global file_ stream.
//@param binaryImage thresholded foreground mask (by value; copied internally anyway)
//@param framein     frame to draw on, passed by reference so the caller sees it
void searchForMovement(Mat binaryImage, Mat &framein){
    //work on a scratch copy because findContours modifies its input
    Mat scratch;
    binaryImage.copyTo(scratch);

    //output containers for findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    //retrieve only the external contours of the filtered image
    findContours(scratch,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);

    //a non-empty contour list means something moved in this frame
    if(!contours.empty()){
        //assume the last contour in the vector is the biggest one and
        //therefore the object we are tracking (as the original code did)
        objectBoundingRectangle = boundingRect(contours.back());
        //the centre of its bounding rectangle becomes the position estimate
        theObject[0] = objectBoundingRectangle.x + objectBoundingRectangle.width/2;
        theObject[1] = objectBoundingRectangle.y + objectBoundingRectangle.height/2;
    }

    //short aliases for the current (possibly last-known) position
    const int x = theObject[0];
    const int y = theObject[1];

    //crosshair: a circle with four tick lines radiating from the centre
    const int arm = 25;
    circle(framein,Point(x,y),20,Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x,y-arm),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x,y+arm),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x-arm,y),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x+arm,y),Scalar(0,255,0),2);

    //write the position of the object to the screen
    putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);

    //append exactly one coordinate pair per processed frame to the log
    file_ <<"X:"<<intToString(x)<<" "<<"Y:"<<intToString(y)<<"\n";
}

//Cleans up the thresholded mask in place: two erosion passes remove speckle
//noise, then two dilation passes restore the surviving blobs.
//@param thresh binary mask, modified in place.
void morphOps(Mat &thresh){

    //2x2 rectangular kernel used for erosion
    Mat shrinkKernel = getStructuringElement(MORPH_RECT,Size(2,2));
    //1x1 rectangular kernel used for dilation (a 1x1 element is effectively a no-op)
    Mat growKernel = getStructuringElement(MORPH_RECT,Size(1,1));

    //same sequence as the original pipeline: erode twice, then dilate twice
    for(int pass = 0; pass < 2; ++pass)
        erode(thresh,thresh,shrinkKernel);
    for(int pass = 0; pass < 2; ++pass)
        dilate(thresh,thresh,growKernel);
}
//Program entry point: opens the position log once, then endlessly re-opens the
//video, runs MOG background subtraction on every frame, tracks the largest
//moving blob (searchForMovement appends one position per frame to file_) and
//draws the result. Keys: ESC quits, 't' toggles tracking, 'd' toggles the
//debug windows, 'p' pauses/resumes.
int main(int, char**)
{
    //open the log exactly once, before any frames are processed; it stays
    //open for the whole run and is closed by its destructor on exit.
    file_.open("position.txt");
    file_ <<"these are the position pattern made by the foreground object \n";

    //these two can be toggled by pressing 'd' or 't'
    bool debugMode = true;
    bool trackingEnabled = true;
    //pause and resume code
    bool pause = false;
    //video capture object.
    VideoCapture capture;

    while(1){

        //we can loop the video by re-opening the capture every time the video reaches its last frame
        capture.open("Video_003.avi");
        //capture.open(0);

        if(!capture.isOpened()){
            cout<<"ERROR ACQUIRING VIDEO FEED\n";
            getchar();
            return -1;
        }
        double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
        cout << "Frame per seconds : " << fps << endl;

        //fresh background model for every pass over the video
        pMOG = new BackgroundSubtractorMOG();

        //morphology element (7x7 rectangle, anchored at its centre)
        Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3));

        //frame counter; pre-incremented below, so the first frame is number 0
        int count = -1;

        //check if the video has reached its last frame.
        //we add '-1' because we are reading two frames from the video at a time.
        //if this is not included, we get a memory error!
        while(capture.get(CV_CAP_PROP_POS_FRAMES) <capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
            // Get frame from camera
            capture.read(frame);
            // Update counter
            ++count;
            //Resize to half size to speed up processing
            resize(frame, frame, Size(frame.size().width/2, frame.size().height/2));
            //Blur to suppress pixel noise before background subtraction
            blur(frame, frame, Size(5,5));

            // Background subtraction (0.05 = learning rate of the MOG model)
            pMOG->operator()(frame, fg_mask,0.05);

            //pre-processing: morphological close fills small holes in the mask
            morphologyEx(fg_mask, binaryImage, CV_MOP_CLOSE, element);

            //threshold intensity image at a given sensitivity value
            cv::threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
            morphOps(binaryImage);

            if(debugMode==true){
                imshow("frame", frame);
                imshow("fg_mask", fg_mask);
                imshow("final", binaryImage);
            }else{
                //if not in debug mode, destroy the windows so we don't see them anymore
                cv::destroyWindow("frame");
                cv::destroyWindow("fg_mask");
                cv::destroyWindow("final");
            }

            //if tracking enabled, search for contours in our thresholded image
            if(trackingEnabled){

                //updates the tracked position, draws the overlay and logs to file_
                searchForMovement(binaryImage,frame);

                //find and outline every remaining blob on the original frame
                ContourImg = binaryImage.clone();
                vector< vector< Point> > contours;
                findContours(ContourImg,
                    contours, // a vector of contours
                    CV_RETR_EXTERNAL, // retrieve the external contours
                    CV_CHAIN_APPROX_NONE); // all pixels of each contours

                //draw a red bounding rectangle around each detected blob
                //(unused 'vector<Rect> output' from the original removed)
                vector< vector< Point> >::iterator itc= contours.begin();
                while (itc!=contours.end()) {
                    Rect mr= boundingRect(Mat(*itc));
                    rectangle(frame, mr, CV_RGB(255,0,0));
                    ++itc;
                }
            }
            //shown unconditionally so the window reappears after debug mode
            imshow("frame", frame);
            // Save foreground mask (cast needed: VS2010 lacks to_string(int))
            string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png";
            imwrite("D:\\SO\\temp\\" + name, fg_mask);

            switch(waitKey(10)){

                case 27: //'esc' key has been pressed, exit program.
                    //file_ is flushed and closed by its destructor on return.
                    return 0;
                case 116: //'t' has been pressed. this will toggle tracking
                    trackingEnabled = !trackingEnabled;
                    if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
                    else cout<<"Tracking enabled."<<endl;
                    break;
                case 100: //'d' has been pressed. this will toggle debug mode
                    debugMode = !debugMode;
                    if(debugMode == true) cout<<"Debug mode enabled."<<endl;
                    else cout<<"Debug mode disabled."<<endl;
                    break;
                case 112: //'p' has been pressed. this will pause/resume the code.
                    pause = !pause;
                    if(pause == true){
                        cout<<"Code paused, press 'p' again to resume"<<endl;
                        while (pause == true){
                            //stay in this loop until 'p' is pressed again
                            switch (waitKey()){
                                case 112:
                                    //change pause back to false
                                    pause = false;
                                    cout<<"Code Resumed"<<endl;
                                    break;
                            }
                        }
                        // the camera will be deinitialized automatically in VideoCapture destructor
                    }
            }
        }
        //release the capture before re-opening and looping again.
        capture.release();
        //BUGFIX: the original called file_.close() here, INSIDE the endless
        //replay loop, so after the first pass over the video every later
        //write to file_ was silently dropped. The log must stay open across
        //passes; the ofstream destructor closes it when the program exits.
    }
    return 0;
}
+0

Vielen Dank für Ihre Antwort. Wenn ich das tue, bekomme ich den Fehler "diese Deklaration hat keine Speicherklasse oder Typbezeichner" für die Start- und Endteile – Zakarya

+0

Versuchen Sie, den Teil "ofstream file_;" direkt unter die Zeile "using namespace std;" zu setzen; das macht ihn zu einer globalen Variablen. Auf diese Weise können Sie von überall darauf zugreifen. Das ist nicht wirklich guter Stil, aber in Ihrem Beispiel wird es zumindest funktionieren; auf gute Praktiken können Sie sich später konzentrieren :-) –

+0

funktioniert immer noch nicht, ich habe meinen vorherigen Kommentar bearbeitet – Zakarya

Verwandte Themen