
How do I detect when dlib's correlation_tracker has lost the target?

I added a correlation_tracker to a multithreaded face-tracking script, and oddly enough it generally does a good job of following a face around the screen, but if you hold your hand over the camera it keeps reporting the same coordinates and marking the same region. Is there a good way to detect when the tracked object has actually left the frame? Or does this require periodically re-running the detector object, with its slower detection of all faces?

from __future__ import division 
import sys 
from time import time, sleep 
import threading 

import dlib 
from skimage import io #needed below for io.imread() when files are given


detector = dlib.get_frontal_face_detector() 
win = dlib.image_window() 

def adjustForOpenCV(image):
    """OpenCV delivers BGR but dlib expects RGB; swap the channels in place."""
    for row in image:
     for px in row:
      r = px[2]
      px[2] = px[0]
      px[0] = r
    return image
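# Note: this per-pixel loop is very slow in Python. Assuming the frame is a
# NumPy ndarray (as cv2 returns), the same BGR-to-RGB swap can be done with a
# single slice, image[:, :, ::-1], or with cv2.cvtColor(image, cv2.COLOR_BGR2RGB).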

class webCamGrabber(threading.Thread): 
    def __init__(self): 
     threading.Thread.__init__(self) 
     #Lock for when you can read/write self.image: 
     #self.imageLock = threading.Lock() 
     self.image = False 

     from cv2 import VideoCapture, cv 
     from time import time 

     self.cam = VideoCapture(0) #set the port of the camera as before 
     #Request a smaller frame; some camera drivers ignore these settings:
     self.cam.set(cv.CV_CAP_PROP_FRAME_WIDTH, 160)
     self.cam.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 120)
     #self.cam.set(cv.CV_CAP_PROP_FPS, 1) 


    def run(self): 
     while True: 
      start = time() 
      #self.imageLock.acquire() 
      retval, frame = self.cam.read() #returns a success flag and the frame
      if retval:
       self.image = frame
      #print("readimage: " + str(time() - start)) 
      #sleep(0.1) 

if len(sys.argv[1:]) == 0: 

    #Start webcam reader thread: 
    camThread = webCamGrabber() 
    camThread.start() 

    #Reuse the detector and results window that were created above.

    while True: 
     #camThread.imageLock.acquire() 
     if camThread.image is not False: 
      print("enter") 
      start = time() 

      myimage = adjustForOpenCV(camThread.image) #swap BGR to RGB for dlib


      dets = detector(myimage, 0) 
      #camThread.imageLock.release() 
      print "your faces:" +str(len(dets)) 
      nearFace = None 
      nearFaceArea = 0 

      for i, d in enumerate(dets): 
       #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
       # i, d.left(), d.top(), d.right(), d.bottom())) 
       screenArea = (d.right() - d.left()) * (d.bottom() - d.top()) 
       #print 'area', screenArea 
       if screenArea > nearFaceArea:
        nearFaceArea = screenArea
        nearFace = d
      print("face-find-time: " + str(time() - start)) 

      print("from left: {}".format(((nearFace.left() + nearFace.right())/2)/len(camThread.image[0]))) 
      print("from top: {}".format(((nearFace.top() + nearFace.bottom())/2)/len(camThread.image))) 

      start = time() 
      win.clear_overlay() 
      win.set_image(myimage) 
      win.add_overlay(nearFace) 
      print("show: " + str(time() - start)) 

      if nearFace != None: 
       points = (nearFace.left(), nearFace.top(), nearFace.right(), nearFace.bottom()) 
       tracker = dlib.correlation_tracker() 
       tracker.start_track(myimage, dlib.rectangle(*points)) 

       while True: 
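        #Careful: adjustForOpenCV swaps channels in place, so if the camera
        #thread has not delivered a new frame yet, the next pass re-swaps the
        #same frame back to BGR.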
        myImage = adjustForOpenCV(camThread.image) 

        tracker.update(myImage) 
        rect = tracker.get_position() 
        cx = (rect.right() + rect.left())/2 
        cy = (rect.top() + rect.bottom())/2 
        print('correlationTracker %s,%s' % (cx, cy)) 
        print(rect)
        win.clear_overlay() 
        win.set_image(myImage) 
        win.add_overlay(rect) 
        sleep(0.1) 

      #dlib.hit_enter_to_continue() 




for f in sys.argv[1:]: 
    print("Processing file: {}".format(f)) 
    img = io.imread(f) 
    # The 1 in the second argument indicates that we should upsample the image 
    # 1 time. This will make everything bigger and allow us to detect more 
    # faces. 
    dets = detector(img, 1) 
    print("Number of faces detected: {}".format(len(dets))) 
    for i, d in enumerate(dets): 
     print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
      i, d.left(), d.top(), d.right(), d.bottom())) 

    win.clear_overlay() 
    win.set_image(img) 
    win.add_overlay(dets) 
    dlib.hit_enter_to_continue() 


# Finally, if you really want to you can ask the detector to tell you the score 
# for each detection. The score is bigger for more confident detections. 
# Also, the idx tells you which of the face sub-detectors matched. This can be 
# used to broadly identify faces in different orientations. 
if (len(sys.argv[1:]) > 0): 
    img = io.imread(sys.argv[1]) 
    dets, scores, idx = detector.run(img, 1) 
    for i, d in enumerate(dets): 
     print("Detection {}, score: {}, face_type:{}".format(
      d, scores[i], idx[i])) 

tracker.update returns a score that can be used to detect whether the track has been lost – ZawLin


@ZawLin Thanks! That number does seem to drop when the image changes, but I'm not quite sure what the units are – the source seems to define it as 'const double psr = (G(py(),px()) - rs.mean())/rs.stddev();' (http://dlib.net/dlib/image_processing/correlation_tracker.h.html) – NoBugs
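For reference, a quick way to act on that value in the inner tracking loop above; the cut-off of 7 is only an illustrative guess, not a documented constant:

score = tracker.update(myImage) #update() returns the confidence score
print('tracker confidence: %s' % score)
if score < 7: #illustrative threshold; low values suggest the track is lost
    print('target probably left the frame')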

Answer


You have to run the face detector every now and then to see whether the face is still there.
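Combining that with the score from the comments, a minimal sketch of the approach might look like this. The threshold and re-detection interval are assumptions to tune, and get_rgb_frame() is a hypothetical stand-in for your camera-reading code:

import dlib

detector = dlib.get_frontal_face_detector()
tracker = dlib.correlation_tracker()

TRACK_THRESHOLD = 7 #assumed cut-off; tune for your camera and lighting
REDETECT_EVERY = 30 #re-run the full (slow) detector every N frames

tracking = False
frame_count = 0
while True:
    img = get_rgb_frame() #hypothetical helper: grab one frame as RGB
    frame_count += 1

    if tracking:
        score = tracker.update(img) #returns the tracker's confidence
        if score < TRACK_THRESHOLD or frame_count % REDETECT_EVERY == 0:
            tracking = False #low confidence or scheduled check: re-detect
        else:
            rect = tracker.get_position() #current track position

    if not tracking:
        dets = detector(img, 0)
        if len(dets) > 0:
            tracker.start_track(img, dets[0]) #restart on the first face
            tracking = True

Even with the score check, the scheduled re-detection is worth keeping: correlation trackers can drift onto background texture while still reporting a fairly confident score.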
