2016-03-25 19 views
-2

Ich mache meine letzte Projektstudie mit einer Raspberry-Pi-Karte. Ich möchte zwei Kameras gleichzeitig verwenden und dasselbe Python-Programm mit beiden Kameras ausführen, um je ein Bild aufzunehmen und auszuwerten, um das Produkt zu erkennen. Wie kann ich das machen? Wie verwende ich dafür Multithreading in Python 2.7?

from os import listdir 
from os.path import isfile, join 
import numpy 
import cv2 
import os 
import sys 

def match_images(img1, img2):
    """Detect SURF features in both grayscale images and return the
    ratio-filtered keypoint pairs (see filter_matches)."""
    # Low Hessian threshold (200) to get plenty of keypoints.
    # NOTE(review): cv2.SURF is the OpenCV 2.x API; newer builds expose
    # it as cv2.xfeatures2d.SURF_create -- confirm installed version.
    detector = cv2.SURF(200, 1, 1)
    # Brute-force matcher with L2 norm, the right metric for SURF descriptors.
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    kp1, desc1 = detector.detectAndCompute(img1, None)
    kp2, desc2 = detector.detectAndCompute(img2, None)
    # print() call form works on both Python 2.7 and 3 (the original
    # used the Python-2-only print statement).
    print('img1 %d features, img2 %d features' % (len(kp1), len(kp2)))
    # k=2 so filter_matches can apply Lowe's ratio test (best vs. 2nd best).
    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    kp_pairs = filter_matches(kp1, kp2, raw_matches)
    return kp_pairs

def filter_matches(kp1, kp2, matches, ratio=0.75):
    """Apply Lowe's ratio test to knnMatch results.

    matches is a sequence of (best, second_best) DMatch pairs.  A best
    match is kept only when its distance is clearly smaller than the
    second best's (distance < second.distance * ratio).

    Returns a list of (keypoint_from_img1, keypoint_from_img2) tuples.
    """
    mkp1, mkp2 = [], []
    for m in matches:
        # Entries with fewer than 2 neighbours cannot be ratio-tested; skip.
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            best = m[0]
            mkp1.append(kp1[best.queryIdx])
            mkp2.append(kp2[best.trainIdx])
    # list(...) so callers can take len() of the result on Python 3 too,
    # where zip returns a lazy iterator (callers do len(kp_pairs)).
    kp_pairs = list(zip(mkp1, mkp2))
    return kp_pairs

def explore_match(win, img1, img2, kp_pairs, status=None, H=None):
    """Show both grayscale images side by side with their matches drawn.

    Inliers (per *status*) get green dots and connecting lines; outliers
    get red crosses.  If a homography *H* is given, img1's outline is
    projected onto img2's half of the canvas.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Canvas wide enough for both images, tall enough for the taller one.
    vis = numpy.zeros((max(h1, h2), w1 + w2), numpy.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1 + w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
    if H is not None:
        # Project img1's corners through H and outline the found object
        # on img2's side (shifted right by w1).
        corners = numpy.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = numpy.int32(
            cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
        cv2.polylines(vis, [corners], True, (255, 255, 255))
    if status is None:
        # No inlier mask supplied: treat every pair as an inlier.
        status = numpy.ones(len(kp_pairs), numpy.bool_)
    # Pixel coordinates; img2's points are shifted right by w1.
    p1 = numpy.int32([kpp[0].pt for kpp in kp_pairs])
    p2 = numpy.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
    green = (0, 255, 0)
    red = (0, 0, 255)
    r = 2          # half-size of the outlier cross (hoisted; loop-invariant)
    thickness = 3  # outlier cross line thickness (hoisted; loop-invariant)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            # Inlier: small filled green dot on each image half.
            cv2.circle(vis, (x1, y1), 2, green, -1)
            cv2.circle(vis, (x2, y2), 2, green, -1)
        else:
            # Outlier: red X on each image half.
            cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), red, thickness)
            cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), red, thickness)
            cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), red, thickness)
            cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), red, thickness)
    # Second pass: connect each inlier pair across the two halves.
    # (Unused locals white/kp_color/vis0 from the original were removed.)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis, (x1, y1), (x2, y2), green)
    cv2.imshow(win, vis)

def draw_matches(window_name, kp_pairs, img1, img2):
    """Estimate a RANSAC homography from the matched pairs and display them.

    With at least 100 pairs, the homography is computed and an LED is
    pulsed via the Pi's GPIO to signal a recognised product.
    """
    mkp1, mkp2 = zip(*kp_pairs)
    p1 = numpy.float32([kp.pt for kp in mkp1])
    p2 = numpy.float32([kp.pt for kp in mkp2])
    # Default to "no homography": the original referenced H/status below
    # without defining them when len(kp_pairs) < 100, raising NameError.
    H, status = None, None
    if len(kp_pairs) >= 100:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d/%d inliers/matched' % (numpy.sum(status), len(status)))
        # BUG FIX: the module is spelled RPi.GPIO, not RPI.GPIO.
        import RPi.GPIO as GPIO
        import time
        # NOTE(review): LEDPin is never defined in this file and
        # GPIO.setmode() is never called -- both must be set up by the
        # caller before this works.  TODO confirm.
        GPIO.setup(LEDPin, GPIO.OUT)
        GPIO.output(LEDPin, True)   # LED on for 5 s, then off
        print("LED ON")
        time.sleep(5)
        GPIO.output(LEDPin, False)
    if len(p1):
        explore_match(window_name, img1, img2, kp_pairs, status, H)

if __name__ == '__main__':

    camera_port = 1
    ramp_frames = 2  # throw-away frames so the camera can auto-adjust

    camera = cv2.VideoCapture(camera_port)

    def get_image():
        """Grab a single frame from the camera (None on read failure)."""
        retval, im = camera.read()
        return im

    # Discard the first few frames; many webcams need them to settle.
    # (In the original paste these lines were indented under get_image
    # AFTER its return statement, making them unreachable.)
    for i in range(ramp_frames):
        temp = get_image()
    print("Taking image...")
    camera_capture = get_image()
    file = "C:/Users/oussema/Desktop/projet/scripts/test_image.png"
    cv2.imwrite(file, camera_capture)
    del(camera)  # release the capture device

    img2 = cv2.imread('test_image.png', 0)
    if img2 is None:
        print('Failed to load img2:')
        sys.exit(1)

    # Try each product template in turn until one matches.  This replaces
    # the original six-level copy-pasted if/elif ladder, whose
    # `elif len(kp_pairs) <= 100` branches also overlapped the `>= 100`
    # case at exactly 100 matches.
    templates = ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg', '6.jpg']
    for template in templates:
        img1 = cv2.imread(template, 0)
        if img1 is None:
            print('Failed to load img1:')
            sys.exit(1)
        kp_pairs = match_images(img1, img2)
        if len(kp_pairs) >= 100:
            draw_matches('find_obj', kp_pairs, img1, img2)
            cv2.waitKey()
            cv2.destroyAllWindows()
            break
        print('%d matches found, not enough for homography estimation'
              % len(kp_pairs))
+0

Haben Sie die Multi-Thread-Bibliothek versucht? – Seekheart

+1

Mögliches Duplikat von [Wie verwende ich Threading in Python?] (http://stackoverflow.com/questions/2846653/how-to-use-threading-in-python) – Signal

+0

Nein, ich verstehe nicht, wie ich meinen Code per Multithreading gleichzeitig mit den 2 Kameras ausführen kann –

Antwort

0

Hier ist ein Beispiel, ich habe Platz für Sie, um Ihren Kamera-Code zu setzen.

# NOTE(review): the `thread` module is Python 2 only; on Python 3 use
# `threading` (or `_thread`).
import thread
import time

# Worker run once per camera thread.
def camera(threadName, delay):
    # TODO: put the capture/processing code for this camera here.
    # A statement is required: the original body was comment-only,
    # which is a syntax error (IndentationError).
    pass

# Start one thread per camera.
try:
    thread.start_new_thread(camera, ("camera-1", 2,))
    thread.start_new_thread(camera, ("camera-2", 4,))
except Exception:
    # Narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit.
    print("Error: unable to start thread")

# Keep the main thread alive so the daemon-like threads can run;
# sleep instead of the original 100%-CPU `while 1: pass` busy-wait.
while 1:
    time.sleep(1)
+0

Wenn Sie meinen Code sehen, können Sie mir bitte helfen –

1

Ich glaube nicht, dass Sie 2 Threads zur gleichen Zeit wegen GIL laufen lassen können. Also, vielleicht können Sie versuchen multiprocessing Modul.

Nur ein Versuch:

from multiprocessing import Process

def Camera(name):
    """Worker entry point: greet *name* from its own process."""
    print('hello', name)

if __name__ == '__main__':
    # One process per camera; start them all, then wait for each.
    workers = [Process(target=Camera, args=(who,)) for who in ('bob', 'jack')]
    for proc in workers:
        proc.start()
    for proc in workers:
        proc.join()
+0

bitte, wenn Sie meinen Code sehen, wie ich die Multiprocessing oder Multithread kann ich diesen Code mit 2 Kameras gleichzeitig tun? –

+0

@OussemaHelal, warten Sie bitte, wenn Sie mehr Details zeigen müssen, bearbeiten Sie einfach Ihre Frage und zeigen Sie Ihren Code in der obigen Frage. Zeigen Sie den Code mit den richtigen Einrückungen an. Vielen Dank! –

+0

sir Sie können mir Ihre E-Mail senden oder wenn Sie möchten, dass ich Ihnen den Code per E-Mail senden kann, ist meine E-Mail: [email protected], weil ich nicht verstehen kann, was Sie wollen für mich genau –

0
from os import listdir 
from os.path import isfile, join 
import numpy 
import cv2 
import os 
import sys 

def match_images(img1, img2):
    """Detect SURF features in both grayscale images and return the
    ratio-filtered keypoint pairs (see filter_matches)."""
    # Low Hessian threshold (200) to get plenty of keypoints.
    # NOTE(review): cv2.SURF is the OpenCV 2.x API; newer builds expose
    # it as cv2.xfeatures2d.SURF_create -- confirm installed version.
    detector = cv2.SURF(200, 1, 1)
    # Brute-force matcher with L2 norm, the right metric for SURF descriptors.
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    kp1, desc1 = detector.detectAndCompute(img1, None)
    kp2, desc2 = detector.detectAndCompute(img2, None)
    # print() call form works on both Python 2.7 and 3 (the original
    # used the Python-2-only print statement).
    print('img1 %d features, img2 %d features' % (len(kp1), len(kp2)))
    # k=2 so filter_matches can apply Lowe's ratio test (best vs. 2nd best).
    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    kp_pairs = filter_matches(kp1, kp2, raw_matches)
    return kp_pairs

def filter_matches(kp1, kp2, matches, ratio=0.75):
    """Apply Lowe's ratio test to knnMatch results.

    matches is a sequence of (best, second_best) DMatch pairs.  A best
    match is kept only when its distance is clearly smaller than the
    second best's (distance < second.distance * ratio).

    Returns a list of (keypoint_from_img1, keypoint_from_img2) tuples.
    """
    mkp1, mkp2 = [], []
    for m in matches:
        # Entries with fewer than 2 neighbours cannot be ratio-tested; skip.
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            best = m[0]
            mkp1.append(kp1[best.queryIdx])
            mkp2.append(kp2[best.trainIdx])
    # list(...) so callers can take len() of the result on Python 3 too,
    # where zip returns a lazy iterator (callers do len(kp_pairs)).
    kp_pairs = list(zip(mkp1, mkp2))
    return kp_pairs

def explore_match(win, img1, img2, kp_pairs, status=None, H=None):
    """Show both grayscale images side by side with their matches drawn.

    Inliers (per *status*) get green dots and connecting lines; outliers
    get red crosses.  If a homography *H* is given, img1's outline is
    projected onto img2's half of the canvas.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Canvas wide enough for both images, tall enough for the taller one.
    vis = numpy.zeros((max(h1, h2), w1 + w2), numpy.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1 + w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
    if H is not None:
        # Project img1's corners through H and outline the found object
        # on img2's side (shifted right by w1).
        corners = numpy.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = numpy.int32(
            cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
        cv2.polylines(vis, [corners], True, (255, 255, 255))
    if status is None:
        # No inlier mask supplied: treat every pair as an inlier.
        status = numpy.ones(len(kp_pairs), numpy.bool_)
    # Pixel coordinates; img2's points are shifted right by w1.
    p1 = numpy.int32([kpp[0].pt for kpp in kp_pairs])
    p2 = numpy.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
    green = (0, 255, 0)
    red = (0, 0, 255)
    r = 2          # half-size of the outlier cross (hoisted; loop-invariant)
    thickness = 3  # outlier cross line thickness (hoisted; loop-invariant)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            # Inlier: small filled green dot on each image half.
            cv2.circle(vis, (x1, y1), 2, green, -1)
            cv2.circle(vis, (x2, y2), 2, green, -1)
        else:
            # Outlier: red X on each image half.
            cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), red, thickness)
            cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), red, thickness)
            cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), red, thickness)
            cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), red, thickness)
    # Second pass: connect each inlier pair across the two halves.
    # (Unused locals white/kp_color/vis0 from the original were removed.)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis, (x1, y1), (x2, y2), green)
    cv2.imshow(win, vis)

def draw_matches(window_name, kp_pairs, img1, img2):
    """Estimate a RANSAC homography from the matched pairs and display them.

    With at least 100 pairs, the homography is computed and an LED is
    pulsed via the Pi's GPIO to signal a recognised product.
    """
    mkp1, mkp2 = zip(*kp_pairs)
    p1 = numpy.float32([kp.pt for kp in mkp1])
    p2 = numpy.float32([kp.pt for kp in mkp2])
    # Default to "no homography": the original referenced H/status below
    # without defining them when len(kp_pairs) < 100, raising NameError.
    H, status = None, None
    if len(kp_pairs) >= 100:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d/%d inliers/matched' % (numpy.sum(status), len(status)))
        # BUG FIX: the module is spelled RPi.GPIO, not RPI.GPIO.
        import RPi.GPIO as GPIO
        import time
        # NOTE(review): LEDPin is never defined in this file and
        # GPIO.setmode() is never called -- both must be set up by the
        # caller before this works.  TODO confirm.
        GPIO.setup(LEDPin, GPIO.OUT)
        GPIO.output(LEDPin, True)   # LED on for 5 s, then off
        print("LED ON")
        time.sleep(5)
        GPIO.output(LEDPin, False)
    if len(p1):
        explore_match(window_name, img1, img2, kp_pairs, status, H)

if __name__ == '__main__':

    camera_port = 1
    ramp_frames = 2  # throw-away frames so the camera can auto-adjust

    camera = cv2.VideoCapture(camera_port)

    def get_image():
        """Grab a single frame from the camera (None on read failure)."""
        retval, im = camera.read()
        return im

    # Discard the first few frames; many webcams need them to settle.
    # (In the original paste these lines were indented under get_image
    # AFTER its return statement, making them unreachable.)
    for i in range(ramp_frames):
        temp = get_image()
    print("Taking image...")
    camera_capture = get_image()
    file = "C:/Users/oussema/Desktop/projet/scripts/test_image.png"
    cv2.imwrite(file, camera_capture)
    del(camera)  # release the capture device

    img2 = cv2.imread('test_image.png', 0)
    if img2 is None:
        print('Failed to load img2:')
        sys.exit(1)

    # Try each product template in turn until one matches.  This replaces
    # the original six-level copy-pasted if/elif ladder, whose
    # `elif len(kp_pairs) <= 100` branches also overlapped the `>= 100`
    # case at exactly 100 matches (stray backticks after '6.jpg' removed).
    templates = ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg', '6.jpg']
    for template in templates:
        img1 = cv2.imread(template, 0)
        if img1 is None:
            print('Failed to load img1:')
            sys.exit(1)
        kp_pairs = match_images(img1, img2)
        if len(kp_pairs) >= 100:
            draw_matches('find_obj', kp_pairs, img1, img2)
            cv2.waitKey()
            cv2.destroyAllWindows()
            break
        print('%d matches found, not enough for homography estimation'
              % len(kp_pairs))
0

Ich benutze 2 Kamera, weil ich ein Problem haben, wenn das Produkt auf der Matte ist