
DrawMatching between two images - image recognition

I am trying to display the matched keypoints between two images (one captured by my camera, the other coming from my database).

Can someone help me write a DrawMatches call into my code so that the matching lines between the two images are displayed?

Here is my code:

import android.content.Context;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.opencv.android.Utils;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public final class ImageDetectionFilter { 

// Flag indicating whether the target image's corners were found 
// and its outline should be drawn. 
private boolean flagDraw; 

// The reference image (this detector's target). 
private final Mat mReferenceImage; 

// Features of the reference image. 
private final MatOfKeyPoint mReferenceKeypoints = new MatOfKeyPoint(); 

// Descriptors of the reference image's features. 
private final Mat mReferenceDescriptors = new Mat(); 

// The corner coordinates of the reference image, in pixels. 
// CvType defines the color depth, number of channels, and 
// channel layout in the image. Here, each point is represented 
// by two 32-bit floats. 
private final Mat mReferenceCorners = new Mat(4, 1, CvType.CV_32FC2); 

// Features of the scene (the current frame). 
private final MatOfKeyPoint mSceneKeypoints = new MatOfKeyPoint(); 
// Descriptors of the scene's features. 
private final Mat mSceneDescriptors = new Mat(); 
// Tentative corner coordinates detected in the scene, in 
// pixels. 
private final Mat mCandidateSceneCorners = 
    new Mat(4, 1, CvType.CV_32FC2); 
// Good corner coordinates detected in the scene, in pixels. 
private final Mat mSceneCorners = new Mat(4, 1, CvType.CV_32FC2); 
// The good detected corner coordinates, in pixels, as integers. 
private final MatOfPoint mIntSceneCorners = new MatOfPoint(); 

// A grayscale version of the scene. 
private final Mat mGraySrc = new Mat(); 
// Tentative matches of scene features and reference features. 
private final MatOfDMatch mMatches = new MatOfDMatch(); 

// A feature detector, which finds features in images. 
private final FeatureDetector mFeatureDetector = 
    FeatureDetector.create(FeatureDetector.ORB); 
// A descriptor extractor, which creates descriptors of 
// features. 
private final DescriptorExtractor mDescriptorExtractor = 
    DescriptorExtractor.create(DescriptorExtractor.ORB); 
// A descriptor matcher, which matches features based on their 
// descriptors. 
private final DescriptorMatcher mDescriptorMatcher = DescriptorMatcher 
    .create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT); 

// The color of the outline drawn around the detected image. 
private final Scalar mLineColor = new Scalar(0, 255, 0); 

public ImageDetectionFilter(final Context context, 
    final int referenceImageResourceID) throws IOException { 

// Load the reference image from the app's resources. 
// It is loaded in BGR (blue, green, red) format. 
mReferenceImage = Utils.loadResource(context, referenceImageResourceID, 
     Imgcodecs.CV_LOAD_IMAGE_COLOR); 

// Create grayscale and RGBA versions of the reference image. 
final Mat referenceImageGray = new Mat(); 
Imgproc.cvtColor(mReferenceImage, referenceImageGray, 
     Imgproc.COLOR_BGR2GRAY); 

Imgproc.cvtColor(mReferenceImage, mReferenceImage, 
     Imgproc.COLOR_BGR2RGBA); 

// Store the reference image's corner coordinates, in pixels. 
mReferenceCorners.put(0, 0, new double[] { 0.0, 0.0 }); 
mReferenceCorners.put(1, 0, 
     new double[] { referenceImageGray.cols(),0.0 }); 
mReferenceCorners.put(2, 0, 
     new double[] { referenceImageGray.cols(), 
     referenceImageGray.rows() }); 
mReferenceCorners.put(3, 0, 
     new double[] { 0.0, referenceImageGray.rows() }); 

// Detect the reference features and compute their 
// descriptors. 
mFeatureDetector.detect(referenceImageGray, 
     mReferenceKeypoints); 
mDescriptorExtractor.compute(referenceImageGray, 
     mReferenceKeypoints,mReferenceDescriptors); 
} 

public void apply(Mat src, Mat dst) { 

// Convert the scene to grayscale. 
Imgproc.cvtColor(src, mGraySrc, Imgproc.COLOR_RGBA2GRAY); 

// Detect the same features, compute their descriptors, 
// and match the scene descriptors to reference descriptors. 
mFeatureDetector.detect(mGraySrc, mSceneKeypoints); 
mDescriptorExtractor.compute(mGraySrc, mSceneKeypoints, 
     mSceneDescriptors); 
mDescriptorMatcher.match(mSceneDescriptors, 
     mReferenceDescriptors,mMatches); 

findSceneCorners(); 

// If the corners have been found, draw an outline around the 
// target image. 
// Else, draw a thumbnail of the target image. 
draw(src, dst); 

} 

private void findSceneCorners() { 
flagDraw = false; 

final List<DMatch> matchesList = mMatches.toList(); 

if (matchesList.size() < 4) { 
    // There are too few matches to find the homography. 
    return; 
} 

final List<KeyPoint> referenceKeypointsList = 
     mReferenceKeypoints.toList(); 
final List<KeyPoint> sceneKeypointsList = 
     mSceneKeypoints.toList(); 

// Calculate the max and min distances between keypoints. 
double maxDist = 0.0; 
double minDist = Double.MAX_VALUE; 

for (final DMatch match : matchesList) { 
    final double dist = match.distance; 
    if (dist < minDist) { 
     minDist = dist; 
    } 
    if (dist > maxDist) { 
     maxDist = dist; 
    } 
} 

// The thresholds for minDist are chosen subjectively 
// based on testing. The unit is not related to pixel 
// distances; it is related to the number of failed tests 
// for similarity between the matched descriptors. 
if (minDist > 50.0) { 
    // The target is completely lost. 
    // Discard any previously found corners. 
    mSceneCorners.create(0, 0, mSceneCorners.type()); 
    return; 
} else if (minDist > 25.0) { 
    // The target is lost but maybe it is still close. 
    // Keep any previously found corners. 
    return; 
} 

// Identify "good" keypoints based on match distance. 
final ArrayList<Point> goodReferencePointsList = 
     new ArrayList<Point>(); 
final ArrayList<Point> goodScenePointsList = 
     new ArrayList<Point>(); 
final double maxGoodMatchDist = 1.75 * minDist; 
for (final DMatch match : matchesList) { 
    if (match.distance < maxGoodMatchDist) { 
     goodReferencePointsList.add(
       referenceKeypointsList.get(match.trainIdx).pt); 
     goodScenePointsList 
       .add(sceneKeypointsList.get(match.queryIdx).pt); 
    } 
} 
if (goodReferencePointsList.size() < 4 
     || goodScenePointsList.size() < 4) { 
    // There are too few good points to find the homography. 
    return; 
} 
// There are enough good points to find the homography. 
// (Otherwise, the method would have already returned.) 

// Convert the matched points to MatOfPoint2f format, as 
// required by the Calib3d.findHomography function. 
final MatOfPoint2f goodReferencePoints = new MatOfPoint2f(); 
goodReferencePoints.fromList(goodReferencePointsList); 

final MatOfPoint2f goodScenePoints = new MatOfPoint2f(); 
goodScenePoints.fromList(goodScenePointsList); 

// Find the homography. 
final Mat homography = Calib3d.findHomography(
     goodReferencePoints,goodScenePoints); 

// Use the homography to project the reference corner 
// coordinates into scene coordinates. 
Core.perspectiveTransform(mReferenceCorners, 
     mCandidateSceneCorners,homography); 

// Convert the scene corners to integer format, as required 
// by the Imgproc.isContourConvex function. 
mCandidateSceneCorners.convertTo(mIntSceneCorners, 
     CvType.CV_32S); 

// Check whether the corners form a convex polygon. If not, 
// (that is, if the corners form a concave polygon), the 
// detection result is invalid because no real perspective can 
// make the corners of a rectangular image look like a concave 
// polygon! 
if (Imgproc.isContourConvex(mIntSceneCorners)) { 
    // The corners form a convex polygon, so record them as 
    // valid scene corners. 
    mCandidateSceneCorners.copyTo(mSceneCorners); 
    flagDraw = true; 
} 

} 

protected void draw(final Mat src, final Mat dst) { 

if (dst != src) { 
    src.copyTo(dst); 
} 

// Outline the found target in green. 
Imgproc.line(dst, new Point(mSceneCorners.get(0, 0)), new Point(
     mSceneCorners.get(1, 0)), mLineColor, 4); 
Imgproc.line(dst, new Point(mSceneCorners.get(1, 0)), new Point(
     mSceneCorners.get(2, 0)), mLineColor, 4); 
Imgproc.line(dst, new Point(mSceneCorners.get(2, 0)), new Point(
     mSceneCorners.get(3, 0)), mLineColor, 4); 
Imgproc.line(dst, new Point(mSceneCorners.get(3, 0)), new Point(
     mSceneCorners.get(0, 0)), mLineColor, 4); 
} 

public boolean getFlagDraw(){ 

return flagDraw; 
} 
} 

It would be helpful if you also shared some sample images for the matching. – ZdaR


@ZdaR, I get an error when running the following statement. Could you take a look and let me know what is wrong? Mat outImg = new Mat(); Features2d.drawMatches(mReferenceImage, mReferenceKeypoints, mCandidateSceneCorners, mSceneKeypoints, mMatches, outImg); – WhoAmI
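A likely cause of that error: the third argument to Features2d.drawMatches must be the second image itself, not mCandidateSceneCorners (a 4x1 Mat of corner coordinates), and drawMatches expects 1- or 3-channel 8-bit images, while both mReferenceImage and the camera frame are RGBA here. A minimal, untested sketch of a corrected call, reusing src from apply() and the fields of the class above:

// Requires: import org.opencv.features2d.Features2d;
// drawMatches expects 1- or 3-channel images, so drop the alpha
// channel from the RGBA images used elsewhere in this class.
Mat sceneRgb = new Mat();
Mat referenceRgb = new Mat();
Imgproc.cvtColor(src, sceneRgb, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(mReferenceImage, referenceRgb, Imgproc.COLOR_RGBA2RGB);

// mMatches was computed by match(mSceneDescriptors, mReferenceDescriptors),
// so the scene is the query (first) image and the reference is the
// train (second) image.
Mat outImg = new Mat();
Features2d.drawMatches(sceneRgb, mSceneKeypoints,
     referenceRgb, mReferenceKeypoints, mMatches, outImg);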

Answer


I'm not firm in Java and not sure whether this will be helpful, but I'm posting an example of how I managed to achieve this with OpenCV in Python. Maybe it helps you as a guideline.

(The example is adapted from this site, which has further explanations that may be of interest.)

In this example, I am finding a rotated version of one cartoon animal in a picture of six cartoon animals.

Basically, you want to call cv2.drawMatches() with the keypoints from your train and query images and mask out the bad matches. The relevant part of my code is at the very bottom.

Your example is not a minimal code example and I haven't worked through all of it, but it seems that you already have your keypoints and should be ready to go?

[Three images: the rotated query animal, the picture of six cartoon animals, and the resulting matches drawn between them.]

import numpy as np 
import cv2 
from matplotlib import pyplot as plt 

MIN_MATCH_COUNT = 4 


img1 = cv2.imread('d:/one_animal_rotated.jpg',0)   # queryImage 
img2 = cv2.imread('d:/many_animals.jpg',0) # trainImage 

# Initiate SIFT detector 
sift = cv2.xfeatures2d.SIFT_create(0,3,0) 
# find the keypoints and descriptors with SIFT 
kp1, des1 = sift.detectAndCompute(img1,None) 
kp2, des2 = sift.detectAndCompute(img2,None) 

#find matches using FLANN 
FLANN_INDEX_KDTREE = 0 
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) 
search_params = dict(checks = 50) 
flann = cv2.FlannBasedMatcher(index_params, search_params) 
matches = flann.knnMatch(des1,des2,k=2) 

#apply ratio test to find best matches (values from 0.7-1 made sense here) 
good = [] 
for m,n in matches: 
    if m.distance < 1*n.distance: 
     good.append(m) 

#find homography to transform the edges of the query image and draw them on the train image 
#This is also used to mask all keypoints that aren't inside this box further below. 
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good]).reshape(-1,1,2) 
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good]).reshape(-1,1,2) 

M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) 
matchesMask = mask.ravel().tolist() 

h,w = img1.shape 
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) 
dst = cv2.perspectiveTransform(pts,M) 
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) 

#draw the good matched key points 
draw_params = dict(matchColor = (0,255,0), # draw matches in green color 
        singlePointColor = None, 
        matchesMask = matchesMask, # draw only inliers 
        flags = 2) 

img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) 
plt.figure() 
plt.imshow(img3, 'gray'),plt.show() 

This is what I need to implement in my code. But I need it in Java, with one image from the database and the other image captured by the camera. – WhoAmI
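Building on the corrected drawMatches call sketched in the comments above, the Python answer translates fairly directly to the OpenCV Java API. Below is a rough, untested Java adaptation of the "good matches" filtering and drawing step, using the fields of the question's ImageDetectionFilter class and the same 1.75 * minDist heuristic that its findSceneCorners() already applies:

// Untested sketch; assumes the fields and the src frame from the
// question's class, plus import org.opencv.features2d.Features2d;
List<DMatch> matchesList = mMatches.toList();

// Find the best (smallest) match distance.
double minDist = Double.MAX_VALUE;
for (DMatch match : matchesList) {
    minDist = Math.min(minDist, match.distance);
}

// Keep only matches within 1.75x of the best distance, mirroring
// the "good match" test in findSceneCorners().
List<DMatch> goodMatchesList = new ArrayList<DMatch>();
for (DMatch match : matchesList) {
    if (match.distance < 1.75 * minDist) {
        goodMatchesList.add(match);
    }
}
MatOfDMatch goodMatches = new MatOfDMatch();
goodMatches.fromList(goodMatchesList);

// Convert both RGBA images to RGB, since drawMatches expects
// 1- or 3-channel images.
Mat sceneRgb = new Mat();
Mat referenceRgb = new Mat();
Imgproc.cvtColor(src, sceneRgb, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(mReferenceImage, referenceRgb, Imgproc.COLOR_RGBA2RGB);

// Draw the filtered matches side by side; outImg can then be
// displayed or saved for inspection.
Mat outImg = new Mat();
Features2d.drawMatches(sceneRgb, mSceneKeypoints,
     referenceRgb, mReferenceKeypoints, goodMatches, outImg);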
