I am working on a project using the ORB feature detector in OpenCV 2.3.1. I am finding matches between 8 different images, 6 of which are very similar (20 cm difference in camera position along a linear slider, so there is no scale or rotational variance), plus 2 images taken from roughly a 45 degree angle on either side. My code finds plenty of accurate matches between the very similar images, but few to none for the images taken from the more different perspective. I have included what I think are the pertinent parts of my code; let me know if you need more information.
// set parameters
int numKeyPoints = 1500;
float distThreshold = 15.0;
//instantiate detector, extractor, matcher
detector = new cv::OrbFeatureDetector(numKeyPoints);
extractor = new cv::OrbDescriptorExtractor;
matcher = new cv::BruteForceMatcher<cv::HammingLUT>;
//Load input images and detect keypoints
cv::Mat img1;
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
cv::Mat img2;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
img1 = cv::imread(fList[0].string(), CV_LOAD_IMAGE_GRAYSCALE);
img2 = cv::imread(fList[1].string(), CV_LOAD_IMAGE_GRAYSCALE);
detector->detect(img1, img1_keypoints);
detector->detect(img2, img2_keypoints);
extractor->compute(img1, img1_keypoints, img1_descriptors);
extractor->compute(img2, img2_keypoints, img2_descriptors);
//Match keypoints using knnMatch to find the single best match for each keypoint
//Then cull results that fall below given distance threshold
std::vector<std::vector<cv::DMatch> > matches;
matcher->knnMatch(img1_descriptors, img2_descriptors, matches, 1);
int matchCount=0;
for (int n=0; n<matches.size(); ++n) {
if (matches[n].size() > 0){
if (matches[n][0].distance > distThreshold){
matches[n].erase(matches[n].begin());
}else{
++matchCount;
}
}
}
I was able to get plenty of useful matches by changing the way the matches are filtered. My previous method discarded a lot of good matches based solely on their distance value. The RobustMatcher class I found in the OpenCV2 Computer Vision Application Programming Cookbook ended up working well. Now that all of my matches are accurate, I have been able to get good enough results by increasing the number of keypoints the ORB detector looks for. Using the RobustMatcher with SIFT or SURF still gives better results, but I am getting usable data with ORB now.
//RobustMatcher class taken from OpenCV2 Computer Vision Application Programming Cookbook Ch 9
class RobustMatcher {
private:
// pointer to the feature point detector object
cv::Ptr<cv::FeatureDetector> detector;
// pointer to the feature descriptor extractor object
cv::Ptr<cv::DescriptorExtractor> extractor;
// pointer to the matcher object
cv::Ptr<cv::DescriptorMatcher > matcher;
float ratio; // max ratio between 1st and 2nd NN
bool refineF; // if true will refine the F matrix
double distance; // min distance to epipolar
double confidence; // confidence level (probability)
public:
RobustMatcher() : ratio(0.65f), refineF(true),
confidence(0.99), distance(3.0) {
// ORB is the default feature
detector= new cv::OrbFeatureDetector();
extractor= new cv::OrbDescriptorExtractor();
matcher= new cv::BruteForceMatcher<cv::HammingLUT>;
}
// Set the feature detector
void setFeatureDetector(
cv::Ptr<cv::FeatureDetector>& detect) {
detector= detect;
}
// Set the descriptor extractor
void setDescriptorExtractor(
cv::Ptr<cv::DescriptorExtractor>& desc) {
extractor= desc;
}
// Set the matcher
void setDescriptorMatcher(
cv::Ptr<cv::DescriptorMatcher>& match) {
matcher= match;
}
// Set confidence level
void setConfidenceLevel(
double conf) {
confidence= conf;
}
//Set MinDistanceToEpipolar
void setMinDistanceToEpipolar(
double dist) {
distance= dist;
}
//Set ratio
void setRatio(
float rat) {
ratio= rat;
}
// Clear matches for which NN ratio is > than threshold
// return the number of removed points
// (corresponding entries being cleared,
// i.e. size will be 0)
int ratioTest(std::vector<std::vector<cv::DMatch> >
&matches) {
int removed=0;
// for all matches
for (std::vector<std::vector<cv::DMatch> >::iterator
matchIterator= matches.begin();
matchIterator!= matches.end(); ++matchIterator) {
// if 2 NN has been identified
if (matchIterator->size() > 1) {
// check distance ratio
if ((*matchIterator)[0].distance/
(*matchIterator)[1].distance > ratio) {
matchIterator->clear(); // remove match
removed++;
}
} else { // does not have 2 neighbours
matchIterator->clear(); // remove match
removed++;
}
}
return removed;
}
// Insert symmetrical matches in symMatches vector
void symmetryTest(
const std::vector<std::vector<cv::DMatch> >& matches1,
const std::vector<std::vector<cv::DMatch> >& matches2,
std::vector<cv::DMatch>& symMatches) {
// for all matches image 1 -> image 2
for (std::vector<std::vector<cv::DMatch> >::
const_iterator matchIterator1= matches1.begin();
matchIterator1!= matches1.end(); ++matchIterator1) {
// ignore deleted matches
if (matchIterator1->size() < 2)
continue;
// for all matches image 2 -> image 1
for (std::vector<std::vector<cv::DMatch> >::
const_iterator matchIterator2= matches2.begin();
matchIterator2!= matches2.end();
++matchIterator2) {
// ignore deleted matches
if (matchIterator2->size() < 2)
continue;
// Match symmetry test
if ((*matchIterator1)[0].queryIdx ==
(*matchIterator2)[0].trainIdx &&
(*matchIterator2)[0].queryIdx ==
(*matchIterator1)[0].trainIdx) {
// add symmetrical match
symMatches.push_back(
cv::DMatch((*matchIterator1)[0].queryIdx,
(*matchIterator1)[0].trainIdx,
(*matchIterator1)[0].distance));
break; // next match in image 1 -> image 2
}
}
}
}
// Identify good matches using RANSAC
// Return fundemental matrix
cv::Mat ransacTest(
const std::vector<cv::DMatch>& matches,
const std::vector<cv::KeyPoint>& keypoints1,
const std::vector<cv::KeyPoint>& keypoints2,
std::vector<cv::DMatch>& outMatches) {
// Convert keypoints into Point2f
std::vector<cv::Point2f> points1, points2;
cv::Mat fundemental;
for (std::vector<cv::DMatch>::
const_iterator it= matches.begin();
it!= matches.end(); ++it) {
// Get the position of left keypoints
float x= keypoints1[it->queryIdx].pt.x;
float y= keypoints1[it->queryIdx].pt.y;
points1.push_back(cv::Point2f(x,y));
// Get the position of right keypoints
x= keypoints2[it->trainIdx].pt.x;
y= keypoints2[it->trainIdx].pt.y;
points2.push_back(cv::Point2f(x,y));
}
// Compute F matrix using RANSAC
std::vector<uchar> inliers(points1.size(),0);
if (points1.size()>0&&points2.size()>0){
fundemental= cv::findFundamentalMat(
cv::Mat(points1),cv::Mat(points2), // matching points
inliers, // match status (inlier or outlier)
CV_FM_RANSAC, // RANSAC method
distance, // distance to epipolar line
confidence); // confidence probability
// extract the surviving (inliers) matches
std::vector<uchar>::const_iterator
itIn= inliers.begin();
std::vector<cv::DMatch>::const_iterator
itM= matches.begin();
// for all matches
for ( ;itIn!= inliers.end(); ++itIn, ++itM) {
if (*itIn) { // it is a valid match
outMatches.push_back(*itM);
}
}
if (refineF) {
// The F matrix will be recomputed with
// all accepted matches
// Convert keypoints into Point2f
// for final F computation
points1.clear();
points2.clear();
for (std::vector<cv::DMatch>::
const_iterator it= outMatches.begin();
it!= outMatches.end(); ++it) {
// Get the position of left keypoints
float x= keypoints1[it->queryIdx].pt.x;
float y= keypoints1[it->queryIdx].pt.y;
points1.push_back(cv::Point2f(x,y));
// Get the position of right keypoints
x= keypoints2[it->trainIdx].pt.x;
y= keypoints2[it->trainIdx].pt.y;
points2.push_back(cv::Point2f(x,y));
}
// Compute 8-point F from all accepted matches
if (points1.size()>0&&points2.size()>0){
fundemental= cv::findFundamentalMat(
cv::Mat(points1),cv::Mat(points2), // matches
CV_FM_8POINT); // 8-point method
}
}
}
return fundemental;
}
// Match feature points using symmetry test and RANSAC
// returns fundemental matrix
cv::Mat match(cv::Mat& image1,
cv::Mat& image2, // input images
// output matches and keypoints
std::vector<cv::DMatch>& matches,
std::vector<cv::KeyPoint>& keypoints1,
std::vector<cv::KeyPoint>& keypoints2) {
// 1a. Detection of the feature points
detector->detect(image1,keypoints1);
detector->detect(image2,keypoints2);
// 1b. Extraction of their descriptors
cv::Mat descriptors1, descriptors2;
extractor->compute(image1,keypoints1,descriptors1);
extractor->compute(image2,keypoints2,descriptors2);
// 2. Match the two image descriptors
// Construction of the matcher
//cv::BruteForceMatcher<cv::L2<float>> matcher;
// from image 1 to image 2
// based on k nearest neighbours (with k=2)
std::vector<std::vector<cv::DMatch> > matches1;
matcher->knnMatch(descriptors1,descriptors2,
matches1, // vector of matches (up to 2 per entry)
2); // return 2 nearest neighbours
// from image 2 to image 1
// based on k nearest neighbours (with k=2)
std::vector<std::vector<cv::DMatch> > matches2;
matcher->knnMatch(descriptors2,descriptors1,
matches2, // vector of matches (up to 2 per entry)
2); // return 2 nearest neighbours
// 3. Remove matches for which NN ratio is
// > than threshold
// clean image 1 -> image 2 matches
int removed= ratioTest(matches1);
// clean image 2 -> image 1 matches
removed= ratioTest(matches2);
// 4. Remove non-symmetrical matches
std::vector<cv::DMatch> symMatches;
symmetryTest(matches1,matches2,symMatches);
// 5. Validate matches using RANSAC
cv::Mat fundemental= ransacTest(symMatches,
keypoints1, keypoints2, matches);
// return the found fundemental matrix
return fundemental;
}
};
// set parameters
int numKeyPoints = 1500;
//Instantiate robust matcher
RobustMatcher rmatcher;
//instantiate detector, extractor, matcher as cv::Ptr so they can be passed to the RobustMatcher setters
cv::Ptr<cv::FeatureDetector> detector = new cv::OrbFeatureDetector(numKeyPoints);
cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor;
cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BruteForceMatcher<cv::HammingLUT>;
rmatcher.setFeatureDetector(detector);
rmatcher.setDescriptorExtractor(extractor);
rmatcher.setDescriptorMatcher(matcher);
//Load input images and detect keypoints
cv::Mat img1;
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
cv::Mat img2;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
std::vector<cv::DMatch> matches;
img1 = cv::imread(fList[0].string(), CV_LOAD_IMAGE_GRAYSCALE);
img2 = cv::imread(fList[1].string(), CV_LOAD_IMAGE_GRAYSCALE);
rmatcher.match(img1, img2, matches, img1_keypoints, img2_keypoints);
I had a similar problem with OpenCV in Python and found my way here via Google.
To solve my problem I wrote Python code for filtering the matches, based on @KLowes' solution. I am sharing it here in case someone else runs into the same problem:
_""" Clear matches for which NN ratio is > than threshold """
def filter_distance(matches):
dist = [m.distance for m in matches]
thres_dist = (sum(dist) / len(dist)) * ratio
sel_matches = [m for m in matches if m.distance < thres_dist]
#print '#selected matches:%d (out of %d)' % (len(sel_matches), len(matches))
return sel_matches
""" keep only symmetric matches """
def filter_asymmetric(matches, matches2, k_scene, k_ftr):
sel_matches = []
for match1 in matches:
for match2 in matches2:
if match1.queryIdx < len(k_ftr) and match2.queryIdx < len(k_scene) and \
match2.trainIdx < len(k_ftr) and match1.trainIdx < len(k_scene) and \
k_ftr[match1.queryIdx] == k_ftr[match2.trainIdx] and \
k_scene[match1.trainIdx] == k_scene[match2.queryIdx]:
sel_matches.append(match1)
break
return sel_matches
def filter_ransac(matches, kp_scene, kp_ftr, countIterations=2):
if countIterations < 1 or len(kp_scene) < minimalCountForHomography:
return matches
p_scene = []
p_ftr = []
for m in matches:
p_scene.append(kp_scene[m.queryIdx].pt)
p_ftr.append(kp_ftr[m.trainIdx].pt)
if len(p_scene) < minimalCountForHomography:
return None
F, mask = cv2.findFundamentalMat(np.float32(p_ftr), np.float32(p_scene), cv2.FM_RANSAC)
sel_matches = []
for m, status in Zip(matches, mask):
if status:
sel_matches.append(m)
#print '#ransac selected matches:%d (out of %d)' % (len(sel_matches), len(matches))
return filter_ransac(sel_matches, kp_scene, kp_ftr, countIterations-1)
def filter_matches(matches, matches2, k_scene, k_ftr):
matches = filter_distance(matches)
matches2 = filter_distance(matches2)
matchesSym = filter_asymmetric(matches, matches2, k_scene, k_ftr)
if len(k_scene) >= minimalCountForHomography:
return filter_ransac(matchesSym, k_scene, k_ftr)
_
To filter the matches you have to call filter_matches(matches, matches2, k_scene, k_ftr), where matches, matches2 are the matches obtained from the ORB matcher and k_scene, k_ftr are the corresponding keypoints.
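In case it helps, here is a minimal sketch of how those inputs might be produced with the OpenCV 2.4-era Python bindings (cv2.ORB and cv2.BFMatcher). The image paths, the nfeatures value, and the ratio / minimalCountForHomography settings are placeholder assumptions, not values from the answer above.
import cv2

# module-level parameters the filter functions above expect; values are placeholders
ratio = 0.65
minimalCountForHomography = 4

# load an object ("feature") image and a scene image as grayscale; paths are placeholders
img_ftr = cv2.imread('object.png', 0)
img_scene = cv2.imread('scene.png', 0)

# detect ORB keypoints and compute descriptors (use cv2.ORB_create in OpenCV 3+)
orb = cv2.ORB(nfeatures=1500)
k_ftr = orb.detect(img_ftr, None)
k_ftr, d_ftr = orb.compute(img_ftr, k_ftr)
k_scene = orb.detect(img_scene, None)
k_scene, d_scene = orb.compute(img_scene, k_scene)

# match in both directions so the symmetry test has something to compare
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.match(d_ftr, d_scene)   # feature -> scene
matches2 = bf.match(d_scene, d_ftr)  # scene -> feature

good = filter_matches(matches, matches2, k_scene, k_ftr)
print('kept %d matches' % (len(good) if good else 0))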
I don't think there is anything wrong with your code. From my experience, OpenCV's ORB is sensitive to scale variation.
You can probably confirm this with a small test: make some images with rotation only and some with only a change in scale. The rotated ones will probably match fine, but the scaled ones won't (I believe decreasing the scale is the worst case).
I also suggest trying the OpenCV version from trunk (see OpenCV's site for compile instructions): ORB has been updated since 2.3.1 and performs a little better, but it still has the scale problem.
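For what it's worth, a small test along those lines could be sketched in Python roughly as follows; the file name, rotation angle, scale factor, and nn_ratio threshold are illustrative assumptions, not values from the question.
import cv2

def count_orb_matches(img_a, img_b, nn_ratio=0.8):
    # detect ORB keypoints/descriptors in both images (use cv2.ORB_create in OpenCV 3+)
    orb = cv2.ORB(nfeatures=1500)
    kp_a = orb.detect(img_a, None)
    kp_a, des_a = orb.compute(img_a, kp_a)
    kp_b = orb.detect(img_b, None)
    kp_b, des_b = orb.compute(img_b, kp_b)
    # Hamming-distance brute-force matching with a nearest-neighbour ratio test
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    pairs = matcher.knnMatch(des_a, des_b, k=2)
    good = [p[0] for p in pairs
            if len(p) == 2 and p[0].distance < nn_ratio * p[1].distance]
    return len(good)

img = cv2.imread('test.png', 0)  # placeholder file name, loaded as grayscale
h, w = img.shape[:2]

# rotation-only copy: 30 degrees around the image centre, unchanged scale
M = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), 30, 1.0)
rotated = cv2.warpAffine(img, M, (w, h))

# scale-only copy: half the original size, no rotation
scaled = cv2.resize(img, (w // 2, h // 2))

print('rotation-only matches: %d' % count_orb_matches(img, rotated))
print('scale-only matches: %d' % count_orb_matches(img, scaled))
If ORB really is the bottleneck, the rotation-only count should stay high while the scale-only count drops noticeably.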