You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
wheeldetect/3part/Cyclops/include/PatternDetector.h

417 lines
17 KiB
C

/*!
* \file PatternDetector.h
* \date 2018/03/23
*
* \author Lin, Chi
* Contact: lin.chi@hzleaper.com
*
*
* \note
*/
#ifndef __PatternDetector_h_
#define __PatternDetector_h_
#include "StdUtils.h"
#include "CVUtils.h"
#include "PatternDescriptor.h"
#include "CyclopsLock.h"
#include "DetectRoi.h"
#include "CyclopsModules.h"
struct PeakPatternParamPack;
enum PeakAlgoAccLevel;
/*! \brief Locate single or multiple instances of the trained pattern in the given image and ROI.
*
* 1) If you are using Cyclops as static library,
* 1.1) and you want global factory to manage the trained pattern and detector for you, initialize the detector
* via PatternDetector::getInstance().
* Example:
* \code{.cpp}
* PatternDetector::Ptr detectorPtr = PatternDetector::getInstance("detect sth");
* if (!detectorPtr->isTrained()) { // do training if not trained yet
* bool ret = detectorPtr->train(trainImg);
* if (!ret) return false;
* }
* detectorPtr->setScale(0); // no scale
* detectorPtr->setAngle(15); // -15 ~ +15
* Vec4f pose; // result pose
* float bestScore = detectorPtr->detectBest(detectImg, roiVertexes, pose);
*
* // delete it later
* PatternDetector::deleteInstance("detect sth");
* \endcode
*
* 1.2) or, if you wish to manage the detector yourself:
* \code{.cpp}
* PatternDetector::Ptr detectorPtr = std::make_shared<PatternDetector>(); // remember to hold the smart pointer
* // balabala, same as above
* // ...
* \endcode
*
* 2) If you are using Cyclops as dynamic library,
* initialize and manipulate the detector via CyclopsModules APIs.
* Example:
* \code{.cpp}
* // get a long-term detector, and give it a unique name
* PatternDetector::Ptr pdPtr = GetModuleInstance<PatternDetector>("detect sth");
* // get for temporary usage
* PatternDetector::Ptr pdPtr = GetModuleInstance<PatternDetector>();
* \endcode
*
* see SinglePatternTest and MultiPatternTest for unit test
*/
class PatternDetector : public ICyclopsModuleInstance
{
/*! \fn setACThres
* Define minimum acceptable score, 0 to 100, default to 70, see also getACThres()
* \fn getACThres
* Get value of minimum acceptable score, see also setACThres()
*/
DECLARE_PARAMETER2(int, ACThres, 0, 100)
/*! \fn setCoarseACThres
* Define minimum acceptable score to filter result in early stage, default to 0, means we'll decide it by training.
* Works when turn on strict-scoring, we'll use this score to filter candidates first
* see also getCoarseACThres()
* \fn getCoarseACThres
* Get value of minimum acceptable score in coarse stage, see also setCoarseACThres()
*/
DECLARE_PARAMETER2(int, CoarseACThres, 0, 100)
/*! \fn setAngle
* Define angle range for matching search, 0 to 180, default to 15 means +-15, see also getAngle()
* \fn getAngle
* Get value of angle range for matching search, see also setAngle()
*/
DECLARE_PARAMETER2(int, Angle, 0, 180)
/*! \fn setScale
* Define scale range for matching search, 0 to 50, default to 1 means 99% to 101%, see also getScale()
* \fn getScale
* Get value of scale range for matching search, see also setScale()
*/
DECLARE_PARAMETER2(int, Scale, 0, 50)
/*! \fn setOverlap
* Define minimum overlap percentage of each pattern for matches, 1 to 100, default to 50 means 50%, see also getOverlap()
* \fn getOverlap
* Get value of minimum overlap percentage of each pattern for matches, see also setOverlap()
*/
DECLARE_PARAMETER2(int, Overlap, 1, 100)
/*! \fn setIgnorePolarity
* Define whether to ignore polarity for matches, default to false, see also getIgnorePolarity()
* \fn getIgnorePolarity
* Get whether to ignore polarity for matches, see also setIgnorePolarity()
*/
DECLARE_PARAMETER(bool, IgnorePolarity)
/*! \fn setIgnoreMissing
* Define whether to ignore missing part for scoring, default to true, see also getIgnoreMissing()
* \fn getIgnoreMissing
* Get value of whether to ignore missing part for scoring, see also setIgnoreMissing()
*/
DECLARE_PARAMETER(bool, IgnoreMissing)
/*! \fn setXOffset
* Define x offset of matches, default to 0, see also getXOffset()
* \fn getXOffset
* Get value of x offset of matches, see also setXOffset(), setOffset()
*/
DECLARE_PARAMETER(float, XOffset)
/*! \fn setYOffset
* Define y offset of matches, default to 0, see also getYOffset()
* \fn getYOffset
* Get value of y offset of matches, see also setYOffset(), setOffset()
*/
DECLARE_PARAMETER(float, YOffset)
/*! \fn setThreadNum
* Define maximum thread number we could use for detection, default to -1 for no limitation, see also getThreadNum()
* NOTE(review): the constructor initializes mThreadNum to 0, not -1 -- confirm which default is intended.
* \fn getThreadNum
* Get value of maximum thread number we could use for detection, see also setThreadNum()
*/
DECLARE_PARAMETER(int, ThreadNum)
/*! \fn setDetailLevel
* Define level of detail of trained pattern template, 0 to 1, default 0.5, higher for more detail, see also getDetailLevel()
* \fn getDetailLevel
* Get value of level of detail of trained pattern template, see also setDetailLevel()
*/
DECLARE_PARAMETER2(float, DetailLevel, 0, 1)
/*! \fn setAccLevel
* Define accuracy level of the algorithm, default to -1 means hardcoded defaults, see also getAccLevel()
* Note: this value only works for PeakPattern.
* \fn getAccLevel
* Get value of accuracy level of the algorithm, see also setAccLevel()
*/
DECLARE_PARAMETER(int, AccLevel)
/*! \fn setCoarseStepX
* Define coarse step along x axis (horizontally), 1 to 16, default to 0,
* means use default values related to accuracy level and hardware concurrency, see also getCoarseStepX()
* \fn getCoarseStepX
* Get value of coarse step along x axis (horizontally), see also setCoarseStepX()
*/
DECLARE_PARAMETER2(int, CoarseStepX, 0, 16)
/*! \fn setCoarseStepY
* Define coarse step along y axis (vertically), 1 to 16, default to 0,
* means use default values related to accuracy level and hardware concurrency, see also getCoarseStepY()
* \fn getCoarseStepY
* Get value of coarse step along y axis (vertically), see also setCoarseStepY()
*/
DECLARE_PARAMETER2(int, CoarseStepY, 0, 16)
/*! \fn setEnableDetectMutex
* Define whether to enable the mutex in detection, default to true, see also getEnableDetectMutex()
* If turned off, users need to be careful of the multi-threading problem, in case too many threads are created at one time.
* \fn getEnableDetectMutex
* Get value of whether to enable the mutex in detection, see also setEnableDetectMutex()
*/
DECLARE_PARAMETER(bool, EnableDetectMutex)
/*! \fn setBias
* Define whether detection will prefer 0-angle and 1-scale, the algorithm will start searching from biased value and stop early if score decreased.
* default to false, see also getBias()
* \fn getBias
* Get value of whether detection will prefer 0-angle and 1-scale, see also setBias()
*/
DECLARE_PARAMETER(bool, Bias)
/*! \fn setUseCache
* Define whether to use cache to store some internal structure, default to true, see also getUseCache()
* It will speed-up the detection while consuming extra memory depending on your search angle and scale. Accuracy is not affected.
* \fn getUseCache
* Get value of whether to use cache to store some internal structure, see also setUseCache()
*/
DECLARE_PARAMETER(bool, UseCache)
/*! \fn setStrictScore
* Define whether to re-score the detect results on original source image, default to false, see also getStrictScore()
* \fn getStrictScore
* Get value of whether to re-score the detect results on original source image, see also setStrictScore()
*/
DECLARE_PARAMETER(bool, StrictScore)
/*! \fn setGrayValueWeight
* Define the weight of gray value score in detection, 0 ~ 100, default to 0, see also getGrayValueWeight()
* \fn getGrayValueWeight
* Get value of the weight of gray value score in detection, see also setGrayValueWeight()
*/
DECLARE_PARAMETER2(int, GrayValueWeight, 0, 100)
public:
//! Construct an untrained detector with default parameter values (see the \fn docs above for each parameter's meaning).
PatternDetector()
: mACThres(70), mCoarseACThres(0),
mAngle(15),
mScale(1),
mOverlap(50), mIgnorePolarity(false), mIgnoreMissing(true), mXOffset(0), mYOffset(0),
mThreadNum(0), mDetailLevel(0.5), mAccLevel(-1),
mCoarseStepX(0), mCoarseStepY(0), mEnableDetectMutex(true), mBias(false), mUseCache(true), mStrictScore(false),
mGrayValueWeight(0)
{}
virtual ~PatternDetector() {}
/*! \fn serializeToMemory
* Serialize the pattern detector into an in-memory string, see also deserializeFromMemory()
* @param str used to take the output serialization result
* @return true for succeed, false for fail
* \fn serializeToFile
* Serialize the pattern detector into a text file, see also deserializeFromFile()
* @param filename file name (full path) where we will write the data
* @return true for succeed, false for fail
* \fn deserializeFromMemory
* Deserialize the pattern detector from in-memory string, see also serializeToMemory()
* @param str in-memory string
* @return true for succeed, false for fail
* \fn deserializeFromFile
* Deserialize the pattern detector from a text file, see also serializeToFile()
* @param filename file name (full path) where we will read the data
* @return true for succeed, false for fail
*/
DECL_SERIALIZE_FUNCS
//! Smart pointer to hold an instance of PatternDetector
typedef std::shared_ptr<PatternDetector> Ptr;
DECL_GET_INSTANCE(PatternDetector::Ptr)
/*! Define x and y offset of matches, it's in the coordinate of template,
* which means if the match result scales, the offsets scale; if the match result rotates, the offsets rotate.
*/
void setOffset(float x, float y) {
setXOffset(x);
setYOffset(y);
}
/*! Train the detector with a provided image.
* @param templateImg the template image for training, a gray image is preferred, otherwise we'll convert it to gray
* @param pMask the mask for which pixels should be excluded
* @return true for successfully trained, false for failure, say no key point is detected in the training image.
*/
virtual bool train(const Mat& templateImg, const Mat* pMask = nullptr);
/** @overload */
virtual bool train(const Mat& templateImg, const vector<Point2f>& roi); // train the template within given ROI
/*! Tell whether this detector is trained or not, see also train()
* @return true for already trained
*/
virtual bool isTrained();
/*! Reset the detector as never trained
*/
virtual void reset();
/*! Prune training result via provided key point list which should be removed from template
* @param kpPruneList indexes of key points
* @return true if succeed
*/
virtual bool prune(const Mat& templateImg, const vector<int>& kpPruneList);
/*! Get the trained center of trained template, it's the anchor point of rotation
* @return center
*/
virtual Point2f templateCenter();
/*! Detect the best matching instance using the detector.
* @param img input image for detection
* @param bestPattern output pose of the best instance: x, y (without offset), angle (in degree), scale
* @param allScores output, represent the matching score distribution of the entire image,
* pass null if you don't want it
* @param mask input mask for exclude some pixel from detection
* @return score of the best one, 0 if we found nothing good
*/
virtual float detectBest(const Mat& img,
Vec4f& bestPattern,
Mat* allScores = nullptr,
Mat* mask = nullptr
);
/** @overload */
virtual float detectBest(const Mat& img, const vector<Point2f>& roi,
Vec4f& bestPattern,
Mat* allScores = nullptr
);
/** @overload */
virtual float detectBest(const Mat& img, DetectRoi& droi,
Vec4f& bestPattern,
Mat* allScores = nullptr
);
/*! Detect multiple best matching instances using the detector.
* @param img input image for detection
* @param bestPatterns output pose of several best instances: x, y (without offset), angle (in degree), scale
* @param bestScores output scores of several best instances
* @param allScores output, represent the matching score distribution of the entire image,
* pass null if you don't want it
* @param minCount expected minimum count of the found instances
* @param maxCount expected maximum count of the found instances
* @param mask input mask for exclude some pixel from detection
* @return count of the found instances, it may be below minCount but wouldn't exceed maxCount. return 0 if we found nothing good.
*/
virtual int detectMulti(const Mat& img,
vector<Vec4f>* bestPatterns,
vector<float>* bestScores,
Mat* allScores = nullptr,
int minCount = 0,
int maxCount = 10,
Mat* mask = nullptr
);
/** @overload */
virtual int detectMulti(const Mat& img, const vector<Point2f>& roi,
vector<Vec4f>* bestPatterns,
vector<float>* bestScores,
Mat* allScores,
int minCount,
int maxCount
);
/** @overload */
virtual int detectMulti(const Mat& img, DetectRoi& droi,
vector<Vec4f>* bestPatterns,
vector<float>* bestScores,
Mat* allScores,
int minCount,
int maxCount
);
/*! Draw pattern on the provided image
* @param img on what we should draw
* @param pose the detected pose got from detectBest: x, y, angle (in degree), scale
* @param withDir true if draw the key points' direction, false for single points
* @param factor to scale canvas for easy identification, default to 1.0 means no scale
* @param colorPeak color for the key points, default to random color
* @param colorDir color for the direction path, default to random color
* @return canvas
*/
virtual Mat drawPattern(const Mat& img, const Vec4f& pose, bool withDir = false, float factor = 1.0,
Scalar colorPeak = gDummyScalar, Scalar colorDir = gDummyScalar);
/*! Draw pattern on a transparent canvas, align to the center, without scale
* @param pose the detected pose got from detectBest: x, y, angle (in degree), scale
* @param withDir true if draw the key points' direction, false for single points
* @param colorPeak color for the key points, default to random color
* @param colorDir color for the direction path, default to random color
* @return canvas
*/
virtual Mat drawPattern(const Vec4f& pose, bool withDir = false,
Scalar colorPeak = gDummyScalar, Scalar colorDir = gDummyScalar);
/*! Draw pattern on a transparent canvas, with provided width and height, and provided shift, without scale
* @param pose the detected pose got from detectBest: x, y, angle (in degree), scale
* @param w width of the result canvas
* @param h height of the result canvas
* @param shifts shifts of the instance position
* @param withDir true if draw the key points' direction, false for single points
* @param colorPeak color for the key points, default to random color
* @param colorDir color for the direction path, default to random color
* @return canvas
*/
virtual Mat drawPattern(const Vec4f& pose, int w, int h, Point2f shifts, bool withDir = false,
Scalar colorPeak = gDummyScalar, Scalar colorDir = gDummyScalar);
/*! Draw patterns on the provided image
* @param img on what we should draw
* @param poses the detected poses got from detectMulti: x, y, angle (in degree), scale
* @param withDir true if draw the key points' direction, false for single points
* @param factor to scale canvas for easy identification, default to 1.0 means no scale
* @param colorPeak color for the key points, default to random color
* @param colorDir color for the direction path, default to random color
* @return canvas
*/
virtual Mat drawPatterns(const Mat& img, const vector<Vec4f>& poses, bool withDir = false, float factor = 1.0,
Scalar colorPeak = gDummyScalar, Scalar colorDir = gDummyScalar);
/*! Draw patterns on a transparent canvas, with provided width and height, and provided shift, without scale
* @param poses the detected pose got from detectMulti: x, y, angle (in degree), scale
* @param w width of the result canvas
* @param h height of the result canvas
* @param shifts shifts of the instance position
* @param withDir true if draw the key points' direction, false for single points
* @param colorPeak color for the key points, default to random color
* @param colorDir color for the direction path, default to random color
* @return canvas
*/
virtual Mat drawPatterns(const vector<Vec4f>& poses, int w, int h, Point2f shifts, bool withDir = false,
Scalar colorPeak = gDummyScalar, Scalar colorDir = gDummyScalar);
/*! Represent the pattern of provided pose with patches of lines showing contour's direction
* @param pose the detected pose got from detectMulti or detectBest: x, y, angle (in degree), scale
* @param dirLen individual line's length, 0 ~ 5.0, negative value means we'll get an automatic value
according to the distribution of template's key points.
*/
virtual std::vector<Vec4f> getPatternAsLines(const Vec4f& pose, float dirLen = 0);
/*! Represent the pattern of provided pose with key points
* @param pose the detected pose got from detectMulti or detectBest: x, y, angle (in degree), scale
* @return the key points: x, y (in original image size), and angle
*/
virtual std::vector<Vec4f> getPatternAsPoints(const Vec4f& pose);
/*! Generate the required position applied the offset setting
* @param result the detected pose got from detectBest or detectMulti.
* @return position applied the offset setting
*/
virtual Point2f getOffsetCenter(const Vec4f& result);
private:
// Fill a PeakPattern parameter pack from this detector's settings.
// NOTE(review): "Patten" is a typo for "Pattern", but renaming here would break the out-of-line definition in the .cpp.
void genPeakPattenPack(PeakPatternParamPack& pack, PeakAlgoAccLevel defaultAccLevel);
virtual bool serialize(FileStorage& fs);
virtual bool deserialize(const FileNode& fs);
private:
// Trained template descriptor -- presumably null/empty until train() succeeds; verify against the .cpp.
PatternDescriptor::Ptr mpTemplateDesc;
// Guards access to mpTemplateDesc across concurrent train/detect calls.
CyclopsLock mDescLock;
// Class-wide lock serializing detection (see setEnableDetectMutex()); shared by all PatternDetector instances.
static CyclopsLock mDetectMutex;
};
#endif // __PatternDetector_h_