diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 5e2e911cc88..026b11a2dbc 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -2,8 +2,8 @@
 
 See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request
 
-- [ ] I agree to contribute to the project under Apache 2 License.
-- [ ] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
+- [x] I agree to contribute to the project under Apache 2 License.
+- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
 - [ ] The PR is proposed to the proper branch
 - [ ] There is a reference to the original bug report and related work
 - [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
diff --git a/modules/bgsegm/include/opencv2/bgsegm.hpp b/modules/bgsegm/include/opencv2/bgsegm.hpp
index 8ace5d9a5e7..1d2b6f892f2 100644
--- a/modules/bgsegm/include/opencv2/bgsegm.hpp
+++ b/modules/bgsegm/include/opencv2/bgsegm.hpp
@@ -99,6 +99,19 @@ CV_EXPORTS_W Ptr<BackgroundSubtractorMOG>
 class CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor
 {
 public:
+    // BackgroundSubtractor interface
+    /** @brief Computes a foreground mask.
+
+    @param image Next video frame of type CV_8UC(n), CV_8SC(n), CV_16UC(n), CV_16SC(n), CV_32SC(n), CV_32FC(n) or CV_64FC(n), where n is 1, 2, 3 or 4.
+    @param fgmask The output foreground mask as an 8-bit binary image.
+    @param learningRate The value between 0 and 1 that indicates how fast the background model is
+    learnt. A negative parameter value makes the algorithm use an automatically chosen learning
+    rate. 0 means that the background model is not updated at all, 1 means that the background model
+    is completely reinitialized from the last frame.
+    */
+    CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0;
+    CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE = 0;
+
     /** @brief Returns total number of distinct colors to maintain in histogram.
     */
     CV_WRAP virtual int getMaxFeatures() const = 0;
diff --git a/modules/bgsegm/src/bgfg_gmg.cpp b/modules/bgsegm/src/bgfg_gmg.cpp
index a4f8f2a4765..a83a83cc696 100644
--- a/modules/bgsegm/src/bgfg_gmg.cpp
+++ b/modules/bgsegm/src/bgfg_gmg.cpp
@@ -426,8 +426,13 @@ void BackgroundSubtractorGMGImpl::apply(InputArray _frame, OutputArray _fgmask,
 {
     Mat frame = _frame.getMat();
 
-    CV_Assert(frame.depth() == CV_8U || frame.depth() == CV_16U || frame.depth() == CV_32F);
-    CV_Assert(frame.channels() == 1 || frame.channels() == 3 || frame.channels() == 4);
+    const int depth = frame.depth();
+    CV_CheckDepth(depth, (depth == CV_8U) || (depth == CV_8S) ||
+                         (depth == CV_16U) || (depth == CV_16S) ||
+                         (depth == CV_32S) ||
+                         (depth == CV_32F) || (depth == CV_64F), "Unsupported depth");
+    CV_CheckGE(frame.channels(), 1, "Unsupported channels");
+    CV_CheckLE(frame.channels(), 4, "Unsupported channels");
 
     if (newLearningRate != -1.0)
     {
@@ -441,8 +446,12 @@ void BackgroundSubtractorGMGImpl::apply(InputArray _frame, OutputArray _fgmask,
         double maxval = maxVal_;
         if( minVal_ == 0 && maxVal_ == 0 )
         {
-            minval = 0;
-            maxval = frame.depth() == CV_8U ? 255.0 : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0;
+            if( depth == CV_8U )       { minval = std::numeric_limits<uchar>::min();  maxval = std::numeric_limits<uchar>::max();  }
+            else if( depth == CV_8S )  { minval = std::numeric_limits<schar>::min();  maxval = std::numeric_limits<schar>::max();  }
+            else if( depth == CV_16U ) { minval = std::numeric_limits<ushort>::min(); maxval = std::numeric_limits<ushort>::max(); }
+            else if( depth == CV_16S ) { minval = std::numeric_limits<short>::min();  maxval = std::numeric_limits<short>::max();  }
+            else if( depth == CV_32S ) { minval = std::numeric_limits<int>::min();    maxval = std::numeric_limits<int>::max();    }
+            else /* CV_32F or CV_64F */ { minval = 0.0; maxval = 1.0; }
         }
         initialize(frame.size(), minval, maxval);
     }
diff --git a/modules/bgsegm/test/test_backgroundsubtractor_gbh.cpp b/modules/bgsegm/test/test_backgroundsubtractor_gbh.cpp
index f42f008e13b..7868480fa09 100644
--- a/modules/bgsegm/test/test_backgroundsubtractor_gbh.cpp
+++ b/modules/bgsegm/test/test_backgroundsubtractor_gbh.cpp
@@ -7,39 +7,24 @@
 
 namespace opencv_test { namespace {
 
-class CV_BackgroundSubtractorTest : public cvtest::BaseTest
-{
-public:
-    CV_BackgroundSubtractorTest();
-protected:
-    void run(int);
-};
-
-CV_BackgroundSubtractorTest::CV_BackgroundSubtractorTest()
-{
-}
-
 /**
  * This test checks the following:
  * (i) BackgroundSubtractorGMG can operate with matrices of various types and sizes
  * (ii) Training mode returns empty fgmask
  * (iii) End of training mode, and anomalous frame yields every pixel detected as FG
  */
-void CV_BackgroundSubtractorTest::run(int)
+typedef testing::TestWithParam<std::tuple<int, int>> bgsubgmg_allTypes;
+TEST_P(bgsubgmg_allTypes, accuracy)
 {
-    int code = cvtest::TS::OK;
-    RNG& rng = ts->get_rng();
-    int type = ((unsigned int)rng)%7; //!< pick a random type, 0 - 6, defined in types_c.h
-    int channels = 1 + ((unsigned int)rng)%4; //!< random number of channels from 1 to 4.
-    int channelsAndType = CV_MAKETYPE(type,channels);
-    int width = 2 + ((unsigned int)rng)%98; //!< Mat will be 2 to 100 in width and height
-    int height = 2 + ((unsigned int)rng)%98;
+    const int depth = get<0>(GetParam());
+    const int ncn = get<1>(GetParam());
+    const int mtype = CV_MAKETYPE(depth, ncn);
+    const int width = 64;
+    const int height = 64;
+    RNG& rng = TS::ptr()->get_rng();
 
     Ptr<BackgroundSubtractorGMG> fgbg = createBackgroundSubtractorGMG();
-    Mat fgmask;
-
-    if (!fgbg)
-        CV_Error(Error::StsError,"Failed to create Algorithm\n");
+    ASSERT_TRUE(fgbg != nullptr) << "Failed to call createBackgroundSubtractorGMG()";
 
     /**
      * Set a few parameters
@@ -57,49 +42,51 @@ void CV_BackgroundSubtractorTest::run(int)
      * Max value for simulated images picked randomly in upper half of type range
      * Min value for simulated images picked randomly in lower half of type range
      */
-    if (type == CV_8U)
+    if (depth == CV_8U)
     {
         uchar half = UCHAR_MAX/2;
         maxd = (unsigned char)rng.uniform(half+32, UCHAR_MAX);
         mind = (unsigned char)rng.uniform(0, half-32);
     }
-    else if (type == CV_8S)
+    else if (depth == CV_8S)
     {
         maxd = (char)rng.uniform(32, CHAR_MAX);
         mind = (char)rng.uniform(CHAR_MIN, -32);
     }
-    else if (type == CV_16U)
+    else if (depth == CV_16U)
     {
         ushort half = USHRT_MAX/2;
         maxd = (unsigned int)rng.uniform(half+32, USHRT_MAX);
         mind = (unsigned int)rng.uniform(0, half-32);
     }
-    else if (type == CV_16S)
+    else if (depth == CV_16S)
     {
         maxd = rng.uniform(32, SHRT_MAX);
         mind = rng.uniform(SHRT_MIN, -32);
     }
-    else if (type == CV_32S)
+    else if (depth == CV_32S)
     {
         maxd = rng.uniform(32, INT_MAX);
         mind = rng.uniform(INT_MIN, -32);
     }
-    else if (type == CV_32F)
+    else
     {
-        maxd = rng.uniform(32.0f, FLT_MAX);
-        mind = rng.uniform(-FLT_MAX, -32.0f);
-    }
-    else if (type == CV_64F)
-    {
-        maxd = rng.uniform(32.0, DBL_MAX);
-        mind = rng.uniform(-DBL_MAX, -32.0);
+        ASSERT_TRUE( (depth == CV_32F) || (depth == CV_64F) ) << "Unsupported depth";
+        const double half = 0.5;
+        const double bias = 0.125; // = 32/256 (like CV_8U)
+        maxd = rng.uniform(half + bias, 1.0);
+        mind = rng.uniform(0.0, half - bias);
     }
     fgbg->setMinVal(mind);
     fgbg->setMaxVal(maxd);
 
-    Mat simImage = Mat::zeros(height, width, channelsAndType);
-    int numLearningFrames = 120;
+    Mat simImage(height, width, mtype);
+    Mat fgmask;
+
+    const Mat fullbg(height, width, CV_8UC1, cv::Scalar(0)); // all background.
+
+    const int numLearningFrames = 120;
 
     for (int i = 0; i < numLearningFrames; ++i)
     {
         /**
@@ -111,27 +98,21 @@ void CV_BackgroundSubtractorTest::run(int)
          * Feed simulated images into background subtractor
          */
         fgbg->apply(simImage,fgmask);
 
-        Mat fullbg = Mat::zeros(simImage.rows, simImage.cols, CV_8U);
-        //! fgmask should be entirely background during training
-        code = cvtest::cmpEps2( ts, fgmask, fullbg, 0, false, "The training foreground mask" );
-        if (code < 0)
-            ts->set_failed_test_info( code );
+        EXPECT_EQ(cv::norm(fgmask, fullbg, NORM_INF), 0) << "foreground mask should be entirely background during training";
     }
 
     //! generate last image, distinct from training images
     rng.fill(simImage, RNG::UNIFORM, mind, maxd);
     fgbg->apply(simImage,fgmask);
 
-    //! now fgmask should be entirely foreground
-    Mat fullfg = 255*Mat::ones(simImage.rows, simImage.cols, CV_8U);
-    code = cvtest::cmpEps2( ts, fgmask, fullfg, 255, false, "The final foreground mask" );
-    if (code < 0)
-    {
-        ts->set_failed_test_info( code );
-    }
+    const Mat fullfg(height, width, CV_8UC1, cv::Scalar(255)); // all foreground.
+    EXPECT_EQ(cv::norm(fgmask, fullfg, NORM_INF), 0) << "foreground mask should be entirely foreground for the anomalous final frame";
 }
 
-TEST(VIDEO_BGSUBGMG, accuracy) { CV_BackgroundSubtractorTest test; test.safe_run(); }
+INSTANTIATE_TEST_CASE_P(/**/,
+                        bgsubgmg_allTypes,
+                        testing::Combine(
+                            testing::Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F),
+                            testing::Values(1, 2, 3, 4)));
 
 }} // namespace
diff --git a/modules/wechat_qrcode/samples/qrcode.py b/modules/wechat_qrcode/samples/qrcode.py
index 7713734f993..405a523e07c 100644
--- a/modules/wechat_qrcode/samples/qrcode.py
+++ b/modules/wechat_qrcode/samples/qrcode.py
@@ -22,7 +22,7 @@
 except:
     print("---------------------------------------------------------------")
    print("Failed to initialize WeChatQRCode.")
-    print("Please, download 'detector.*' and 'sr.*' from")
+    print("Please, download 'detect.*' and 'sr.*' from")
     print("https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode")
     print("and put them into the current directory.")
     print("---------------------------------------------------------------")
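Below is a small, self-contained usage sketch (not part of the patch) showing how the widened type support could be exercised from user code once the bgsegm changes above are built. It assumes an OpenCV build that already contains this PR; the frame size, pixel values and frame counts are illustrative only and loosely mirror the updated test.

```cpp
// Hypothetical example, not shipped with this PR: feed a CV_16SC3 stream to GMG.
#include <opencv2/core.hpp>
#include <opencv2/bgsegm.hpp>

int main()
{
    // Default construction: 120 initialization frames, decision threshold 0.8.
    cv::Ptr<cv::bgsegm::BackgroundSubtractorGMG> gmg =
        cv::bgsegm::createBackgroundSubtractorGMG();

    // With this patch, leaving setMinVal()/setMaxVal() at 0 makes the model
    // default to the full numeric range of the input depth (here, short).
    cv::Mat frame(64, 64, CV_16SC3);
    cv::Mat fgmask;
    cv::RNG rng;

    for (int i = 0; i < 120; ++i)              // "normal" training frames
    {
        frame.setTo(cv::Scalar::all(100));
        gmg->apply(frame, fgmask);             // mask stays all zeros while the model is initializing
    }

    rng.fill(frame, cv::RNG::UNIFORM, -30000.0, 30000.0); // anomalous frame
    gmg->apply(frame, fgmask);                 // mask should now be (almost) entirely 255
    return 0;
}
```

Relying on the depth-derived default range is convenient, but GMG quantizes the [minVal, maxVal] interval into a fixed number of levels, so data occupying only a small slice of the full range may be quantized coarsely; calling setMinVal()/setMaxVal() explicitly remains the safer choice in that case.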