
Merge pull request #3638 from mshabunin:doc-upgrade

Documentation transition to fresh Doxygen #3638

Merge with https://github.com/opencv/opencv/pull/25042
pull/3695/head
Maksim Shabunin, committed by GitHub (parent commit 1aaf6e1c8b)
Changed files (number of changed lines in parentheses):
  1. modules/bioinspired/include/opencv2/bioinspired/retina.hpp (51)
  2. modules/bioinspired/samples/default_retina_config.xml (24)
  3. modules/bioinspired/samples/realistic_retina_config.xml (24)
  4. modules/bioinspired/tutorials/retina_model/retina_model.markdown (12)
  5. modules/cannops/include/opencv2/cann.hpp (4)
  6. modules/cannops/include/opencv2/cann_interface.hpp (4)
  7. modules/cudaimgproc/include/opencv2/cudaimgproc.hpp (1)
  8. modules/dnn_superres/tutorials/benchmark/sr_benchmark.markdown (29)
  9. modules/face/include/opencv2/face/facemark.hpp (9)
  10. modules/face/include/opencv2/face/facemark_train.hpp (6)
  11. modules/face/tutorials/face_landmark/face_landmark_trainer.markdown (6)
  12. modules/hdf/include/opencv2/hdf.hpp (2)
  13. modules/mcc/include/opencv2/mcc/checker_model.hpp (1)
  14. modules/rgbd/include/opencv2/rgbd/dynafu.hpp (7)
  15. modules/sfm/include/opencv2/sfm.hpp (5)
  16. modules/stereo/include/opencv2/stereo/quasi_dense_stereo.hpp (6)
  17. modules/text/include/opencv2/text/ocr.hpp (1)
  18. modules/videostab/include/opencv2/videostab.hpp (2)
  19. modules/viz/include/opencv2/viz.hpp (1)
  20. modules/xfeatures2d/include/opencv2/xfeatures2d.hpp (1)
  21. modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp (3)
  22. modules/ximgproc/include/opencv2/ximgproc.hpp (10)
  23. modules/ximgproc/include/opencv2/ximgproc/color_match.hpp (2)
  24. modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp (2)
  25. modules/ximgproc/include/opencv2/ximgproc/edgepreserving_filter.hpp (4)
  26. modules/ximgproc/include/opencv2/ximgproc/fast_hough_transform.hpp (1)
  27. modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp (2)
  28. modules/ximgproc/include/opencv2/ximgproc/peilin.hpp (2)
  29. modules/ximgproc/include/opencv2/ximgproc/run_length_morphology.hpp (2)

51
modules/bioinspired/include/opencv2/bioinspired/retina.hpp

@@ -94,57 +94,12 @@ enum {
 Here is the default configuration file of the retina module. It gives results such as the first
 retina output shown on the top of this page.
-@code{xml}
-<?xml version="1.0"?>
-<opencv_storage>
-<OPLandIPLparvo>
-<colorMode>1</colorMode>
-<normaliseOutput>1</normaliseOutput>
-<photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity>
-<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
-<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
-<horizontalCellsGain>0.01</horizontalCellsGain>
-<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
-<hcellsSpatialConstant>7.</hcellsSpatialConstant>
-<ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity></OPLandIPLparvo>
-<IPLmagno>
-<normaliseOutput>1</normaliseOutput>
-<parasolCells_beta>0.</parasolCells_beta>
-<parasolCells_tau>0.</parasolCells_tau>
-<parasolCells_k>7.</parasolCells_k>
-<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
-<V0CompressionParameter>9.5e-01</V0CompressionParameter>
-<localAdaptintegration_tau>0.</localAdaptintegration_tau>
-<localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>
-</opencv_storage>
-@endcode
+@include default_retina_config.xml
 Here is the 'realistic" setup used to obtain the second retina output shown on the top of this page.
-@code{xml}
-<?xml version="1.0"?>
-<opencv_storage>
-<OPLandIPLparvo>
-<colorMode>1</colorMode>
-<normaliseOutput>1</normaliseOutput>
-<photoreceptorsLocalAdaptationSensitivity>8.9e-01</photoreceptorsLocalAdaptationSensitivity>
-<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
-<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
-<horizontalCellsGain>0.3</horizontalCellsGain>
-<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
-<hcellsSpatialConstant>7.</hcellsSpatialConstant>
-<ganglionCellsSensitivity>8.9e-01</ganglionCellsSensitivity></OPLandIPLparvo>
-<IPLmagno>
-<normaliseOutput>1</normaliseOutput>
-<parasolCells_beta>0.</parasolCells_beta>
-<parasolCells_tau>0.</parasolCells_tau>
-<parasolCells_k>7.</parasolCells_k>
-<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
-<V0CompressionParameter>9.5e-01</V0CompressionParameter>
-<localAdaptintegration_tau>0.</localAdaptintegration_tau>
-<localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>
-</opencv_storage>
-@endcode
+@include realistic_retina_config.xml
 */
 struct RetinaParameters{
 //! Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters

24
modules/bioinspired/samples/default_retina_config.xml

@@ -0,0 +1,24 @@
<?xml version="1.0"?>
<opencv_storage>
<OPLandIPLparvo>
<colorMode>1</colorMode>
<normaliseOutput>1</normaliseOutput>
<photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity>
<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
<horizontalCellsGain>0.01</horizontalCellsGain>
<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
<hcellsSpatialConstant>7.</hcellsSpatialConstant>
<ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity>
</OPLandIPLparvo>
<IPLmagno>
<normaliseOutput>1</normaliseOutput>
<parasolCells_beta>0.</parasolCells_beta>
<parasolCells_tau>0.</parasolCells_tau>
<parasolCells_k>7.</parasolCells_k>
<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
<V0CompressionParameter>9.5e-01</V0CompressionParameter>
<localAdaptintegration_tau>0.</localAdaptintegration_tau>
<localAdaptintegration_k>7.</localAdaptintegration_k>
</IPLmagno>
</opencv_storage>

24
modules/bioinspired/samples/realistic_retina_config.xml

@@ -0,0 +1,24 @@
<?xml version="1.0"?>
<opencv_storage>
<OPLandIPLparvo>
<colorMode>1</colorMode>
<normaliseOutput>1</normaliseOutput>
<photoreceptorsLocalAdaptationSensitivity>8.9e-01</photoreceptorsLocalAdaptationSensitivity>
<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
<horizontalCellsGain>0.3</horizontalCellsGain>
<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
<hcellsSpatialConstant>7.</hcellsSpatialConstant>
<ganglionCellsSensitivity>8.9e-01</ganglionCellsSensitivity>
</OPLandIPLparvo>
<IPLmagno>
<normaliseOutput>1</normaliseOutput>
<parasolCells_beta>0.</parasolCells_beta>
<parasolCells_tau>0.</parasolCells_tau>
<parasolCells_k>7.</parasolCells_k>
<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
<V0CompressionParameter>9.5e-01</V0CompressionParameter>
<localAdaptintegration_tau>0.</localAdaptintegration_tau>
<localAdaptintegration_k>7.</localAdaptintegration_k>
</IPLmagno>
</opencv_storage>
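These two sample files are what the retina documentation now pulls in through Doxygen `@include` instead of carrying the XML inline. As a hedged sketch (not part of this PR), this is roughly how such a parameter file can be applied at runtime with the `Retina::create` and `Retina::setup` APIs declared in retina.hpp; the input and config file paths are assumptions:

```cpp
#include <opencv2/bioinspired.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat input = cv::imread("input.png");

    // Create a retina sized for the input, then load one of the sample
    // parameter files shown above (path is hypothetical).
    cv::Ptr<cv::bioinspired::Retina> retina =
        cv::bioinspired::Retina::create(input.size());
    retina->setup("default_retina_config.xml");

    cv::Mat parvo, magno;
    retina->run(input);       // feed one frame
    retina->getParvo(parvo);  // details (Parvo) channel
    retina->getMagno(magno);  // motion/transient (Magno) channel
    return 0;
}
```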

12
modules/bioinspired/tutorials/retina_model/retina_model.markdown

@@ -1,6 +1,8 @@
 Retina and real-world vision {#tutorial_bioinspired_retina_model}
 =============================================================
+@tableofcontents
 Goal
 ----
@@ -382,7 +384,7 @@ need to know if mean luminance information is required or not. If not, the the r
 significantly reduce its energy thus giving more visibility to higher spatial frequency details.
-#### Basic parameters
+## Basic parameters
 The simplest parameters are as follows :
@@ -397,7 +399,7 @@ processing. You can expect much faster processing using gray levels : it would r
 product per pixel for all of the retina processes and it has recently been parallelized for multicore
 architectures.
-#### Photo-receptors parameters
+## Photo-receptors parameters
 The following parameters act on the entry point of the retina - photo-receptors - and has impact on all
 of the following processes. These sensors are low pass spatio-temporal filters that smooth temporal and
@@ -421,7 +423,7 @@ and high frequency noise canceling.
 A good compromise for color images is a 0.53 value since such choice won't affect too much the color spectrum.
 Higher values would lead to gray and blurred output images.
-#### Horizontal cells parameters
+## Horizontal cells parameters
 This parameter set tunes the neural network connected to the photo-receptors, the horizontal cells.
 It modulates photo-receptors sensitivity and completes the processing for final spectral whitening
@@ -446,7 +448,7 @@ It modulates photo-receptors sensitivity and completes the processing for final
 and luminance is already partly enhanced. The following parameters act on the last processing stages
 of the two outing retina signals.
-#### Parvo (details channel) dedicated parameter
+## Parvo (details channel) dedicated parameter
 - **ganglionCellsSensitivity** specifies the strength of the final local adaptation occurring at
 the output of this details' dedicated channel. Parameter values remain between 0 and 1. Low value
@@ -455,7 +457,7 @@ of the two outing retina signals.
 **Note :** this parameter can correct eventual burned images by favoring low energetic details of
 the visual scene, even in bright areas.
-#### IPL Magno (motion/transient channel) parameters
+## IPL Magno (motion/transient channel) parameters
 Once image's information are cleaned, this channel acts as a high pass temporal filter that
 selects only the signals related to transient signals (events, motion, etc.). A low pass spatial filter
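The headings fixed above cover the tutorial's parameter groups (basic, photo-receptors, horizontal cells, Parvo, IPL Magno). A hedged sketch of how those groups map onto the RetinaParameters struct from retina.hpp; the values are copied from the "realistic" sample config, and the member names should be double-checked against the header:

```cpp
#include <opencv2/bioinspired.hpp>

void tuneRetina(cv::Ptr<cv::bioinspired::Retina> retina)
{
    // Start from the currently loaded parameters and adjust a few fields.
    cv::bioinspired::RetinaParameters params = retina->getParameters();
    params.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity = 0.89f; // photo-receptors group
    params.OPLandIplParvo.horizontalCellsGain = 0.3f;                       // horizontal cells group
    params.OPLandIplParvo.ganglionCellsSensitivity = 0.89f;                 // Parvo (details) channel
    params.IplMagno.amacrinCellsTemporalCutFrequency = 2.0f;                // IPL Magno (transient) channel
    retina->setup(params);
}
```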

4
modules/cannops/include/opencv2/cann.hpp

@@ -8,12 +8,12 @@
 #include "opencv2/core.hpp"
 /**
-@defgroup cann Ascend-accelerated Computer Vision
+@defgroup cannops Ascend-accelerated Computer Vision
 @{
 @defgroup canncore Core part
 @{
 @defgroup cann_struct Data Structures
-@defgroup cann_init Initializeation and Information
+@defgroup cann_init Initialization and Information
 @}
 @}
 */

4
modules/cannops/include/opencv2/cann_interface.hpp

@@ -13,9 +13,9 @@ namespace cann
 {
 /**
-@addtogroup cann
+@addtogroup cannops
 @{
-@defgroup cannops Operations for Ascend Backend.
+@defgroup cannops_ops Operations for Ascend Backend.
 @{
 @defgroup cannops_elem Per-element Operations
 @defgroup cannops_core Core Operations on Matrices

1
modules/cudaimgproc/include/opencv2/cudaimgproc.hpp

@@ -844,7 +844,6 @@ cv::Moments cvMoments = convertSpatialMoments<float>(spatialMoments, order);
 ```
 see the \a CUDA_TEST_P(Moments, Async) test inside opencv_contrib_source_code/modules/cudaimgproc/test/test_moments.cpp for an example.
-@returns cv::Moments.
 @sa cuda::moments, cuda::convertSpatialMoments, cuda::numMoments, cuda::MomentsOrder
 */
 CV_EXPORTS_W void spatialMoments(InputArray src, OutputArray moments, const bool binaryImage = false, const MomentsOrder order = MomentsOrder::THIRD_ORDER_MOMENTS, const int momentsType = CV_64F, Stream& stream = Stream::Null());
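The dropped `@returns cv::Moments.` line was stale: the function returns void and writes the raw moments into the `moments` output array. A hedged sketch of the intended flow, based on the declaration above and the `convertSpatialMoments<float>` call quoted in the hunk header (a synchronous download is used here for brevity; the referenced CUDA_TEST_P(Moments, Async) test shows the asynchronous variant):

```cpp
cv::cuda::GpuMat d_src;      // 8-bit single-channel image already uploaded to the GPU
cv::cuda::GpuMat d_moments;  // receives the raw spatial moments on the device

// Compute spatial moments on the GPU; the result stays in device memory.
cv::cuda::spatialMoments(d_src, d_moments, /*binaryImage=*/false,
                         cv::cuda::MomentsOrder::THIRD_ORDER_MOMENTS, CV_32F);

// Download the raw moments and convert them to a cv::Moments struct,
// mirroring the convertSpatialMoments<float> usage quoted above.
cv::Mat h_moments;
d_moments.download(h_moments);
cv::Moments m = cv::cuda::convertSpatialMoments<float>(
    h_moments, cv::cuda::MomentsOrder::THIRD_ORDER_MOMENTS);
```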

29
modules/dnn_superres/tutorials/benchmark/sr_benchmark.markdown

@@ -50,14 +50,9 @@ Explanation
 Benchmarking results
 -----------
-Dataset benchmarking
-----
-###General100 dataset
-<center>
+## General100 dataset
-#####2x scaling factor
+### 2x scaling factor
 | | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
@@ -70,7 +65,7 @@ Dataset benchmarking
 | Nearest neighbor | 0.000114 | 29.1665 | 0.9049 |
 | Lanczos | 0.001094 | 32.4687 | 0.9327 |
-#####3x scaling factor
+### 3x scaling factor
 | | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
 | ------------- |:-------------------:| ---------:|--------:|
@@ -83,7 +78,7 @@ Dataset benchmarking
 | Lanczos | 0.001012 |25.9115 |0.8706 |
-#####4x scaling factor
+### 4x scaling factor
 | | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
 | ------------- |:-------------------:| ---------:|--------:|
@@ -96,14 +91,10 @@ Dataset benchmarking
 | Lanczos | 0.001012 |25.9115 |0.8706 |
-</center>
-Images
-----
-<center>
+## Images
-####2x scaling factor
+### 2x scaling factor
 |Set5: butterfly.png | size: 256x256 | ||
 |:-------------:|:-------------------:|:-------------:|:----:|
@@ -112,7 +103,7 @@ Images
 ![ESPCN](images/espcn_butterfly.jpg)| ![FSRCNN](images/fsrcnn_butterfly.jpg) | ![LapSRN](images/lapsrn_butterfly.jpg) | ![EDSR](images/edsr_butterfly.jpg)
 |29.0341 / 0.9354 / **0.004157**| 29.0077 / 0.9345 / 0.006325 | 27.8212 / 0.9230 / 0.037937 | **30.0347** / **0.9453** / 2.077280 |
-####3x scaling factor
+### 3x scaling factor
 |Urban100: img_001.png | size: 1024x644 | ||
 |:-------------:|:-------------------:|:-------------:|:----:|
@@ -122,7 +113,7 @@ Images
 |28.0118 / 0.8588 / **0.030748**| 28.0184 / 0.8597 / 0.094173 | | **30.5671** / **0.9019** / 9.517580 |
-####4x scaling factor
+### 4x scaling factor
 |Set14: comic.png | size: 250x361 | ||
 |:-------------:|:-------------------:|:-------------:|:----:|
@@ -131,7 +122,7 @@ Images
 |![ESPCN](images/espcn_comic.jpg)| ![FSRCNN](images/fsrcnn_comic.jpg) | ![LapSRN](images/lapsrn_comic.jpg) | ![EDSR](images/edsr_comic.jpg)
 |20.0417 / 0.6302 / **0.001894**| 20.0885 / 0.6384 / 0.002103 | 20.0676 / 0.6339 / 0.061640 | **20.5233** / **0.6901** / 0.665876 |
-####8x scaling factor
+### 8x scaling factor
 |Div2K: 0006.png | size: 1356x2040 | |
 |:-------------:|:-------------------:|:-------------:|
@@ -139,5 +130,3 @@ Images
 |PSRN / SSIM / Speed (CPU)| 26.3139 / **0.8033** / 0.001107| 23.8291 / 0.7340 / **0.000611** |
 |![Lanczos interpolation](images/lanczos_div2k.jpg)| ![LapSRN](images/lapsrn_div2k.jpg) | |
 |26.1565 / 0.7962 / 0.004782| **26.7046** / 0.7987 / 2.274290 | |
-</center>

9
modules/face/include/opencv2/face/facemark.hpp

@@ -12,12 +12,6 @@ Mentor: Delia Passalacqua
 #ifndef __OPENCV_FACELANDMARK_HPP__
 #define __OPENCV_FACELANDMARK_HPP__
-/**
-@defgroup face Face Analysis
-- @ref tutorial_table_of_content_facemark
-- The Facemark API
-*/
 #include "opencv2/core.hpp"
 #include <vector>
@@ -25,6 +19,8 @@ Mentor: Delia Passalacqua
 namespace cv {
 namespace face {
+//! @addtogroup face
+//! @{
 /** @brief Abstract base class for all facemark models
@@ -88,6 +84,7 @@ CV_EXPORTS_W Ptr<Facemark> createFacemarkLBF();
 //! construct a Kazemi facemark detector
 CV_EXPORTS_W Ptr<Facemark> createFacemarkKazemi();
+//! @}
 } // face
 } // cv
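The pattern applied in this header (and repeated in facemark_train.hpp, nonfree.hpp, and the ximgproc headers below) is to define each Doxygen group in exactly one place and have every other header only attach its declarations with an `@addtogroup ... @{ ... @}` block, which avoids duplicate group definitions. A schematic, hedged sketch of that convention; the file and declaration placement here are illustrative, not taken from the PR:

```cpp
// face_module_docs.hpp (illustrative): the group is defined once.
/**
@defgroup face Face Analysis
- @ref tutorial_table_of_content_facemark
- The Facemark API
*/

// any_other_face_header.hpp (illustrative): declarations join the existing group.
//! @addtogroup face
//! @{

/** @brief Example declaration that will be documented on the "face" module page. */
CV_EXPORTS_W cv::Ptr<cv::face::Facemark> createFacemarkKazemi();

//! @}
```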

6
modules/face/include/opencv2/face/facemark_train.hpp

@@ -12,12 +12,6 @@ Mentor: Delia Passalacqua
 #ifndef __OPENCV_FACELANDMARKTRAIN_HPP__
 #define __OPENCV_FACELANDMARKTRAIN_HPP__
-/**
-@defgroup face Face Analysis
-- @ref tutorial_table_of_content_facemark
-- The Facemark API
-*/
 #include "opencv2/face/facemark.hpp"
 #include "opencv2/objdetect.hpp"
 #include <vector>

6
modules/face/tutorials/face_landmark/face_landmark_trainer.markdown

@@ -21,7 +21,7 @@ The above format is similar to HELEN dataset which is used for training the mode
 ./sample_train_landmark_detector -annotations=/home/sukhad/Downloads/code/trainset/ -config=config.xml -face_cascade=lbpcascadefrontalface.xml -model=trained_model.dat -width=460 -height=460
 ```
-### Description of command parameters
+## Description of command parameters
 > * **annotations** a : (REQUIRED) Path to annotations txt file [example - /data/annotations.txt]
 > * **config** c : (REQUIRED) Path to configuration xml file containing parameters for training.[ example - /data/config.xml]
@@ -30,7 +30,7 @@ The above format is similar to HELEN dataset which is used for training the mode
 > * **height** h : (OPTIONAL) The height which you want all images to get to scale the annotations. Large images are slow to process [default = 460]
 > * **face_cascade** f (REQUIRED) Path to the face cascade xml file which you want to use as a detector.
-### Description of training parameters
+## Description of training parameters
 The configuration file described above which is used while training contains the training parameters which are required for training.
@@ -49,7 +49,7 @@ The configuration file described above which is used while training contains the
 To get more detailed description about the training parameters you can refer to the [Research paper](https://pdfs.semanticscholar.org/d78b/6a5b0dcaa81b1faea5fb0000045a62513567.pdf).
-### Understanding code
+## Understanding code
 ![](images/3.jpg)

2
modules/hdf/include/opencv2/hdf.hpp

@@ -49,8 +49,6 @@ Hierarchical Data Format version 5
 In order to use it, the hdf5 library has to be installed, which
 means cmake should find it using `find_package(HDF5)`.
 @}
 */

1
modules/mcc/include/opencv2/mcc/checker_model.hpp

@@ -116,7 +116,6 @@ public:
 virtual ~CCheckerDraw() {}
 /** \brief Draws the checker to the given image.
 * \param img image in color space BGR
-* \return void
 */
 CV_WRAP virtual void draw(InputOutputArray img) = 0;
 /** \brief Create a new CCheckerDraw object.

7
modules/rgbd/include/opencv2/rgbd/dynafu.hpp

@@ -114,7 +114,6 @@ public:
 virtual void renderSurface(OutputArray depthImage, OutputArray vertImage, OutputArray normImage, bool warp=true) = 0;
 };
-//! @}
-}
-}
-#endif
+} // dynafu::
+} // cv::
+#endif // __OPENCV_RGBD_DYNAFU_HPP__

5
modules/sfm/include/opencv2/sfm.hpp

@@ -85,18 +85,17 @@ This module has been originally developed as a project for Google Summer of Code
 @defgroup triangulation Triangulation
 @defgroup reconstruction Reconstruction
 @note
 - Notice that it is compiled only when Ceres Solver is correctly installed.\n
 Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
 @defgroup simple_pipeline Simple Pipeline
 @note
 - Notice that it is compiled only when Ceres Solver is correctly installed.\n
 Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
 @}
 */
 #endif

6
modules/stereo/include/opencv2/stereo/quasi_dense_stereo.hpp

@@ -18,6 +18,7 @@ namespace cv
 {
 namespace stereo
 {
 /** \addtogroup stereo
 * @{
 */
@@ -190,9 +191,8 @@ public:
 CV_PROP_RW PropagationParameters Param;
 };
-} //namespace cv
-} //namespace stereo
 /** @}*/
+} //namespace cv
+} //namespace stereo
 #endif // __OPENCV_QUASI_DENSE_STEREO_H__

1
modules/text/include/opencv2/text/ocr.hpp

@@ -363,7 +363,6 @@ CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifierCNN(cons
 */
 CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifier(const String& filename, int classifier);
-//! @}
 /** @brief Utility function to create a tailored language model transitions table from a given list of words (lexicon).
 *

2
modules/videostab/include/opencv2/videostab.hpp

@@ -70,9 +70,7 @@ Both the functions and the classes are available.
 The Fast Marching Method @cite Telea04 is used in of the video stabilization routines to do motion and
 color inpainting. The method is implemented is a flexible way and it's made public for other users.
 @}
 */
 #include "opencv2/videostab/stabilizer.hpp"

1
modules/viz/include/opencv2/viz.hpp

@@ -77,7 +77,6 @@ myWindow.showWidget("CloudWidget1", cw);
 // Modify it, and it will be modified in the window.
 cw.setColor(viz::Color::yellow());
 @endcode
 @}
 */

1
modules/xfeatures2d/include/opencv2/xfeatures2d.hpp

@@ -58,7 +58,6 @@ known to be patented. You need to set the OPENCV_ENABLE_NONFREE option in cmake
 This section describes the following matching strategies:
 - GMS: Grid-based Motion Statistics, @cite Bian2017gms
 - LOGOS: Local geometric support for high-outlier spatial verification, @cite Lowry2018LOGOSLG
 @}
 */

3
modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp

@@ -50,6 +50,9 @@ namespace cv
 namespace xfeatures2d
 {
+//! @addtogroup xfeatures2d_nonfree
+//! @{
 /** @brief Class for extracting Speeded Up Robust Features from an image @cite Bay06 .
 The algorithm parameters:

10
modules/ximgproc/include/opencv2/ximgproc.hpp

@@ -65,7 +65,8 @@
 #include "ximgproc/find_ellipses.hpp"
-/** @defgroup ximgproc Extended Image Processing
+/**
+@defgroup ximgproc Extended Image Processing
 @{
 @defgroup ximgproc_edge Structured forests for fast edge detection
@@ -115,7 +116,6 @@ an additional hysteresis step.
 The size of the original image is required for compatibility with the imgproc functions when the boundary handling requires that pixel outside the image boundary are
 "on".
 @}
 */
@@ -124,6 +124,9 @@ namespace cv
 namespace ximgproc
 {
+//! @addtogroup ximgproc
+//! @{
 enum ThinningTypes{
 THINNING_ZHANGSUEN = 0, // Thinning technique of Zhang-Suen
 THINNING_GUOHALL = 1 // Thinning technique of Guo-Hall
@@ -139,9 +142,6 @@ enum LocalBinarizationMethods{
 BINARIZATION_NICK = 3 //!< NICK technique. See @cite Khurshid2009 .
 };
-//! @addtogroup ximgproc
-//! @{
 /** @brief Performs thresholding on input images using Niblack's technique or some of the
 popular variations it inspired.
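The `LocalBinarizationMethods` values shown in this hunk select the variant used by the header's Niblack-style thresholding function. A brief, hedged usage sketch, assuming the `cv::ximgproc::niBlackThreshold` declaration that follows this doc comment in the header; the window size and `k` are arbitrary example values:

```cpp
#include <opencv2/ximgproc.hpp>
#include <opencv2/imgcodecs.hpp>

cv::Mat gray = cv::imread("page.png", cv::IMREAD_GRAYSCALE);
cv::Mat bin;
// 25x25 local window, k = -0.2, using the NICK variant listed in the enum above.
cv::ximgproc::niBlackThreshold(gray, bin, 255, cv::THRESH_BINARY, 25, -0.2,
                               cv::ximgproc::BINARIZATION_NICK);
```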

2
modules/ximgproc/include/opencv2/ximgproc/color_match.hpp

@@ -61,6 +61,8 @@ CV_EXPORTS_W void qdft(InputArray img, OutputArray qimg, int flags, bool sideL
 */
 CV_EXPORTS_W void colorMatchTemplate(InputArray img, InputArray templ, OutputArray result);
+//! @}
 }
 }
 #endif

2
modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp

@@ -71,6 +71,8 @@ CV_EXPORTS_W void GradientDericheY(InputArray op, OutputArray dst, double alpha,
 */
 CV_EXPORTS_W void GradientDericheX(InputArray op, OutputArray dst, double alpha,double omega);
+//! @}
 }
 }
 #endif

4
modules/ximgproc/include/opencv2/ximgproc/edgepreserving_filter.hpp

@@ -26,8 +26,8 @@ namespace cv { namespace ximgproc {
 */
 CV_EXPORTS_W void edgePreservingFilter( InputArray src, OutputArray dst, int d, double threshold );
-}} // namespace
 //! @}
+}} // namespace
 #endif
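Since the full `edgePreservingFilter` declaration is visible in this hunk, a minimal hedged call for reference; the neighbourhood diameter and threshold are arbitrary example values:

```cpp
#include <opencv2/ximgproc.hpp>
#include <opencv2/imgcodecs.hpp>

cv::Mat src = cv::imread("input.png");
cv::Mat dst;
// d = 9: pixel neighbourhood diameter, threshold = 20: edge-preservation sensitivity.
cv::ximgproc::edgePreservingFilter(src, dst, 9, 20.0);
```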

1
modules/ximgproc/include/opencv2/ximgproc/fast_hough_transform.hpp

@@ -83,7 +83,6 @@ enum AngleRangeOption
 * is a binary relation that maps elements of the Cartesian product
 * @f$ S \times S @f$ to @f$ S @f$:
 * @f[ f: S \times S \to S @f]
-* @ingroup MinUtils_MathOper
 */
 enum HoughOp
 {

2
modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp

@@ -61,6 +61,8 @@ namespace ximgproc {
 CV_EXPORTS void GradientPaillouY(InputArray op, OutputArray _dst, double alpha, double omega);
 CV_EXPORTS void GradientPaillouX(InputArray op, OutputArray _dst, double alpha, double omega);
+//! @}
 }
 }
 #endif

2
modules/ximgproc/include/opencv2/ximgproc/peilin.hpp

@@ -27,6 +27,8 @@ namespace cv { namespace ximgproc {
 /** @overload */
 CV_EXPORTS_W void PeiLinNormalization ( InputArray I, OutputArray T );
+//! @}
 }} // namespace
 #endif

2
modules/ximgproc/include/opencv2/ximgproc/run_length_morphology.hpp

@@ -113,6 +113,8 @@ CV_EXPORTS void createRLEImage(const std::vector<cv::Point3i>& runs, OutputArray
 CV_EXPORTS void morphologyEx(InputArray rlSrc, OutputArray rlDest, int op, InputArray rlKernel,
 bool bBoundaryOnForErosion = true, Point anchor = Point(0,0));
+//! @}
 }
 }
 }
