
Merge pull request #3638 from mshabunin:doc-upgrade

Documentation transition to fresh Doxygen #3638

Merge with https://github.com/opencv/opencv/pull/25042
pull/3695/head
Maksim Shabunin 1 year ago committed by GitHub
commit 1aaf6e1c8b
Changed files (lines changed):

  1. modules/bioinspired/include/opencv2/bioinspired/retina.hpp (51)
  2. modules/bioinspired/samples/default_retina_config.xml (24)
  3. modules/bioinspired/samples/realistic_retina_config.xml (24)
  4. modules/bioinspired/tutorials/retina_model/retina_model.markdown (12)
  5. modules/cannops/include/opencv2/cann.hpp (4)
  6. modules/cannops/include/opencv2/cann_interface.hpp (4)
  7. modules/cudaimgproc/include/opencv2/cudaimgproc.hpp (1)
  8. modules/dnn_superres/tutorials/benchmark/sr_benchmark.markdown (29)
  9. modules/face/include/opencv2/face/facemark.hpp (9)
  10. modules/face/include/opencv2/face/facemark_train.hpp (6)
  11. modules/face/tutorials/face_landmark/face_landmark_trainer.markdown (6)
  12. modules/fuzzy/include/opencv2/fuzzy.hpp (10)
  13. modules/hdf/include/opencv2/hdf.hpp (14)
  14. modules/mcc/include/opencv2/mcc/checker_model.hpp (1)
  15. modules/rgbd/include/opencv2/rgbd/dynafu.hpp (7)
  16. modules/sfm/include/opencv2/sfm.hpp (17)
  17. modules/stereo/include/opencv2/stereo/quasi_dense_stereo.hpp (6)
  18. modules/text/include/opencv2/text.hpp (86)
  19. modules/text/include/opencv2/text/ocr.hpp (1)
  20. modules/videostab/include/opencv2/videostab.hpp (32)
  21. modules/viz/include/opencv2/viz.hpp (29)
  22. modules/xfeatures2d/include/opencv2/xfeatures2d.hpp (13)
  23. modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp (3)
  24. modules/ximgproc/include/opencv2/ximgproc.hpp (36)
  25. modules/ximgproc/include/opencv2/ximgproc/color_match.hpp (2)
  26. modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp (2)
  27. modules/ximgproc/include/opencv2/ximgproc/edgepreserving_filter.hpp (4)
  28. modules/ximgproc/include/opencv2/ximgproc/fast_hough_transform.hpp (3)
  29. modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp (2)
  30. modules/ximgproc/include/opencv2/ximgproc/peilin.hpp (2)
  31. modules/ximgproc/include/opencv2/ximgproc/run_length_morphology.hpp (2)

51
modules/bioinspired/include/opencv2/bioinspired/retina.hpp

@@ -94,57 +94,12 @@ enum {
Here is the default configuration file of the retina module. It gives results such as the first
retina output shown on the top of this page.
@code{xml}
<?xml version="1.0"?>
<opencv_storage>
<OPLandIPLparvo>
<colorMode>1</colorMode>
<normaliseOutput>1</normaliseOutput>
<photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity>
<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
<horizontalCellsGain>0.01</horizontalCellsGain>
<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
<hcellsSpatialConstant>7.</hcellsSpatialConstant>
<ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity></OPLandIPLparvo>
<IPLmagno>
<normaliseOutput>1</normaliseOutput>
<parasolCells_beta>0.</parasolCells_beta>
<parasolCells_tau>0.</parasolCells_tau>
<parasolCells_k>7.</parasolCells_k>
<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
<V0CompressionParameter>9.5e-01</V0CompressionParameter>
<localAdaptintegration_tau>0.</localAdaptintegration_tau>
<localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>
</opencv_storage>
@endcode
@include default_retina_config.xml
Here is the 'realistic" setup used to obtain the second retina output shown on the top of this page.
@code{xml}
<?xml version="1.0"?>
<opencv_storage>
<OPLandIPLparvo>
<colorMode>1</colorMode>
<normaliseOutput>1</normaliseOutput>
<photoreceptorsLocalAdaptationSensitivity>8.9e-01</photoreceptorsLocalAdaptationSensitivity>
<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
<horizontalCellsGain>0.3</horizontalCellsGain>
<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
<hcellsSpatialConstant>7.</hcellsSpatialConstant>
<ganglionCellsSensitivity>8.9e-01</ganglionCellsSensitivity></OPLandIPLparvo>
<IPLmagno>
<normaliseOutput>1</normaliseOutput>
<parasolCells_beta>0.</parasolCells_beta>
<parasolCells_tau>0.</parasolCells_tau>
<parasolCells_k>7.</parasolCells_k>
<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
<V0CompressionParameter>9.5e-01</V0CompressionParameter>
<localAdaptintegration_tau>0.</localAdaptintegration_tau>
<localAdaptintegration_k>7.</localAdaptintegration_k></IPLmagno>
</opencv_storage>
@endcode
@include realistic_retina_config.xml
*/
struct RetinaParameters{
//! Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters
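
Not part of the diff: for readers checking that the new `@include` directives still resolve, here is a minimal sketch of how such a configuration file is consumed at runtime through the documented bioinspired API (the input image name is a placeholder).

```cpp
#include <opencv2/bioinspired.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // Load an image and run the retina model configured from the sample XML.
    cv::Mat input = cv::imread("input.jpg");
    cv::Ptr<cv::bioinspired::Retina> retina =
        cv::bioinspired::Retina::create(input.size());
    retina->setup("default_retina_config.xml"); // sample file added above
    retina->run(input);
    cv::Mat parvo, magno;
    retina->getParvo(parvo); // details channel (first output on the page)
    retina->getMagno(magno); // motion/transient channel
    return 0;
}
```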

24
modules/bioinspired/samples/default_retina_config.xml

@@ -0,0 +1,24 @@
<?xml version="1.0"?>
<opencv_storage>
<OPLandIPLparvo>
<colorMode>1</colorMode>
<normaliseOutput>1</normaliseOutput>
<photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity>
<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
<horizontalCellsGain>0.01</horizontalCellsGain>
<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
<hcellsSpatialConstant>7.</hcellsSpatialConstant>
<ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity>
</OPLandIPLparvo>
<IPLmagno>
<normaliseOutput>1</normaliseOutput>
<parasolCells_beta>0.</parasolCells_beta>
<parasolCells_tau>0.</parasolCells_tau>
<parasolCells_k>7.</parasolCells_k>
<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
<V0CompressionParameter>9.5e-01</V0CompressionParameter>
<localAdaptintegration_tau>0.</localAdaptintegration_tau>
<localAdaptintegration_k>7.</localAdaptintegration_k>
</IPLmagno>
</opencv_storage>

24
modules/bioinspired/samples/realistic_retina_config.xml

@@ -0,0 +1,24 @@
<?xml version="1.0"?>
<opencv_storage>
<OPLandIPLparvo>
<colorMode>1</colorMode>
<normaliseOutput>1</normaliseOutput>
<photoreceptorsLocalAdaptationSensitivity>8.9e-01</photoreceptorsLocalAdaptationSensitivity>
<photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant>
<photoreceptorsSpatialConstant>5.3e-01</photoreceptorsSpatialConstant>
<horizontalCellsGain>0.3</horizontalCellsGain>
<hcellsTemporalConstant>0.5</hcellsTemporalConstant>
<hcellsSpatialConstant>7.</hcellsSpatialConstant>
<ganglionCellsSensitivity>8.9e-01</ganglionCellsSensitivity>
</OPLandIPLparvo>
<IPLmagno>
<normaliseOutput>1</normaliseOutput>
<parasolCells_beta>0.</parasolCells_beta>
<parasolCells_tau>0.</parasolCells_tau>
<parasolCells_k>7.</parasolCells_k>
<amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency>
<V0CompressionParameter>9.5e-01</V0CompressionParameter>
<localAdaptintegration_tau>0.</localAdaptintegration_tau>
<localAdaptintegration_k>7.</localAdaptintegration_k>
</IPLmagno>
</opencv_storage>

12
modules/bioinspired/tutorials/retina_model/retina_model.markdown

@@ -1,6 +1,8 @@
Retina and real-world vision {#tutorial_bioinspired_retina_model}
=============================================================
@tableofcontents
Goal
----
@@ -382,7 +384,7 @@ need to know if mean luminance information is required or not. If not, the the r
significantly reduce its energy thus giving more visibility to higher spatial frequency details.
#### Basic parameters
## Basic parameters
The simplest parameters are as follows :
@@ -397,7 +399,7 @@ processing. You can expect much faster processing using gray levels : it would r
product per pixel for all of the retina processes and it has recently been parallelized for multicore
architectures.
#### Photo-receptors parameters
## Photo-receptors parameters
The following parameters act on the entry point of the retina - photo-receptors - and has impact on all
of the following processes. These sensors are low pass spatio-temporal filters that smooth temporal and
@@ -421,7 +423,7 @@ and high frequency noise canceling.
A good compromise for color images is a 0.53 value since such choice won't affect too much the color spectrum.
Higher values would lead to gray and blurred output images.
#### Horizontal cells parameters
## Horizontal cells parameters
This parameter set tunes the neural network connected to the photo-receptors, the horizontal cells.
It modulates photo-receptors sensitivity and completes the processing for final spectral whitening
@@ -446,7 +448,7 @@ It modulates photo-receptors sensitivity and completes the processing for final
and luminance is already partly enhanced. The following parameters act on the last processing stages
of the two outing retina signals.
#### Parvo (details channel) dedicated parameter
## Parvo (details channel) dedicated parameter
- **ganglionCellsSensitivity** specifies the strength of the final local adaptation occurring at
the output of this details' dedicated channel. Parameter values remain between 0 and 1. Low value
@@ -455,7 +457,7 @@ of the two outing retina signals.
**Note :** this parameter can correct eventual burned images by favoring low energetic details of
the visual scene, even in bright areas.
#### IPL Magno (motion/transient channel) parameters
## IPL Magno (motion/transient channel) parameters
Once image's information are cleaned, this channel acts as a high pass temporal filter that
selects only the signals related to transient signals (events, motion, etc.). A low pass spatial filter

4
modules/cannops/include/opencv2/cann.hpp

@@ -8,12 +8,12 @@
#include "opencv2/core.hpp"
/**
@defgroup cann Ascend-accelerated Computer Vision
@defgroup cannops Ascend-accelerated Computer Vision
@{
@defgroup canncore Core part
@{
@defgroup cann_struct Data Structures
@defgroup cann_init Initializeation and Information
@defgroup cann_init Initialization and Information
@}
@}
*/

4
modules/cannops/include/opencv2/cann_interface.hpp

@@ -13,9 +13,9 @@ namespace cann
{
/**
@addtogroup cann
@addtogroup cannops
@{
@defgroup cannops Operations for Ascend Backend.
@defgroup cannops_ops Operations for Ascend Backend.
@{
@defgroup cannops_elem Per-element Operations
@defgroup cannops_core Core Operations on Matrices

1
modules/cudaimgproc/include/opencv2/cudaimgproc.hpp

@@ -844,7 +844,6 @@ cv::Moments cvMoments = convertSpatialMoments<float>(spatialMoments, order);
```
see the \a CUDA_TEST_P(Moments, Async) test inside opencv_contrib_source_code/modules/cudaimgproc/test/test_moments.cpp for an example.
@returns cv::Moments.
@sa cuda::moments, cuda::convertSpatialMoments, cuda::numMoments, cuda::MomentsOrder
*/
CV_EXPORTS_W void spatialMoments(InputArray src, OutputArray moments, const bool binaryImage = false, const MomentsOrder order = MomentsOrder::THIRD_ORDER_MOMENTS, const int momentsType = CV_64F, Stream& stream = Stream::Null());
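
Not part of the diff: a sketch assembled from the signatures visible in this hunk (`spatialMoments`, `numMoments`, `convertSpatialMoments`); the 1 x numMoments CV_32F buffer layout is an assumption taken from the `@sa` list and the test referenced above.

```cpp
#include <opencv2/cudaimgproc.hpp>

int main()
{
    cv::cuda::Stream stream;
    cv::cuda::GpuMat src(cv::Mat::ones(64, 64, CV_8U)); // toy binary image
    const cv::cuda::MomentsOrder order =
        cv::cuda::MomentsOrder::THIRD_ORDER_MOMENTS;
    // Pre-allocate the device-side moments buffer (layout assumed, see above).
    cv::cuda::GpuMat moments(1, cv::cuda::numMoments(order), CV_32F);
    cv::cuda::spatialMoments(src, moments, true, order, CV_32F, stream);
    cv::Mat momentsHost;
    moments.download(momentsHost, stream);  // async download on the same stream
    stream.waitForCompletion();
    cv::Moments m = cv::cuda::convertSpatialMoments<float>(momentsHost, order);
    return 0;
}
```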

29
modules/dnn_superres/tutorials/benchmark/sr_benchmark.markdown

@@ -50,14 +50,9 @@ Explanation
Benchmarking results
-----------
Dataset benchmarking
----
###General100 dataset
<center>
## General100 dataset
#####2x scaling factor
### 2x scaling factor
| | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
@@ -70,7 +65,7 @@ Dataset benchmarking
| Nearest neighbor | 0.000114 | 29.1665 | 0.9049 |
| Lanczos | 0.001094 | 32.4687 | 0.9327 |
#####3x scaling factor
### 3x scaling factor
| | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
| ------------- |:-------------------:| ---------:|--------:|
@@ -83,7 +78,7 @@ Dataset benchmarking
| Lanczos | 0.001012 |25.9115 |0.8706 |
#####4x scaling factor
### 4x scaling factor
| | Avg inference time in sec (CPU)| Avg PSNR | Avg SSIM |
| ------------- |:-------------------:| ---------:|--------:|
@@ -96,14 +91,10 @@ Dataset benchmarking
| Lanczos | 0.001012 |25.9115 |0.8706 |
</center>
Images
----
<center>
## Images
####2x scaling factor
### 2x scaling factor
|Set5: butterfly.png | size: 256x256 | ||
|:-------------:|:-------------------:|:-------------:|:----:|
@@ -112,7 +103,7 @@ Images
![ESPCN](images/espcn_butterfly.jpg)| ![FSRCNN](images/fsrcnn_butterfly.jpg) | ![LapSRN](images/lapsrn_butterfly.jpg) | ![EDSR](images/edsr_butterfly.jpg)
|29.0341 / 0.9354 / **0.004157**| 29.0077 / 0.9345 / 0.006325 | 27.8212 / 0.9230 / 0.037937 | **30.0347** / **0.9453** / 2.077280 |
####3x scaling factor
### 3x scaling factor
|Urban100: img_001.png | size: 1024x644 | ||
|:-------------:|:-------------------:|:-------------:|:----:|
@@ -122,7 +113,7 @@ Images
|28.0118 / 0.8588 / **0.030748**| 28.0184 / 0.8597 / 0.094173 | | **30.5671** / **0.9019** / 9.517580 |
####4x scaling factor
### 4x scaling factor
|Set14: comic.png | size: 250x361 | ||
|:-------------:|:-------------------:|:-------------:|:----:|
@@ -131,7 +122,7 @@ Images
|![ESPCN](images/espcn_comic.jpg)| ![FSRCNN](images/fsrcnn_comic.jpg) | ![LapSRN](images/lapsrn_comic.jpg) | ![EDSR](images/edsr_comic.jpg)
|20.0417 / 0.6302 / **0.001894**| 20.0885 / 0.6384 / 0.002103 | 20.0676 / 0.6339 / 0.061640 | **20.5233** / **0.6901** / 0.665876 |
####8x scaling factor
### 8x scaling factor
|Div2K: 0006.png | size: 1356x2040 | |
|:-------------:|:-------------------:|:-------------:|
@@ -139,5 +130,3 @@ Images
|PSRN / SSIM / Speed (CPU)| 26.3139 / **0.8033** / 0.001107| 23.8291 / 0.7340 / **0.000611** |
|![Lanczos interpolation](images/lanczos_div2k.jpg)| ![LapSRN](images/lapsrn_div2k.jpg) | |
|26.1565 / 0.7962 / 0.004782| **26.7046** / 0.7987 / 2.274290 | |
</center>

9
modules/face/include/opencv2/face/facemark.hpp

@@ -12,12 +12,6 @@ Mentor: Delia Passalacqua
#ifndef __OPENCV_FACELANDMARK_HPP__
#define __OPENCV_FACELANDMARK_HPP__
/**
@defgroup face Face Analysis
- @ref tutorial_table_of_content_facemark
- The Facemark API
*/
#include "opencv2/core.hpp"
#include <vector>
@@ -25,6 +19,8 @@ Mentor: Delia Passalacqua
namespace cv {
namespace face {
//! @addtogroup face
//! @{
/** @brief Abstract base class for all facemark models
@@ -88,6 +84,7 @@ CV_EXPORTS_W Ptr<Facemark> createFacemarkLBF();
//! construct a Kazemi facemark detector
CV_EXPORTS_W Ptr<Facemark> createFacemarkKazemi();
//! @}
} // face
} // cv
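
Not part of the diff: a minimal sketch of the Facemark API declared in this header; the model path and the pre-detected face rectangle are placeholders.

```cpp
#include <opencv2/face.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Ptr<cv::face::Facemark> facemark = cv::face::createFacemarkLBF();
    facemark->loadModel("lbf_landmarks.model"); // placeholder model file

    cv::Mat image = cv::imread("face.jpg");
    // Faces normally come from a detector; a full-frame rect stands in here.
    std::vector<cv::Rect> faces = { cv::Rect(0, 0, image.cols, image.rows) };
    std::vector<std::vector<cv::Point2f>> landmarks;
    facemark->fit(image, faces, landmarks); // landmarks[i] belongs to faces[i]
    return 0;
}
```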

6
modules/face/include/opencv2/face/facemark_train.hpp

@@ -12,12 +12,6 @@ Mentor: Delia Passalacqua
#ifndef __OPENCV_FACELANDMARKTRAIN_HPP__
#define __OPENCV_FACELANDMARKTRAIN_HPP__
/**
@defgroup face Face Analysis
- @ref tutorial_table_of_content_facemark
- The Facemark API
*/
#include "opencv2/face/facemark.hpp"
#include "opencv2/objdetect.hpp"
#include <vector>

6
modules/face/tutorials/face_landmark/face_landmark_trainer.markdown

@@ -21,7 +21,7 @@ The above format is similar to HELEN dataset which is used for training the mode
./sample_train_landmark_detector -annotations=/home/sukhad/Downloads/code/trainset/ -config=config.xml -face_cascade=lbpcascadefrontalface.xml -model=trained_model.dat -width=460 -height=460
```
### Description of command parameters
## Description of command parameters
> * **annotations** a : (REQUIRED) Path to annotations txt file [example - /data/annotations.txt]
> * **config** c : (REQUIRED) Path to configuration xml file containing parameters for training.[ example - /data/config.xml]
@@ -30,7 +30,7 @@ The above format is similar to HELEN dataset which is used for training the mode
> * **height** h : (OPTIONAL) The height which you want all images to get to scale the annotations. Large images are slow to process [default = 460]
> * **face_cascade** f (REQUIRED) Path to the face cascade xml file which you want to use as a detector.
### Description of training parameters
## Description of training parameters
The configuration file described above which is used while training contains the training parameters which are required for training.
@@ -49,7 +49,7 @@ The configuration file described above which is used while training contains the
To get more detailed description about the training parameters you can refer to the [Research paper](https://pdfs.semanticscholar.org/d78b/6a5b0dcaa81b1faea5fb0000045a62513567.pdf).
### Understanding code
## Understanding code
![](images/3.jpg)

10
modules/fuzzy/include/opencv2/fuzzy.hpp

@@ -52,19 +52,19 @@
Namespace for all functions is `ft`. The module brings implementation of the last image processing algorithms based on fuzzy mathematics. Method are named based on the pattern `FT`_degree_dimension`_`method.
@{
@{
@defgroup f0_math Math with F0-transform support
Fuzzy transform (\f$F^0\f$-transform) of the 0th degree transforms whole image to a matrix of its components. These components are used in latter computation where each of them represents average color of certain subarea.
Fuzzy transform (\f$F^0\f$-transform) of the 0th degree transforms whole image to a matrix of its components. These components are used in latter computation where each of them represents average color of certain subarea.
@defgroup f1_math Math with F1-transform support
Fuzzy transform (\f$F^1\f$-transform) of the 1th degree transforms whole image to a matrix of its components. Each component is polynomial of the 1th degree carrying information about average color and average gradient of certain subarea.
Fuzzy transform (\f$F^1\f$-transform) of the 1th degree transforms whole image to a matrix of its components. Each component is polynomial of the 1th degree carrying information about average color and average gradient of certain subarea.
@defgroup f_image Fuzzy image processing
Image proceesing based on fuzzy mathematics namely F-transform.
@}
Image proceesing based on fuzzy mathematics namely F-transform.
@}
*/
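
Not part of the diff: a sketch of the naming pattern described above (`FT`_degree_dimension`_`method), assuming the `ft::createKernel` and `ft::FT02D_process` entry points of this module.

```cpp
#include <opencv2/fuzzy.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // F0-transform of an image with a linear basic function of radius 2.
    cv::Mat image = cv::imread("input.png");
    cv::Mat kernel, output;
    cv::ft::createKernel(cv::ft::LINEAR, 2, kernel, image.channels());
    cv::ft::FT02D_process(image, kernel, output); // 0th degree, 2D, "process"
    return 0;
}
```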

14
modules/hdf/include/opencv2/hdf.hpp

@@ -41,17 +41,15 @@
This module provides storage routines for Hierarchical Data Format objects.
@{
@{
@defgroup hdf5 Hierarchical Data Format version 5
Hierarchical Data Format version 5
--------------------------------------------------------
Hierarchical Data Format version 5
--------------------------------------------------------
In order to use it, the hdf5 library has to be installed, which
means cmake should find it using `find_package(HDF5)` .
@}
In order to use it, the hdf5 library has to be installed, which
means cmake should find it using `find_package(HDF5)`.
@}
*/
#endif
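
Not part of the diff: a minimal write/read round trip through the `hdf5` group documented here, assuming the `cv::hdf::open` / `dswrite` / `dsread` interface; file and dataset names are placeholders.

```cpp
#include <opencv2/hdf.hpp>

int main()
{
    cv::Mat A = cv::Mat::eye(3, 3, CV_32F);
    cv::Ptr<cv::hdf::HDF5> h5 = cv::hdf::open("test.h5"); // placeholder file
    h5->dswrite(A, "mat");  // write dataset
    cv::Mat B;
    h5->dsread(B, "mat");   // read it back
    h5->close();
    return 0;
}
```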

1
modules/mcc/include/opencv2/mcc/checker_model.hpp

@@ -116,7 +116,6 @@ public:
virtual ~CCheckerDraw() {}
/** \brief Draws the checker to the given image.
* \param img image in color space BGR
* \return void
*/
CV_WRAP virtual void draw(InputOutputArray img) = 0;
/** \brief Create a new CCheckerDraw object.

7
modules/rgbd/include/opencv2/rgbd/dynafu.hpp

@@ -114,7 +114,6 @@ public:
virtual void renderSurface(OutputArray depthImage, OutputArray vertImage, OutputArray normImage, bool warp=true) = 0;
};
//! @}
}
}
#endif
} // dynafu::
} // cv::
#endif // __OPENCV_RGBD_DYNAFU_HPP__

17
modules/sfm/include/opencv2/sfm.hpp

@@ -75,7 +75,7 @@ This module has been originally developed as a project for Google Summer of Code
- Notice that it is compiled only when Eigen, GLog and GFlags are correctly installed.\n
Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
@{
@{
@defgroup conditioning Conditioning
@defgroup fundamental Fundamental
@defgroup io Input/Output
@@ -85,18 +85,17 @@ This module has been originally developed as a project for Google Summer of Code
@defgroup triangulation Triangulation
@defgroup reconstruction Reconstruction
@note
- Notice that it is compiled only when Ceres Solver is correctly installed.\n
Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
@note
- Notice that it is compiled only when Ceres Solver is correctly installed.\n
Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
@defgroup simple_pipeline Simple Pipeline
@note
- Notice that it is compiled only when Ceres Solver is correctly installed.\n
Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
@}
@note
- Notice that it is compiled only when Ceres Solver is correctly installed.\n
Check installation instructions in the following tutorial: @ref tutorial_sfm_installation
@}
*/
#endif

6
modules/stereo/include/opencv2/stereo/quasi_dense_stereo.hpp

@@ -18,6 +18,7 @@ namespace cv
{
namespace stereo
{
/** \addtogroup stereo
* @{
*/
@@ -190,9 +191,8 @@ public:
CV_PROP_RW PropagationParameters Param;
};
} //namespace cv
} //namespace stereo
/** @}*/
} //namespace cv
} //namespace stereo
#endif // __OPENCV_QUASI_DENSE_STEREO_H__
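
Not part of the diff: a sketch of the quasi-dense pipeline behind the `PropagationParameters` shown above; the image paths are placeholders and the no-argument `getDisparity()` overload is an assumption.

```cpp
#include <opencv2/stereo/quasi_dense_stereo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat left  = cv::imread("left.png");
    cv::Mat right = cv::imread("right.png");
    cv::Ptr<cv::stereo::QuasiDenseStereo> qds =
        cv::stereo::QuasiDenseStereo::create(left.size());
    qds->process(left, right);               // sparse-to-quasi-dense matching
    cv::Mat disparity = qds->getDisparity(); // assumed no-arg overload
    return 0;
}
```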

86
modules/text/include/opencv2/text.hpp

@@ -52,49 +52,49 @@ scene images.
@{
@defgroup text_detect Scene Text Detection
Class-specific Extremal Regions for Scene Text Detection
--------------------------------------------------------
The scene text detection algorithm described below has been initially proposed by Lukás Neumann &
Jiri Matas @cite Neumann11. The main idea behind Class-specific Extremal Regions is similar to the MSER
in that suitable Extremal Regions (ERs) are selected from the whole component tree of the image.
However, this technique differs from MSER in that selection of suitable ERs is done by a sequential
classifier trained for character detection, i.e. dropping the stability requirement of MSERs and
selecting class-specific (not necessarily stable) regions.
The component tree of an image is constructed by thresholding by an increasing value step-by-step
from 0 to 255 and then linking the obtained connected components from successive levels in a
hierarchy by their inclusion relation:
![image](pics/component_tree.png)
The component tree may contain a huge number of regions even for a very simple image as shown in
the previous image. This number can easily reach the order of 1 x 10\^6 regions for an average 1
Megapixel image. In order to efficiently select suitable regions among all the ERs the algorithm
make use of a sequential classifier with two differentiated stages.
In the first stage incrementally computable descriptors (area, perimeter, bounding box, and Euler's
number) are computed (in O(1)) for each region r and used as features for a classifier which
estimates the class-conditional probability p(r|character). Only the ERs which correspond to local
maximum of the probability p(r|character) are selected (if their probability is above a global limit
p_min and the difference between local maximum and local minimum is greater than a delta_min
value).
In the second stage, the ERs that passed the first stage are classified into character and
non-character classes using more informative but also more computationally expensive features. (Hole
area ratio, convex hull ratio, and the number of outer boundary inflexion points).
This ER filtering process is done in different single-channel projections of the input image in
order to increase the character localization recall.
After the ER filtering is done on each input channel, character candidates must be grouped in
high-level text blocks (i.e. words, text lines, paragraphs, ...). The opencv_text module implements
two different grouping algorithms: the Exhaustive Search algorithm proposed in @cite Neumann12 for
grouping horizontally aligned text, and the method proposed by Lluis Gomez and Dimosthenis Karatzas
in @cite Gomez13 @cite Gomez14 for grouping arbitrary oriented text (see erGrouping).
To see the text detector at work, have a look at the textdetection demo:
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/textdetection.cpp>
Class-specific Extremal Regions for Scene Text Detection
--------------------------------------------------------
The scene text detection algorithm described below has been initially proposed by Lukás Neumann &
Jiri Matas @cite Neumann11. The main idea behind Class-specific Extremal Regions is similar to the MSER
in that suitable Extremal Regions (ERs) are selected from the whole component tree of the image.
However, this technique differs from MSER in that selection of suitable ERs is done by a sequential
classifier trained for character detection, i.e. dropping the stability requirement of MSERs and
selecting class-specific (not necessarily stable) regions.
The component tree of an image is constructed by thresholding by an increasing value step-by-step
from 0 to 255 and then linking the obtained connected components from successive levels in a
hierarchy by their inclusion relation:
![image](pics/component_tree.png)
The component tree may contain a huge number of regions even for a very simple image as shown in
the previous image. This number can easily reach the order of 1 x 10\^6 regions for an average 1
Megapixel image. In order to efficiently select suitable regions among all the ERs the algorithm
make use of a sequential classifier with two differentiated stages.
In the first stage incrementally computable descriptors (area, perimeter, bounding box, and Euler's
number) are computed (in O(1)) for each region r and used as features for a classifier which
estimates the class-conditional probability p(r|character). Only the ERs which correspond to local
maximum of the probability p(r|character) are selected (if their probability is above a global limit
p_min and the difference between local maximum and local minimum is greater than a delta_min
value).
In the second stage, the ERs that passed the first stage are classified into character and
non-character classes using more informative but also more computationally expensive features. (Hole
area ratio, convex hull ratio, and the number of outer boundary inflexion points).
This ER filtering process is done in different single-channel projections of the input image in
order to increase the character localization recall.
After the ER filtering is done on each input channel, character candidates must be grouped in
high-level text blocks (i.e. words, text lines, paragraphs, ...). The opencv_text module implements
two different grouping algorithms: the Exhaustive Search algorithm proposed in @cite Neumann12 for
grouping horizontally aligned text, and the method proposed by Lluis Gomez and Dimosthenis Karatzas
in @cite Gomez13 @cite Gomez14 for grouping arbitrary oriented text (see erGrouping).
To see the text detector at work, have a look at the textdetection demo:
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/textdetection.cpp>
@defgroup text_recognize Scene Text Recognition
@}
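
Not part of the diff: the two-stage ER pipeline described above, condensed from the linked textdetection demo; the classifier file names are the ones shipped with the module samples.

```cpp
#include <opencv2/text.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    using namespace cv;
    using namespace cv::text;

    Mat image = imread("scene.jpg");
    std::vector<Mat> channels;
    computeNMChannels(image, channels); // single-channel projections

    // Stage 1: incrementally computable descriptors (area, perimeter, ...).
    Ptr<ERFilter> er1 = createERFilterNM1(
        loadClassifierNM1("trained_classifierNM1.xml"),
        16, 0.00015f, 0.13f, 0.2f, true, 0.1f);
    // Stage 2: more informative but more expensive features.
    Ptr<ERFilter> er2 = createERFilterNM2(
        loadClassifierNM2("trained_classifierNM2.xml"), 0.5f);

    std::vector<std::vector<ERStat>> regions(channels.size());
    for (size_t c = 0; c < channels.size(); c++)
    {
        er1->run(channels[c], regions[c]);
        er2->run(channels[c], regions[c]);
    }

    // Group character candidates into horizontally aligned text blocks.
    std::vector<std::vector<Vec2i>> groups;
    std::vector<Rect> boxes;
    erGrouping(image, channels, regions, groups, boxes,
               ERGROUPING_ORIENTATION_HORIZ);
    return 0;
}
```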

1
modules/text/include/opencv2/text/ocr.hpp

@@ -363,7 +363,6 @@ CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifierCNN(cons
*/
CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifier(const String& filename, int classifier);
//! @}
/** @brief Utility function to create a tailored language model transitions table from a given list of words (lexicon).
*

32
modules/videostab/include/opencv2/videostab.hpp

@@ -44,7 +44,7 @@
#define OPENCV_VIDEOSTAB_HPP
/**
@defgroup videostab Video Stabilization
@defgroup videostab Video Stabilization
The video stabilization module contains a set of functions and classes that can be used to solve the
problem of video stabilization. There are a few methods implemented, most of them are described in
@@ -53,26 +53,24 @@ paper methods.
### References
1. "Full-Frame Video Stabilization with Motion Inpainting"
Yasuyuki Matsushita, Eyal Ofek, Weina Ge, Xiaoou Tang, Senior Member, and Heung-Yeung Shum
2. "Auto-Directed Video Stabilization with Robust L1 Optimal Camera Paths"
Matthias Grundmann, Vivek Kwatra, Irfan Essa
1. "Full-Frame Video Stabilization with Motion Inpainting"
Yasuyuki Matsushita, Eyal Ofek, Weina Ge, Xiaoou Tang, Senior Member, and Heung-Yeung Shum
2. "Auto-Directed Video Stabilization with Robust L1 Optimal Camera Paths"
Matthias Grundmann, Vivek Kwatra, Irfan Essa
@{
@defgroup videostab_motion Global Motion Estimation
@{
@defgroup videostab_motion Global Motion Estimation
The video stabilization module contains a set of functions and classes for global motion estimation
between point clouds or between images. In the last case features are extracted and matched
internally. For the sake of convenience the motion estimation functions are wrapped into classes.
Both the functions and the classes are available.
The video stabilization module contains a set of functions and classes for global motion estimation
between point clouds or between images. In the last case features are extracted and matched
internally. For the sake of convenience the motion estimation functions are wrapped into classes.
Both the functions and the classes are available.
@defgroup videostab_marching Fast Marching Method
The Fast Marching Method @cite Telea04 is used in of the video stabilization routines to do motion and
color inpainting. The method is implemented is a flexible way and it's made public for other users.
@}
@defgroup videostab_marching Fast Marching Method
The Fast Marching Method @cite Telea04 is used in of the video stabilization routines to do motion and
color inpainting. The method is implemented is a flexible way and it's made public for other users.
@}
*/
#include "opencv2/videostab/stabilizer.hpp"
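
Not part of the diff: a sketch of the "functions wrapped into classes" design noted above, pairing a RANSAC motion estimator with the keypoint-based wrapper; the frame files are placeholders.

```cpp
#include <opencv2/videostab.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    using namespace cv;
    using namespace cv::videostab;

    Mat frame0 = imread("frame0.png");
    Mat frame1 = imread("frame1.png");

    // Wrap the RANSAC-based estimator so that features are extracted and
    // matched internally, as the module description above explains.
    Ptr<MotionEstimatorRansacL2> est =
        makePtr<MotionEstimatorRansacL2>(MM_HOMOGRAPHY);
    Ptr<KeypointBasedMotionEstimator> kbEst =
        makePtr<KeypointBasedMotionEstimator>(est);

    bool ok = false;
    Mat motion = kbEst->estimate(frame0, frame1, &ok); // 3x3 global motion
    return 0;
}
```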

29
modules/viz/include/opencv2/viz.hpp

@@ -60,25 +60,24 @@ interact with it.
3D visualization window (see Viz3d) is used to display widgets (see Widget), and it provides several
methods to interact with scene and widgets.
@{
@{
@defgroup viz_widget Widget
In this section, the widget framework is explained. Widgets represent 2D or 3D objects, varying from
simple ones such as lines to complex ones such as point clouds and meshes.
In this section, the widget framework is explained. Widgets represent 2D or 3D objects, varying from
simple ones such as lines to complex ones such as point clouds and meshes.
Widgets are **implicitly shared**. Therefore, one can add a widget to the scene, and modify the
widget without re-adding the widget.
Widgets are **implicitly shared**. Therefore, one can add a widget to the scene, and modify the
widget without re-adding the widget.
@code
// Create a cloud widget
viz::WCloud cw(cloud, viz::Color::red());
// Display it in a window
myWindow.showWidget("CloudWidget1", cw);
// Modify it, and it will be modified in the window.
cw.setColor(viz::Color::yellow());
@endcode
@}
@code
// Create a cloud widget
viz::WCloud cw(cloud, viz::Color::red());
// Display it in a window
myWindow.showWidget("CloudWidget1", cw);
// Modify it, and it will be modified in the window.
cw.setColor(viz::Color::yellow());
@endcode
@}
*/
#endif /* OPENCV_VIZ_HPP */

13
modules/xfeatures2d/include/opencv2/xfeatures2d.hpp

@@ -46,19 +46,18 @@ the use of this software, even if advised of the possibility of such damage.
@{
@defgroup xfeatures2d_experiment Experimental 2D Features Algorithms
This section describes experimental algorithms for 2d feature detection.
This section describes experimental algorithms for 2d feature detection.
@defgroup xfeatures2d_nonfree Non-free 2D Features Algorithms
This section describes two popular algorithms for 2d feature detection, SIFT and SURF, that are
known to be patented. You need to set the OPENCV_ENABLE_NONFREE option in cmake to use those. Use them at your own risk.
This section describes two popular algorithms for 2d feature detection, SIFT and SURF, that are
known to be patented. You need to set the OPENCV_ENABLE_NONFREE option in cmake to use those. Use them at your own risk.
@defgroup xfeatures2d_match Experimental 2D Features Matching Algorithm
This section describes the following matching strategies:
- GMS: Grid-based Motion Statistics, @cite Bian2017gms
- LOGOS: Local geometric support for high-outlier spatial verification, @cite Lowry2018LOGOSLG
This section describes the following matching strategies:
- GMS: Grid-based Motion Statistics, @cite Bian2017gms
- LOGOS: Local geometric support for high-outlier spatial verification, @cite Lowry2018LOGOSLG
@}
*/
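
Not part of the diff: a minimal sketch for the non-free group mentioned above; it only works in builds configured with OPENCV_ENABLE_NONFREE, and the Hessian threshold is just a common starting value.

```cpp
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
    cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(400.0);
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    surf->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
    return 0;
}
```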

3
modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp

@@ -50,6 +50,9 @@ namespace cv
namespace xfeatures2d
{
//! @addtogroup xfeatures2d_nonfree
//! @{
/** @brief Class for extracting Speeded Up Robust Features from an image @cite Bay06 .
The algorithm parameters:

36
modules/ximgproc/include/opencv2/ximgproc.hpp

@@ -65,12 +65,13 @@
#include "ximgproc/find_ellipses.hpp"
/** @defgroup ximgproc Extended Image Processing
@{
/**
@defgroup ximgproc Extended Image Processing
@{
@defgroup ximgproc_edge Structured forests for fast edge detection
This module contains implementations of modern structured edge detection algorithms,
i.e. algorithms which somehow takes into account pixel affinities in natural images.
This module contains implementations of modern structured edge detection algorithms,
i.e. algorithms which somehow takes into account pixel affinities in natural images.
@defgroup ximgproc_edgeboxes EdgeBoxes
@@ -84,16 +85,16 @@ i.e. algorithms which somehow takes into account pixel affinities in natural ima
@defgroup ximgproc_edge_drawing EdgeDrawing
EDGE DRAWING LIBRARY FOR GEOMETRIC FEATURE EXTRACTION AND VALIDATION
EDGE DRAWING LIBRARY FOR GEOMETRIC FEATURE EXTRACTION AND VALIDATION
Edge Drawing (ED) algorithm is an proactive approach on edge detection problem. In contrast to many other existing edge detection algorithms which follow a subtractive
approach (i.e. after applying gradient filters onto an image eliminating pixels w.r.t. several rules, e.g. non-maximal suppression and hysteresis in Canny), ED algorithm
works via an additive strategy, i.e. it picks edge pixels one by one, hence the name Edge Drawing. Then we process those random shaped edge segments to extract higher level
edge features, i.e. lines, circles, ellipses, etc. The popular method of extraction edge pixels from the thresholded gradient magnitudes is non-maximal supression that tests
every pixel whether it has the maximum gradient response along its gradient direction and eliminates if it does not. However, this method does not check status of the
neighboring pixels, and therefore might result low quality (in terms of edge continuity, smoothness, thinness, localization) edge segments. Instead of non-maximal supression,
ED points a set of edge pixels and join them by maximizing the total gradient response of edge segments. Therefore it can extract high quality edge segments without need for
an additional hysteresis step.
Edge Drawing (ED) algorithm is an proactive approach on edge detection problem. In contrast to many other existing edge detection algorithms which follow a subtractive
approach (i.e. after applying gradient filters onto an image eliminating pixels w.r.t. several rules, e.g. non-maximal suppression and hysteresis in Canny), ED algorithm
works via an additive strategy, i.e. it picks edge pixels one by one, hence the name Edge Drawing. Then we process those random shaped edge segments to extract higher level
edge features, i.e. lines, circles, ellipses, etc. The popular method of extraction edge pixels from the thresholded gradient magnitudes is non-maximal supression that tests
every pixel whether it has the maximum gradient response along its gradient direction and eliminates if it does not. However, this method does not check status of the
neighboring pixels, and therefore might result low quality (in terms of edge continuity, smoothness, thinness, localization) edge segments. Instead of non-maximal supression,
ED points a set of edge pixels and join them by maximizing the total gradient response of edge segments. Therefore it can extract high quality edge segments without need for
an additional hysteresis step.
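
Not part of the diff: the additive strategy described above, sketched with the module's EdgeDrawing interface; detection runs once on edge segments, and lines and ellipses are then extracted from them.

```cpp
#include <opencv2/ximgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    cv::Ptr<cv::ximgproc::EdgeDrawing> ed = cv::ximgproc::createEdgeDrawing();
    ed->detectEdges(gray);            // pick edge pixels one by one (additive)
    std::vector<cv::Vec4f> lines;
    ed->detectLines(lines);           // higher-level features from the segments
    std::vector<cv::Vec6d> ellipses;
    ed->detectEllipses(ellipses);
    return 0;
}
```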
@defgroup ximgproc_fourier Fourier descriptors
@@ -115,8 +116,7 @@ an additional hysteresis step.
The size of the original image is required for compatibility with the imgproc functions when the boundary handling requires that pixel outside the image boundary are
"on".
@}
@}
*/
namespace cv
@@ -124,6 +124,9 @@ namespace cv
namespace ximgproc
{
//! @addtogroup ximgproc
//! @{
enum ThinningTypes{
THINNING_ZHANGSUEN = 0, // Thinning technique of Zhang-Suen
THINNING_GUOHALL = 1 // Thinning technique of Guo-Hall
@@ -139,9 +142,6 @@ enum LocalBinarizationMethods{
BINARIZATION_NICK = 3 //!< NICK technique. See @cite Khurshid2009 .
};
//! @addtogroup ximgproc
//! @{
/** @brief Performs thresholding on input images using Niblack's technique or some of the
popular variations it inspired.
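
Not part of the diff: a sketch of the thresholding entry point this doc comment introduces; the window size, k value and the NICK variant are arbitrary example choices.

```cpp
#include <opencv2/ximgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat gray = cv::imread("text.png", cv::IMREAD_GRAYSCALE), binary;
    cv::ximgproc::niBlackThreshold(gray, binary, 255, cv::THRESH_BINARY,
                                   7 /*blockSize*/, 0.8 /*k*/,
                                   cv::ximgproc::BINARIZATION_NICK);
    return 0;
}
```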

2
modules/ximgproc/include/opencv2/ximgproc/color_match.hpp

@@ -61,6 +61,8 @@ CV_EXPORTS_W void qdft(InputArray img, OutputArray qimg, int flags, bool sideL
*/
CV_EXPORTS_W void colorMatchTemplate(InputArray img, InputArray templ, OutputArray result);
//! @}
}
}
#endif

2
modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp

@@ -71,6 +71,8 @@ CV_EXPORTS_W void GradientDericheY(InputArray op, OutputArray dst, double alpha,
*/
CV_EXPORTS_W void GradientDericheX(InputArray op, OutputArray dst, double alpha,double omega);
//! @}
}
}
#endif

4
modules/ximgproc/include/opencv2/ximgproc/edgepreserving_filter.hpp

@@ -26,8 +26,8 @@ namespace cv { namespace ximgproc {
*/
CV_EXPORTS_W void edgePreservingFilter( InputArray src, OutputArray dst, int d, double threshold );
}} // namespace
//! @}
}} // namespace
#endif

3
modules/ximgproc/include/opencv2/ximgproc/fast_hough_transform.hpp

@@ -82,8 +82,7 @@ enum AngleRangeOption
* two operands. Formally, a binary operation @f$ f @f$ on a set @f$ S @f$
* is a binary relation that maps elements of the Cartesian product
* @f$ S \times S @f$ to @f$ S @f$:
* @f[ f: S \times S \to S @f]
* @ingroup MinUtils_MathOper
* @f[ f: S \times S \to S @f]
*/
enum HoughOp
{

2
modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp

@@ -61,6 +61,8 @@ namespace ximgproc {
CV_EXPORTS void GradientPaillouY(InputArray op, OutputArray _dst, double alpha, double omega);
CV_EXPORTS void GradientPaillouX(InputArray op, OutputArray _dst, double alpha, double omega);
//! @}
}
}
#endif

2
modules/ximgproc/include/opencv2/ximgproc/peilin.hpp

@@ -27,6 +27,8 @@ namespace cv { namespace ximgproc {
/** @overload */
CV_EXPORTS_W void PeiLinNormalization ( InputArray I, OutputArray T );
//! @}
}} // namespace
#endif

2
modules/ximgproc/include/opencv2/ximgproc/run_length_morphology.hpp

@@ -113,6 +113,8 @@ CV_EXPORTS void createRLEImage(const std::vector<cv::Point3i>& runs, OutputArray
CV_EXPORTS void morphologyEx(InputArray rlSrc, OutputArray rlDest, int op, InputArray rlKernel,
bool bBoundaryOnForErosion = true, Point anchor = Point(0,0));
//! @}
}
}
}
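
Not part of the diff: a sketch of run-length morphology around the `morphologyEx` shown above; `rl::threshold`, `rl::getStructuringElement` and `rl::paint` are companion helpers from this header and are assumed here.

```cpp
#include <opencv2/ximgproc/run_length_morphology.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    using namespace cv;
    Mat gray = imread("blobs.png", IMREAD_GRAYSCALE);
    Mat rlSrc, rlDst;
    // Encode a binarized image as run lengths, then erode it in RLE form.
    ximgproc::rl::threshold(gray, rlSrc, 128, THRESH_BINARY);
    Mat rlKernel = ximgproc::rl::getStructuringElement(MORPH_RECT, Size(5, 5));
    ximgproc::rl::morphologyEx(rlSrc, rlDst, MORPH_ERODE, rlKernel);
    Mat result = Mat::zeros(gray.size(), CV_8UC1);
    ximgproc::rl::paint(result, rlDst, Scalar(255)); // back to a dense image
    return 0;
}
```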
