--- /tmp/opencv-4.5.4+dfsg-1uqtgbebt/debian/opencv-doc_4.5.4+dfsg-1_all.deb +++ opencv-doc_4.5.4+dfsg-1_all.deb ├── file list │ @@ -1,3 +1,3 @@ │ -rw-r--r-- 0 0 0 4 2021-10-18 02:54:56.000000 debian-binary │ --rw-r--r-- 0 0 0 264764 2021-10-18 02:54:56.000000 control.tar.xz │ --rw-r--r-- 0 0 0 96796632 2021-10-18 02:54:56.000000 data.tar.xz │ +-rw-r--r-- 0 0 0 264792 2021-10-18 02:54:56.000000 control.tar.xz │ +-rw-r--r-- 0 0 0 96796756 2021-10-18 02:54:56.000000 data.tar.xz ├── control.tar.xz │ ├── control.tar │ │ ├── ./md5sums │ │ │ ├── ./md5sums │ │ │ │┄ Files differ ├── data.tar.xz │ ├── data.tar │ │ ├── ./usr/share/doc/opencv-doc/opencv4/html/da/d58/deprecated.html │ │ │ @@ -176,20 +176,20 @@ │ │ │
G_TYPED_KERNEL
that is used for declaring any G-API Operation. detectedCorners
- List of detected marker corners of the board.detectedIds
- List of identifiers for each marker.objPoints
- Vector of vectors of board marker points in the board coordinate space.imgPoints
- Vector of vectors of the projections of board marker corner points.public static Dictionary getPredefinedDictionary(int dict)│ │ │ -
dict
- automatically generatedpublic static Dictionary custom_dictionary(int nMarkers, │ │ │ - int markerSize, │ │ │ - int randomSeed)│ │ │ -
nMarkers
- automatically generatedmarkerSize
- automatically generatedrandomSeed
- automatically generatedpublic static Dictionary custom_dictionary(int nMarkers, │ │ │ - int markerSize)│ │ │ -
nMarkers
- automatically generatedmarkerSize
- automatically generatedpublic static Dictionary custom_dictionary_from(int nMarkers, │ │ │ - int markerSize, │ │ │ - Dictionary baseDictionary, │ │ │ - int randomSeed)│ │ │ -
nMarkers
- number of markers in the dictionarymarkerSize
- number of bits per dimension of each markerbaseDictionary
- Include the markers in this dictionary at the beginning (optional)randomSeed
- a user-supplied seed for the RNG()
│ │ │ -
│ │ │ - This function creates a new dictionary composed of nMarkers markers, each composed
│ │ │ - of markerSize x markerSize bits. If baseDictionary is provided, its markers are directly
│ │ │ - included and the rest are generated based on them. If the size of baseDictionary is greater
│ │ │ - than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.public static Dictionary custom_dictionary_from(int nMarkers, │ │ │ - int markerSize, │ │ │ - Dictionary baseDictionary)│ │ │ -
nMarkers
- number of markers in the dictionarymarkerSize
- number of bits per dimension of each markerbaseDictionary
- Include the markers in this dictionary at the beginning (optional)
│ │ │ -
│ │ │ - This function creates a new dictionary composed of nMarkers markers, each composed
│ │ │ - of markerSize x markerSize bits. If baseDictionary is provided, its markers are directly
│ │ │ - included and the rest are generated based on them. If the size of baseDictionary is greater
│ │ │ - than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.public static int interpolateCornersCharuco(java.util.List<Mat> markerCorners, │ │ │ @@ -3747,15 +3641,15 @@ │ │ │ Useful for debugging purposes. │ │ │ │ │ │
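As a hedged usage sketch for the custom dictionary overloads documented above (editor's illustration, not part of the packaged docs; it assumes the org.opencv.aruco bindings shipped with these Javadocs and the native library on the path):

```java
import org.opencv.aruco.Aruco;
import org.opencv.aruco.Dictionary;
import org.opencv.core.Core;

public class CustomDictSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Sketch: start from a stock 5x5 dictionary with 50 markers...
        Dictionary base = Aruco.getPredefinedDictionary(Aruco.DICT_5X5_50);
        // ...and grow it to 60 markers of 5x5 bits: the first 50 are taken
        // from base, the remaining 10 are generated from the given seed.
        Dictionary extended = Aruco.custom_dictionary_from(60, 5, base, 42);
    }
}
```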
public static boolean testCharucoCornersCollinear(CharucoBoard _board, │ │ │ Mat _charucoIds)│ │ │
public static Dictionary getPredefinedDictionary(int dict)│ │ │ +
dict
- automatically generatedpublic static Dictionary custom_dictionary(int nMarkers, │ │ │ + int markerSize, │ │ │ + int randomSeed)│ │ │ +
nMarkers
- automatically generatedmarkerSize
- automatically generatedrandomSeed
- automatically generatedpublic static Dictionary custom_dictionary(int nMarkers, │ │ │ + int markerSize)│ │ │ +
nMarkers
- automatically generatedmarkerSize
- automatically generatedpublic static Dictionary custom_dictionary_from(int nMarkers, │ │ │ + int markerSize, │ │ │ + Dictionary baseDictionary, │ │ │ + int randomSeed)│ │ │ +
nMarkers
- number of markers in the dictionarymarkerSize
- number of bits per dimension of each markerbaseDictionary
- Include the markers in this dictionary at the beginning (optional)randomSeed
- a user-supplied seed for the RNG()
│ │ │ +
│ │ │ + This function creates a new dictionary composed of nMarkers markers, each composed
│ │ │ + of markerSize x markerSize bits. If baseDictionary is provided, its markers are directly
│ │ │ + included and the rest are generated based on them. If the size of baseDictionary is greater
│ │ │ + than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.public static Dictionary custom_dictionary_from(int nMarkers, │ │ │ + int markerSize, │ │ │ + Dictionary baseDictionary)│ │ │ +
nMarkers
- number of markers in the dictionarymarkerSize
- number of bits per dimension of each markerbaseDictionary
- Include the markers in this dictionary at the beginning (optional)
│ │ │ +
│ │ │ + This function creates a new dictionary composed of nMarkers markers, each composed
│ │ │ + of markerSize x markerSize bits. If baseDictionary is provided, its markers are directly
│ │ │ + included and the rest are generated based on them. If the size of baseDictionary is greater
│ │ │ + than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.public static Facemark createFacemarkAAM()│ │ │ +
public static Facemark createFacemarkLBF()│ │ │ +
public static Facemark createFacemarkKazemi()│ │ │ +
public static boolean getFacesHAAR(Mat image, │ │ │ @@ -775,15 +802,15 @@ │ │ │ │ │ │ │ │ │
public static void drawFacemarks(Mat image, │ │ │ Mat points)│ │ │
public static Facemark createFacemarkAAM()│ │ │ -
public static Facemark createFacemarkLBF()│ │ │ -
public static Facemark createFacemarkKazemi()│ │ │ -
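A minimal sketch for the three Facemark factory functions above (editor's illustration, not part of this diff; the model file name is a placeholder and a face detector is assumed to have filled `faces` beforehand):

```java
import java.util.ArrayList;
import java.util.List;
import org.opencv.core.*;
import org.opencv.face.Face;
import org.opencv.face.Facemark;
import org.opencv.imgcodecs.Imgcodecs;

public class FacemarkSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Facemark fm = Face.createFacemarkLBF();
        fm.loadModel("lbfmodel.yaml");       // placeholder pre-trained model file
        Mat img = Imgcodecs.imread("face.jpg");
        MatOfRect faces = new MatOfRect();   // assume a face detector filled this
        List<MatOfPoint2f> landmarks = new ArrayList<>();
        if (fm.fit(img, faces, landmarks)) { // one landmark set per detected face
            Face.drawFacemarks(img, landmarks.get(0));
        }
    }
}
```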
public static void radialVarianceHash(Mat inputArr, │ │ │ - Mat outputArr, │ │ │ - double sigma, │ │ │ - int numOfAngleLine)│ │ │ -
public static void averageHash(Mat inputArr, │ │ │ + Mat outputArr)│ │ │ +
inputArr
- input image for which to compute the hash value;
│ │ │ - the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of inputsigma
- Gaussian kernel standard deviationnumOfAngleLine
- The number of angles to considerinputArr
- input image for which to compute the hash value; the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of input; it will contain 16 hex digits, and the return type is CV_8Upublic static void radialVarianceHash(Mat inputArr, │ │ │ - Mat outputArr, │ │ │ - double sigma)│ │ │ -
public static void blockMeanHash(Mat inputArr, │ │ │ + Mat outputArr, │ │ │ + int mode)│ │ │ +
inputArr
- input image for which to compute the hash value;
│ │ │ - the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of inputsigma
- Gaussian kernel standard deviationinputArr
- input image for which to compute the hash value; the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of input; it will contain 16 hex digits, and the return type is CV_8Umode
- the modepublic static void radialVarianceHash(Mat inputArr, │ │ │ - Mat outputArr)│ │ │ -
public static void blockMeanHash(Mat inputArr, │ │ │ + Mat outputArr)│ │ │ +
inputArr
- input image for which to compute the hash value;
│ │ │ - the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of inputinputArr
- input image for which to compute the hash value; the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of input; it will contain 16 hex digits, and the return type is CV_8Upublic static void pHash(Mat inputArr, │ │ │ - Mat outputArr)│ │ │ -
public static void colorMomentHash(Mat inputArr, │ │ │ + Mat outputArr)│ │ │ +
inputArr
- input image for which to compute the hash value;
│ │ │ - the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of input; it will contain 8 uchar valuesoutputArr
- 42 hash values with type CV_64F (double)inputArr
- input image for which to compute the hash value;
│ │ │ the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of input; it will contain 16 hex
│ │ │ digits, and the return type is CV_8Upublic static void colorMomentHash(Mat inputArr, │ │ │ - Mat outputArr)│ │ │ -
public static void pHash(Mat inputArr, │ │ │ + Mat outputArr)│ │ │ +
inputArr
- input image for which to compute the hash value;
│ │ │ - the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- 42 hash values with type CV_64F (double)outputArr
- Hash value of input; it will contain 8 uchar valuespublic static void blockMeanHash(Mat inputArr, │ │ │ - Mat outputArr, │ │ │ - int mode)│ │ │ -
public static void radialVarianceHash(Mat inputArr, │ │ │ + Mat outputArr, │ │ │ + double sigma, │ │ │ + int numOfAngleLine)│ │ │ +
inputArr
- input image for which to compute the hash value; the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of input; it will contain 16 hex digits, and the return type is CV_8Umode
- the modeinputArr
- input image for which to compute the hash value;
│ │ │ + the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of inputsigma
- Gaussian kernel standard deviationnumOfAngleLine
- The number of angles to considerpublic static void blockMeanHash(Mat inputArr, │ │ │ - Mat outputArr)│ │ │ -
public static void radialVarianceHash(Mat inputArr, │ │ │ + Mat outputArr, │ │ │ + double sigma)│ │ │ +
inputArr
- input image for which to compute the hash value; the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of input; it will contain 16 hex digits, and the return type is CV_8UinputArr
- input image for which to compute the hash value;
│ │ │ + the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of inputsigma
- Gaussian kernel standard deviationpublic static void averageHash(Mat inputArr, │ │ │ - Mat outputArr)│ │ │ -
public static void radialVarianceHash(Mat inputArr, │ │ │ + Mat outputArr)│ │ │ +
inputArr
- input image for which to compute the hash value; the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of input; it will contain 16 hex digits, and the return type is CV_8UinputArr
- input image for which to compute the hash value;
│ │ │ + the type should be CV_8UC4, CV_8UC3 or CV_8UC1.outputArr
- Hash value of inputpublic static final int DECODE_3D_UNDERWORLD│ │ │ -
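A minimal sketch of the image-hash functions documented above (editor's illustration; the class name Img_hash assumes the org.opencv.img_hash bindings, and a smaller Hamming distance between the hash rows means more similar images):

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.img_hash.Img_hash;

public class HashSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat a = Imgcodecs.imread("a.png"), b = Imgcodecs.imread("b.png");
        Mat ha = new Mat(), hb = new Mat();
        Img_hash.averageHash(a, ha);   // CV_8U hash row, as described above
        Img_hash.averageHash(b, hb);
        double dist = Core.norm(ha, hb, Core.NORM_HAMMING);
        System.out.println("hamming distance: " + dist);
    }
}
```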
public static final int FTP│ │ │ @@ -271,24 +258,37 @@ │ │ │
public static final int FAPS│ │ │
public static final int DECODE_3D_UNDERWORLD│ │ │ +
public static final int OCR_LEVEL_WORD│ │ │ +
public static final int ERFILTER_NM_RGBLGrad│ │ │
public static final int OCR_LEVEL_TEXTLINE│ │ │ +
public static final int ERFILTER_NM_IHSGrad│ │ │
public static final int ERFILTER_NM_RGBLGrad│ │ │ +
public static final int OCR_LEVEL_WORD│ │ │
public static final int ERFILTER_NM_IHSGrad│ │ │ +
public static final int OCR_LEVEL_TEXTLINE│ │ │
public static void detectTextSWT(Mat input, │ │ │ - MatOfRect result, │ │ │ - boolean dark_on_light, │ │ │ - Mat draw, │ │ │ - Mat chainBBs)│ │ │ -
input
- the input image with 3 channels.result
- a vector of resulting bounding boxes where the probability of finding text is highdark_on_light
- a boolean value signifying whether the text is darker or lighter than the background; it is observed to reverse the gradient obtained from the Scharr operator and to significantly affect the result.draw
- an optional Mat of type CV_8UC3 which visualises the detected letters using bounding boxes.chainBBs
- an optional parameter which chains the letter candidates according to heuristics in the paper and returns all possible regions where text is likely to occur.public static void detectTextSWT(Mat input, │ │ │ - MatOfRect result, │ │ │ - boolean dark_on_light, │ │ │ - Mat draw)│ │ │ -
input
- the input image with 3 channels.result
- a vector of resulting bounding boxes where the probability of finding text is highdark_on_light
- a boolean value signifying whether the text is darker or lighter than the background; it is observed to reverse the gradient obtained from the Scharr operator and to significantly affect the result.draw
- an optional Mat of type CV_8UC3 which visualises the detected letters using bounding boxes.public static void detectTextSWT(Mat input, │ │ │ - MatOfRect result, │ │ │ - boolean dark_on_light)│ │ │ -
input
- the input image with 3 channels.result
- a vector of resulting bounding boxes where the probability of finding text is highdark_on_light
- a boolean value signifying whether the text is darker or lighter than the background; it is observed to reverse the gradient obtained from the Scharr operator and to significantly affect the result.@Deprecated │ │ │ -public static OCRHMMDecoder_ClassifierCallback loadOCRHMMClassifierNM(java.lang.String filename)│ │ │ -
filename
- The XML or YAML file with the classifier model (e.g. OCRHMM_knn_model_data.xml)
│ │ │ -
│ │ │ - The KNN default classifier is based on the scene text recognition method proposed by Lukáš Neumann &
│ │ │ - Jiri Matas in [Neumann11b]. Basically, the region (contour) in the input image is normalized to a
│ │ │ - fixed size, while retaining the centroid and aspect ratio, in order to extract a feature vector
│ │ │ - based on gradient orientations along the chain-code of its perimeter. Then, the region is classified
│ │ │ - using a KNN model trained with synthetic data of rendered characters with different standard font
│ │ │ - types.@Deprecated │ │ │ -public static OCRHMMDecoder_ClassifierCallback loadOCRHMMClassifierCNN(java.lang.String filename)│ │ │ -
filename
- The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)
│ │ │ -
│ │ │ - The CNN default classifier is based on the scene text recognition method proposed by Adam Coates &
│ │ │ - Andrew Ng in [Coates11a]. The character classifier consists of a single-layer convolutional neural network and
│ │ │ - a linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions
│ │ │ - at each window location.public static OCRHMMDecoder_ClassifierCallback loadOCRHMMClassifier(java.lang.String filename, │ │ │ - int classifier)│ │ │ -
filename
- The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)classifier
- Can be one of classifier_type enum values.public static Mat createOCRHMMTransitionsTable(java.lang.String vocabulary, │ │ │ - java.util.List<java.lang.String> lexicon)│ │ │ -
vocabulary
- The language vocabulary (chars when ASCII English text).lexicon
- The list of words that are expected to be found in a particular image.
│ │ │ -
│ │ │ -
│ │ │ - The function calculates frequency statistics of character pairs from the given lexicon and fills the output transition_probabilities_table with them. The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods.
│ │ │ - Note:
│ │ │ - - (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from the ispell 42869 English words list):
│ │ │ - <https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml>public static OCRBeamSearchDecoder_ClassifierCallback loadOCRBeamSearchClassifierCNN(java.lang.String filename)│ │ │ -
filename
- The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)
│ │ │ -
│ │ │ - The CNN default classifier is based on the scene text recognition method proposed by Adam Coates &
│ │ │ - Andrew Ng in [Coates11a]. The character classifier consists of a single-layer convolutional neural network and
│ │ │ - a linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions
│ │ │ - at each window location.public static ERFilter createERFilterNM1(ERFilter_Callback cb, │ │ │ @@ -2026,15 +1851,15 @@ │ │ │
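For the detectTextSWT overloads documented in this hunk, a hedged sketch (editor's illustration, assuming the org.opencv.text bindings) is:

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.text.Text;

public class SwtSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat input = Imgcodecs.imread("sign.jpg");  // must have 3 channels
        MatOfRect boxes = new MatOfRect();
        Text.detectTextSWT(input, boxes, true);    // true = dark text on light bg
        System.out.println(boxes.toArray().length + " candidate boxes");
    }
}
```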
method
- Grouping method (see text::erGrouping_Modes). Can be one of ERGROUPING_ORIENTATION_HORIZ, ERGROUPING_ORIENTATION_ANY.public static void detectRegions(Mat image, │ │ │ ERFilter er_filter1, │ │ │ ERFilter er_filter2, │ │ │ MatOfRect groups_rects)│ │ │
image
- Source image where text blocks need to be extracted from. Should be CV_8UC3 (color).er_filter1
- Extremal Region Filter for the 1st stage classifier of N&M algorithm CITE: Neumann12er_filter2
- Extremal Region Filter for the 2nd stage classifier of N&M algorithm CITE: Neumann12groups_rects
- Output list of rectangle blocks with text@Deprecated │ │ │ +public static OCRHMMDecoder_ClassifierCallback loadOCRHMMClassifierNM(java.lang.String filename)│ │ │ +
filename
- The XML or YAML file with the classifier model (e.g. OCRHMM_knn_model_data.xml)
│ │ │ +
│ │ │ + The KNN default classifier is based on the scene text recognition method proposed by Lukáš Neumann &
│ │ │ + Jiri Matas in [Neumann11b]. Basically, the region (contour) in the input image is normalized to a
│ │ │ + fixed size, while retaining the centroid and aspect ratio, in order to extract a feature vector
│ │ │ + based on gradient orientations along the chain-code of its perimeter. Then, the region is classified
│ │ │ + using a KNN model trained with synthetic data of rendered characters with different standard font
│ │ │ + types.@Deprecated │ │ │ +public static OCRHMMDecoder_ClassifierCallback loadOCRHMMClassifierCNN(java.lang.String filename)│ │ │ +
filename
- The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)
│ │ │ +
│ │ │ + The CNN default classifier is based on the scene text recognition method proposed by Adam Coates &
│ │ │ + Andrew Ng in [Coates11a]. The character classifier consists of a single-layer convolutional neural network and
│ │ │ + a linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions
│ │ │ + at each window location.public static OCRHMMDecoder_ClassifierCallback loadOCRHMMClassifier(java.lang.String filename, │ │ │ + int classifier)│ │ │ +
filename
- The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)classifier
- Can be one of classifier_type enum values.public static Mat createOCRHMMTransitionsTable(java.lang.String vocabulary, │ │ │ + java.util.List<java.lang.String> lexicon)│ │ │ +
vocabulary
- The language vocabulary (chars when ASCII English text).lexicon
- The list of words that are expected to be found in a particular image.
│ │ │ +
│ │ │ +
│ │ │ + The function calculates frequency statistics of character pairs from the given lexicon and fills the output transition_probabilities_table with them. The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods.
│ │ │ + Note:
│ │ │ + - (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from the ispell 42869 English words list):
│ │ │ + <https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml>public static OCRBeamSearchDecoder_ClassifierCallback loadOCRBeamSearchClassifierCNN(java.lang.String filename)│ │ │ +
filename
- The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz)
│ │ │ +
│ │ │ + The CNN default classifier is based on the scene text recognition method proposed by Adam Coates &
│ │ │ + Andrew Ng in [Coates11a]. The character classifier consists of a single-layer convolutional neural network and
│ │ │ + a linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions
│ │ │ + at each window location.public static void detectTextSWT(Mat input, │ │ │ + MatOfRect result, │ │ │ + boolean dark_on_light, │ │ │ + Mat draw, │ │ │ + Mat chainBBs)│ │ │ +
input
- the input image with 3 channels.result
- a vector of resulting bounding boxes where the probability of finding text is highdark_on_light
- a boolean value signifying whether the text is darker or lighter than the background; it is observed to reverse the gradient obtained from the Scharr operator and to significantly affect the result.draw
- an optional Mat of type CV_8UC3 which visualises the detected letters using bounding boxes.chainBBs
- an optional parameter which chains the letter candidates according to heuristics in the paper and returns all possible regions where text is likely to occur.public static void detectTextSWT(Mat input, │ │ │ + MatOfRect result, │ │ │ + boolean dark_on_light, │ │ │ + Mat draw)│ │ │ +
input
- the input image with 3 channels.result
- a vector of resulting bounding boxes where the probability of finding text is highdark_on_light
- a boolean value signifying whether the text is darker or lighter than the background; it is observed to reverse the gradient obtained from the Scharr operator and to significantly affect the result.draw
- an optional Mat of type CV_8UC3 which visualises the detected letters using bounding boxes.public static void detectTextSWT(Mat input, │ │ │ + MatOfRect result, │ │ │ + boolean dark_on_light)│ │ │ +
input
- the input image with 3 channels.result
- a vector of resulting bounding boxes where the probability of finding text is highdark_on_light
- a boolean value signifying whether the text is darker or lighter than the background; it is observed to reverse the gradient obtained from the Scharr operator and to significantly affect the result.public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history, │ │ │ + double varThreshold, │ │ │ + boolean detectShadows)│ │ │ +
history
- Length of the history.varThreshold
- Threshold on the squared Mahalanobis distance between the pixel and the model
│ │ │ + to decide whether a pixel is well described by the background model. This parameter does not
│ │ │ + affect the background update.detectShadows
- If true, the algorithm will detect shadows and mark them. It decreases the
│ │ │ + speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history, │ │ │ + double varThreshold)│ │ │ +
history
- Length of the history.varThreshold
- Threshold on the squared Mahalanobis distance between the pixel and the model
│ │ │ + to decide whether a pixel is well described by the background model. This parameter does not
│ │ │ + affect the background update.
│ │ │ + speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history)│ │ │ +
history
- Length of the history.
│ │ │ + to decide whether a pixel is well described by the background model. This parameter does not
│ │ │ + affect the background update.
│ │ │ + speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2()│ │ │ +
public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history, │ │ │ + double dist2Threshold, │ │ │ + boolean detectShadows)│ │ │ +
history
- Length of the history.dist2Threshold
- Threshold on the squared distance between the pixel and the sample to decide
│ │ │ + whether a pixel is close to that sample. This parameter does not affect the background update.detectShadows
- If true, the algorithm will detect shadows and mark them. It decreases the
│ │ │ + speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history, │ │ │ + double dist2Threshold)│ │ │ +
history
- Length of the history.dist2Threshold
- Threshold on the squared distance between the pixel and the sample to decide
│ │ │ + whether a pixel is close to that sample. This parameter does not affect the background update.
│ │ │ + speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history)│ │ │ +
history
- Length of the history.
│ │ │ + whether a pixel is close to that sample. This parameter does not affect the background update.
│ │ │ + speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorKNN createBackgroundSubtractorKNN()│ │ │ +
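A sketch of the typical capture loop for the factory functions above (editor's illustration from org.opencv.video; camera index 0 is an assumption):

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.video.BackgroundSubtractorMOG2;
import org.opencv.video.Video;
import org.opencv.videoio.VideoCapture;

public class BgSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        BackgroundSubtractorMOG2 bg =
                Video.createBackgroundSubtractorMOG2(500, 16.0, true);
        VideoCapture cap = new VideoCapture(0);
        Mat frame = new Mat(), fgMask = new Mat();
        while (cap.read(frame)) {
            bg.apply(frame, fgMask); // foreground mask; shadows marked in gray
        }
    }
}
```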
public static RotatedRect CamShift(Mat probImage, │ │ │ @@ -1736,15 +1894,15 @@ │ │ │
public static boolean writeOpticalFlow(java.lang.String path, │ │ │ Mat flow)│ │ │
public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history, │ │ │ - double varThreshold, │ │ │ - boolean detectShadows)│ │ │ -
history
- Length of the history.varThreshold
- Threshold on the squared Mahalanobis distance between the pixel and the model
│ │ │ - to decide whether a pixel is well described by the background model. This parameter does not
│ │ │ - affect the background update.detectShadows
- If true, the algorithm will detect shadows and mark them. It decreases the
│ │ │ - speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history, │ │ │ - double varThreshold)│ │ │ -
history
- Length of the history.varThreshold
- Threshold on the squared Mahalanobis distance between the pixel and the model
│ │ │ - to decide whether a pixel is well described by the background model. This parameter does not
│ │ │ - affect the background update.
│ │ │ - speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history)│ │ │ -
history
- Length of the history.
│ │ │ - to decide whether a pixel is well described by the background model. This parameter does not
│ │ │ - affect the background update.
│ │ │ - speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2()│ │ │ -
public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history, │ │ │ - double dist2Threshold, │ │ │ - boolean detectShadows)│ │ │ -
history
- Length of the history.dist2Threshold
- Threshold on the squared distance between the pixel and the sample to decide
│ │ │ - whether a pixel is close to that sample. This parameter does not affect the background update.detectShadows
- If true, the algorithm will detect shadows and mark them. It decreases the
│ │ │ - speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history, │ │ │ - double dist2Threshold)│ │ │ -
history
- Length of the history.dist2Threshold
- Threshold on the squared distance between the pixel and the sample to decide
│ │ │ - whether a pixel is close to that sample. This parameter does not affect the background update.
│ │ │ - speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history)│ │ │ -
history
- Length of the history.
│ │ │ - whether a pixel is close to that sample. This parameter does not affect the background update.
│ │ │ - speed a bit, so if you do not need this feature, set the parameter to false.public static BackgroundSubtractorKNN createBackgroundSubtractorKNN()│ │ │ -
dst
- Destination image of the same size and the same number of channels as src.alpha
- The amount of time to step forward by on each iteration (normally, it's between 0 and 1).K
- sensitivity to the edgesniters
- The number of iterationspublic static void weightedMedianFilter(Mat joint, │ │ │ - Mat src, │ │ │ - Mat dst, │ │ │ - int r, │ │ │ - double sigma, │ │ │ - int weightType, │ │ │ - Mat mask)│ │ │ -
joint
- automatically generatedsrc
- automatically generateddst
- automatically generatedr
- automatically generatedsigma
- automatically generatedweightType
- automatically generatedmask
- automatically generatedpublic static void weightedMedianFilter(Mat joint, │ │ │ - Mat src, │ │ │ - Mat dst, │ │ │ - int r, │ │ │ - double sigma, │ │ │ - int weightType)│ │ │ -
joint
- automatically generatedsrc
- automatically generateddst
- automatically generatedr
- automatically generatedsigma
- automatically generatedweightType
- automatically generatedpublic static void weightedMedianFilter(Mat joint, │ │ │ - Mat src, │ │ │ - Mat dst, │ │ │ - int r, │ │ │ - double sigma)│ │ │ -
joint
- automatically generatedsrc
- automatically generateddst
- automatically generatedr
- automatically generatedsigma
- automatically generatedpublic static void weightedMedianFilter(Mat joint, │ │ │ - Mat src, │ │ │ - Mat dst, │ │ │ - int r)│ │ │ -
joint
- automatically generatedsrc
- automatically generateddst
- automatically generatedr
- automatically generatedpublic static RFFeatureGetter createRFFeatureGetter()│ │ │ -
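A sketch for the weightedMedianFilter overloads above (editor's illustration, org.opencv.ximgproc assumed); using the source as its own guidance ("joint") image is a common default:

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.ximgproc.Ximgproc;

public class WmfSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat src = Imgcodecs.imread("in.png");
        Mat dst = new Mat();
        Ximgproc.weightedMedianFilter(src, src, dst, 7); // joint = src, radius 7
        Imgcodecs.imwrite("out.png", dst);
    }
}
```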
public static StructuredEdgeDetection createStructuredEdgeDetection(java.lang.String model, │ │ │ - RFFeatureGetter howToGetFeatures)│ │ │ -
public static StructuredEdgeDetection createStructuredEdgeDetection(java.lang.String model)│ │ │ -
public static EdgeAwareInterpolator createEdgeAwareInterpolator()│ │ │ -
public static RICInterpolator createRICInterpolator()│ │ │ -
public static SuperpixelSLIC createSuperpixelSLIC(Mat image, │ │ │ - int algorithm, │ │ │ - int region_size, │ │ │ - float ruler)│ │ │ -
image
- Image to segmentalgorithm
- Chooses the algorithm variant to use:
│ │ │ - SLIC segments the image using a desired region_size; in addition, SLICO will optimize using an adaptive compactness factor,
│ │ │ - while MSLIC will optimize using manifold methods, resulting in more content-sensitive superpixels.region_size
- Chooses an average superpixel size measured in pixelsruler
- Chooses the enforcement of superpixel smoothness factor of superpixel
│ │ │ -
│ │ │ - The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
│ │ │ - superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ - computing iterations over the given image. For enhanced results it is recommended for color images to
│ │ │ - preprocess the image with a light Gaussian blur using a small 3 x 3 kernel and an additional conversion into
│ │ │ - CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
│ │ │ -
│ │ │ - ![image](pics/superpixels_slic.png)public static SuperpixelSLIC createSuperpixelSLIC(Mat image, │ │ │ - int algorithm, │ │ │ - int region_size)│ │ │ -
image
- Image to segmentalgorithm
- Chooses the algorithm variant to use:
│ │ │ - SLIC segments the image using a desired region_size; in addition, SLICO will optimize using an adaptive compactness factor,
│ │ │ - while MSLIC will optimize using manifold methods, resulting in more content-sensitive superpixels.region_size
- Chooses an average superpixel size measured in pixels
│ │ │ -
│ │ │ - The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
│ │ │ - superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ - computing iterations over the given image. For enhanced results it is recommended for color images to
│ │ │ - preprocess the image with a light Gaussian blur using a small 3 x 3 kernel and an additional conversion into
│ │ │ - CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
│ │ │ -
│ │ │ - ![image](pics/superpixels_slic.png)public static SuperpixelSLIC createSuperpixelSLIC(Mat image, │ │ │ - int algorithm)│ │ │ -
image
- Image to segmentalgorithm
- Chooses the algorithm variant to use:
│ │ │ - SLIC segments the image using a desired region_size; in addition, SLICO will optimize using an adaptive compactness factor,
│ │ │ - while MSLIC will optimize using manifold methods, resulting in more content-sensitive superpixels.
│ │ │ -
│ │ │ - The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
│ │ │ - superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ - computing iterations over the given image. For enhanced results it is recommended for color images to
│ │ │ - preprocess the image with a light Gaussian blur using a small 3 x 3 kernel and an additional conversion into
│ │ │ - CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
│ │ │ -
│ │ │ - ![image](pics/superpixels_slic.png)public static SuperpixelSLIC createSuperpixelSLIC(Mat image)│ │ │ -
image
- Image to segment
│ │ │ - SLIC segments the image using a desired region_size; in addition, SLICO will optimize using an adaptive compactness factor,
│ │ │ - while MSLIC will optimize using manifold methods, resulting in more content-sensitive superpixels.
│ │ │ -
│ │ │ - The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
│ │ │ - superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ - computing iterations over the given image. For enhanced results it is recommended for color images to
│ │ │ - preprocess the image with a light Gaussian blur using a small 3 x 3 kernel and an additional conversion into
│ │ │ - CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
│ │ │ -
│ │ │ - ![image](pics/superpixels_slic.png)public static GraphSegmentation createGraphSegmentation(double sigma, │ │ │ - float k, │ │ │ - int min_size)│ │ │ -
sigma
- The sigma parameter, used to smooth the imagek
- The k parameter of the algorithmmin_size
- The minimum size of segmentspublic static GraphSegmentation createGraphSegmentation(double sigma, │ │ │ - float k)│ │ │ -
sigma
- The sigma parameter, used to smooth the imagek
- The k parameter of the algorithmpublic static GraphSegmentation createGraphSegmentation(double sigma)│ │ │ -
sigma
- The sigma parameter, used to smooth the imagepublic static GraphSegmentation createGraphSegmentation()│ │ │ -
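Following the SLIC recommendation above (light Gaussian blur plus CieLAB conversion before segmentation), a hedged sketch (editor's illustration; the Ximgproc.SLICO constant is assumed from the same bindings) might be:

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.ximgproc.SuperpixelSLIC;
import org.opencv.ximgproc.Ximgproc;

public class SlicSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat bgr = Imgcodecs.imread("in.png"), lab = new Mat();
        Imgproc.GaussianBlur(bgr, bgr, new Size(3, 3), 0);
        Imgproc.cvtColor(bgr, lab, Imgproc.COLOR_BGR2Lab);
        SuperpixelSLIC slic =
                Ximgproc.createSuperpixelSLIC(lab, Ximgproc.SLICO, 20, 10.0f);
        slic.iterate();            // default iteration count
        Mat labels = new Mat();
        slic.getLabels(labels);    // per-pixel CV_32S superpixel label
    }
}
```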
public static SelectiveSearchSegmentationStrategyColor createSelectiveSearchSegmentationStrategyColor()│ │ │ -
public static SelectiveSearchSegmentationStrategySize createSelectiveSearchSegmentationStrategySize()│ │ │ -
public static SelectiveSearchSegmentationStrategyTexture createSelectiveSearchSegmentationStrategyTexture()│ │ │ -
public static SelectiveSearchSegmentationStrategyFill createSelectiveSearchSegmentationStrategyFill()│ │ │ -
public static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple()│ │ │ -
public static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(SelectiveSearchSegmentationStrategy s1)│ │ │ -
s1
- The first strategypublic static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(SelectiveSearchSegmentationStrategy s1, │ │ │ - SelectiveSearchSegmentationStrategy s2)│ │ │ -
s1
- The first strategys2
- The second strategypublic static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(SelectiveSearchSegmentationStrategy s1, │ │ │ - SelectiveSearchSegmentationStrategy s2, │ │ │ - SelectiveSearchSegmentationStrategy s3)│ │ │ -
s1
- The first strategys2
- The second strategys3
- The third strategypublic static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(SelectiveSearchSegmentationStrategy s1, │ │ │ - SelectiveSearchSegmentationStrategy s2, │ │ │ - SelectiveSearchSegmentationStrategy s3, │ │ │ - SelectiveSearchSegmentationStrategy s4)│ │ │ -
s1
- The first strategys2
- The second strategys3
- The third strategys4
- The fourth strategypublic static SelectiveSearchSegmentation createSelectiveSearchSegmentation()│ │ │ -
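A sketch combining the strategy factories above through the high-level SelectiveSearchSegmentation object (editor's illustration; the setBaseImage/switchToSelectiveSearchFast/process method names are assumed from the same bindings):

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.ximgproc.SelectiveSearchSegmentation;
import org.opencv.ximgproc.Ximgproc;

public class SelSearchSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        SelectiveSearchSegmentation ss =
                Ximgproc.createSelectiveSearchSegmentation();
        Mat img = Imgcodecs.imread("scene.jpg");
        ss.setBaseImage(img);
        ss.switchToSelectiveSearchFast(); // color+size+texture+fill strategies
        MatOfRect proposals = new MatOfRect();
        ss.process(proposals);
    }
}
```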
public static SuperpixelSEEDS createSuperpixelSEEDS(int image_width, │ │ │ - int image_height, │ │ │ - int image_channels, │ │ │ - int num_superpixels, │ │ │ - int num_levels, │ │ │ - int prior, │ │ │ - int histogram_bins, │ │ │ - boolean double_step)│ │ │ -
image_width
- Image width.image_height
- Image height.image_channels
- Number of channels of the image.num_superpixels
- Desired number of superpixels. Note that the actual number may be smaller
│ │ │ - due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
│ │ │ - get the actual number.num_levels
- Number of block levels. The more levels, the more accurate the segmentation,
│ │ │ - but it needs more memory and CPU time.prior
- enable 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior
│ │ │ - must be in the range [0, 5].histogram_bins
- Number of histogram bins.double_step
- If true, iterate each block level twice for higher accuracy.
│ │ │ -
│ │ │ - The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
│ │ │ - the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
│ │ │ - superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
│ │ │ - double_step.
│ │ │ -
│ │ │ - The number of levels in num_levels defines the number of block levels that the algorithm uses in the
│ │ │ - optimization. The initialization is a grid, in which the superpixels are equally distributed through
│ │ │ - the width and the height of the image. The larger blocks correspond to the superpixel size, and the
│ │ │ - levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
│ │ │ - recursively down to the smallest block level. An example of an initialization with 4 block levels is
│ │ │ - illustrated in the following figure.
│ │ │ -
│ │ │ - ![image](pics/superpixels_blocks.png)public static SuperpixelSEEDS createSuperpixelSEEDS(int image_width, │ │ │ - int image_height, │ │ │ - int image_channels, │ │ │ - int num_superpixels, │ │ │ - int num_levels, │ │ │ - int prior, │ │ │ - int histogram_bins)│ │ │ -
image_width
- Image width.image_height
- Image height.image_channels
- Number of channels of the image.num_superpixels
- Desired number of superpixels. Note that the actual number may be smaller
│ │ │ - due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
│ │ │ - get the actual number.num_levels
- Number of block levels. The more levels, the more accurate the segmentation,
│ │ │ - but it needs more memory and CPU time.prior
- enable 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior
│ │ │ - must be in the range [0, 5].histogram_bins
- Number of histogram bins.
│ │ │ -
│ │ │ - The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
│ │ │ - the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
│ │ │ - superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
│ │ │ - double_step.
│ │ │ -
│ │ │ - The number of levels in num_levels defines the number of block levels that the algorithm uses in the
│ │ │ - optimization. The initialization is a grid, in which the superpixels are equally distributed through
│ │ │ - the width and the height of the image. The larger blocks correspond to the superpixel size, and the
│ │ │ - levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
│ │ │ - recursively down to the smallest block level. An example of an initialization with 4 block levels is
│ │ │ - illustrated in the following figure.
│ │ │ -
│ │ │ - ![image](pics/superpixels_blocks.png)public static SuperpixelSEEDS createSuperpixelSEEDS(int image_width, │ │ │ - int image_height, │ │ │ - int image_channels, │ │ │ - int num_superpixels, │ │ │ - int num_levels, │ │ │ - int prior)│ │ │ -
image_width
- Image width.image_height
- Image height.image_channels
- Number of channels of the image.num_superpixels
- Desired number of superpixels. Note that the actual number may be smaller
│ │ │ - due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
│ │ │ - get the actual number.num_levels
- Number of block levels. The more levels, the more accurate the segmentation,
│ │ │ - but it needs more memory and CPU time.prior
- enable 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior
│ │ │ - must be in the range [0, 5].
│ │ │ -
│ │ │ - The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
│ │ │ - the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
│ │ │ - superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
│ │ │ - double_step.
│ │ │ -
│ │ │ - The number of levels in num_levels defines the number of block levels that the algorithm uses in the
│ │ │ - optimization. The initialization is a grid, in which the superpixels are equally distributed through
│ │ │ - the width and the height of the image. The larger blocks correspond to the superpixel size, and the
│ │ │ - levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
│ │ │ - recursively down to the smallest block level. An example of an initialization with 4 block levels is
│ │ │ - illustrated in the following figure.
│ │ │ -
│ │ │ - ![image](pics/superpixels_blocks.png)public static SuperpixelSEEDS createSuperpixelSEEDS(int image_width, │ │ │ - int image_height, │ │ │ - int image_channels, │ │ │ - int num_superpixels, │ │ │ - int num_levels)│ │ │ -
image_width
- Image width.image_height
- Image height.image_channels
- Number of channels of the image.num_superpixels
- Desired number of superpixels. Note that the actual number may be smaller
│ │ │ - due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
│ │ │ - get the actual number.num_levels
- Number of block levels. The more levels, the more accurate the segmentation,
│ │ │ - but it needs more memory and CPU time.
│ │ │ - must be in the range [0, 5].
│ │ │ -
│ │ │ - The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
│ │ │ - the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
│ │ │ - superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
│ │ │ - double_step.
│ │ │ -
│ │ │ - The number of levels in num_levels defines the number of block levels that the algorithm uses in the
│ │ │ - optimization. The initialization is a grid, in which the superpixels are equally distributed through
│ │ │ - the width and the height of the image. The larger blocks correspond to the superpixel size, and the
│ │ │ - levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
│ │ │ - recursively down to the smallest block level. An example of an initialization with 4 block levels is
│ │ │ - illustrated in the following figure.
│ │ │ -
│ │ │ - ![image](pics/superpixels_blocks.png)public static void PeiLinNormalization(Mat I, │ │ │ - Mat T)│ │ │ -
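A SEEDS sketch using the parameters explained above (editor's illustration); note that iterate() takes the image itself, since SEEDS refines the block levels per frame:

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.ximgproc.SuperpixelSEEDS;
import org.opencv.ximgproc.Ximgproc;

public class SeedsSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = Imgcodecs.imread("in.png");
        SuperpixelSEEDS seeds = Ximgproc.createSuperpixelSEEDS(
                img.cols(), img.rows(), img.channels(),
                400,  // desired superpixels; the actual count may be lower
                4);   // block levels
        seeds.iterate(img);
        System.out.println("actual: " + seeds.getNumberOfSuperpixels());
    }
}
```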
public static SuperpixelLSC createSuperpixelLSC(Mat image, │ │ │ - int region_size, │ │ │ - float ratio)│ │ │ -
image
- Image to segmentregion_size
- Chooses an average superpixel size measured in pixelsratio
- Chooses the enforcement of superpixel compactness factor of superpixel
│ │ │ -
│ │ │ - The function initializes a SuperpixelLSC object for the input image. It sets the parameters of the
│ │ │ - superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future
│ │ │ - computing iterations over the given image. An example of LSC is illustrated in the following picture.
│ │ │ - For enhanced results it is recommended for color images to preprocess the image with a light Gaussian blur
│ │ │ - with a small 3 x 3 kernel and an additional conversion into CieLAB color space.
│ │ │ -
│ │ │ - ![image](pics/superpixels_lsc.png)public static SuperpixelLSC createSuperpixelLSC(Mat image, │ │ │ - int region_size)│ │ │ -
image
- Image to segmentregion_size
- Chooses an average superpixel size measured in pixels
│ │ │ -
│ │ │ - The function initializes a SuperpixelLSC object for the input image. It sets the parameters of the
│ │ │ - superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future
│ │ │ - computing iterations over the given image. An example of LSC is illustrated in the following picture.
│ │ │ - For enhanced results it is recommended for color images to preprocess the image with a light Gaussian blur
│ │ │ - with a small 3 x 3 kernel and an additional conversion into CieLAB color space.
│ │ │ -
│ │ │ - ![image](pics/superpixels_lsc.png)public static SuperpixelLSC createSuperpixelLSC(Mat image)│ │ │ -
image
- Image to segment
│ │ │ -
│ │ │ - The function initializes a SuperpixelLSC object for the input image. It sets the parameters of the
│ │ │ - superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future
│ │ │ - computing iterations over the given image. An example of LSC is illustrated in the following picture.
│ │ │ - For enhanced results it is recommended for color images to preprocess the image with a light Gaussian blur
│ │ │ - with a small 3 x 3 kernel and an additional conversion into CieLAB color space.
│ │ │ -
│ │ │ - ![image](pics/superpixels_lsc.png)public static void fourierDescriptor(Mat src, │ │ │ - Mat dst, │ │ │ - int nbElt, │ │ │ - int nbFD)│ │ │ -
src
- automatically generateddst
- automatically generatednbElt
- automatically generatednbFD
- automatically generatedpublic static void fourierDescriptor(Mat src, │ │ │ - Mat dst, │ │ │ - int nbElt)│ │ │ -
public static void createQuaternionImage(Mat img, │ │ │ + Mat qimg)│ │ │ +
src
- automatically generateddst
- automatically generatednbElt
- automatically generatedimg
- automatically generatedqimg
- automatically generatedpublic static void fourierDescriptor(Mat src, │ │ │ - Mat dst)│ │ │ -
public static void qconj(Mat qimg, │ │ │ + Mat qcimg)│ │ │ +
src
- automatically generateddst
- automatically generatedqimg
- automatically generatedqcimg
- automatically generatedpublic static void transformFD(Mat src, │ │ │ - Mat t, │ │ │ - Mat dst, │ │ │ - boolean fdContour)│ │ │ -
public static void qunitary(Mat qimg, │ │ │ + Mat qnimg)│ │ │ +
src
- automatically generatedt
- automatically generateddst
- automatically generatedfdContour
- automatically generatedqimg
- automatically generatedqnimg
- automatically generatedpublic static void transformFD(Mat src, │ │ │ - Mat t, │ │ │ - Mat dst)│ │ │ -
public static void qmultiply(Mat src1, │ │ │ + Mat src2, │ │ │ + Mat dst)│ │ │ +
src
- automatically generatedt
- automatically generatedsrc1
- automatically generatedsrc2
- automatically generateddst
- automatically generatedpublic static void contourSampling(Mat src, │ │ │ - Mat out, │ │ │ - int nbElt)│ │ │ -
src
- automatically generatedout
- automatically generatednbElt
- automatically generatedpublic static ContourFitting createContourFitting(int ctr, │ │ │ - int fd)│ │ │ -
ctr
- number of Fourier descriptors equal to number of contour points after resampling.fd
- Contour defining second shape (Target).public static ContourFitting createContourFitting(int ctr)│ │ │ -
ctr
- number of Fourier descriptors equal to number of contour points after resampling.public static ContourFitting createContourFitting()│ │ │ -
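A small sketch for the contourSampling/fourierDescriptor functions above (editor's illustration; the square contour is a hypothetical placeholder for one contour from Imgproc.findContours):

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.ximgproc.Ximgproc;

public class FdSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        MatOfPoint contour = new MatOfPoint(      // placeholder square contour
                new Point(0, 0), new Point(100, 0),
                new Point(100, 100), new Point(0, 100));
        Mat sampled = new Mat(), fd = new Mat();
        Ximgproc.contourSampling(contour, sampled, 256); // resample to 256 pts
        Ximgproc.fourierDescriptor(sampled, fd, 256, 16); // keep 16 descriptors
    }
}
```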
public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ - float distance_threshold, │ │ │ - double canny_th1, │ │ │ - double canny_th2, │ │ │ - int canny_aperture_size, │ │ │ - boolean do_merge)│ │ │ -
length_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ - segment than this will be regarded as an outliercanny_th1
- First threshold for hysteresis procedure in Canny()canny_th2
- Second threshold for hysteresis procedure in Canny()canny_aperture_size
- Aperture size for the Sobel operator in Canny().
│ │ │ - If zero, Canny() is not applied and the input image is taken as an edge image.do_merge
- If true, incremental merging of segments will be performedpublic static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ - float distance_threshold, │ │ │ - double canny_th1, │ │ │ - double canny_th2, │ │ │ - int canny_aperture_size)│ │ │ -
length_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ - segment than this will be regarded as an outliercanny_th1
- First threshold for hysteresis procedure in Canny()canny_th2
- Second threshold for hysteresis procedure in Canny()canny_aperture_size
- Aperture size for the Sobel operator in Canny().
│ │ │ - If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ - float distance_threshold, │ │ │ - double canny_th1, │ │ │ - double canny_th2)│ │ │ -
length_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ - segment than this will be regarded as an outliercanny_th1
- First threshold for hysteresis procedure in Canny()canny_th2
- Second threshold for hysteresis procedure in Canny()
│ │ │ - If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ - float distance_threshold, │ │ │ - double canny_th1)│ │ │ -
length_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ - segment than this will be regarded as an outliercanny_th1
- First threshold for hysteresis procedure in Canny()
│ │ │ - If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ - float distance_threshold)│ │ │ -
length_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ - segment than this will be regarded as an outlier
│ │ │ - If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector(int length_threshold)│ │ │ -
public static void qdft(Mat img, │ │ │ + Mat qimg, │ │ │ + int flags, │ │ │ + boolean sideLeft)│ │ │ +
length_threshold
- Segments shorter than this will be discarded
│ │ │ - segment farther than this will be regarded as an outlier
│ │ │ - If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector()│ │ │ -
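A default FastLineDetector sketch for the overloads above (editor's illustration; the detect/drawSegments method names are assumed from the same bindings):

```java
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.ximgproc.FastLineDetector;
import org.opencv.ximgproc.Ximgproc;

public class FldSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat gray = Imgcodecs.imread("building.jpg", Imgcodecs.IMREAD_GRAYSCALE);
        FastLineDetector fld = Ximgproc.createFastLineDetector();
        Mat lines = new Mat();        // one (x1,y1,x2,y2) row per segment
        fld.detect(gray, lines);
        fld.drawSegments(gray, lines);
    }
}
```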
img
- automatically generatedqimg
- automatically generatedflags
- automatically generatedsideLeft
- automatically generatedpublic static void FastHoughTransform(Mat src, │ │ │ - Mat dst, │ │ │ - int dstMatDepth, │ │ │ - int angleRange, │ │ │ - int op, │ │ │ - int makeSkew)│ │ │ -
public static void colorMatchTemplate(Mat img, │ │ │ + Mat templ, │ │ │ + Mat result)│ │ │ +
src
- automatically generateddst
- automatically generateddstMatDepth
- automatically generatedangleRange
- automatically generatedop
- automatically generatedmakeSkew
- automatically generatedimg
- automatically generatedtempl
- automatically generatedresult
- automatically generatedpublic static void FastHoughTransform(Mat src, │ │ │ - Mat dst, │ │ │ - int dstMatDepth, │ │ │ - int angleRange, │ │ │ - int op)│ │ │ -
public static void GradientDericheY(Mat op, │ │ │ + Mat dst, │ │ │ + double alpha, │ │ │ + double omega)│ │ │ +
src
- automatically generateddst
- automatically generateddstMatDepth
- automatically generatedangleRange
- automatically generatedop
- automatically generatedpublic static void FastHoughTransform(Mat src, │ │ │ - Mat dst, │ │ │ - int dstMatDepth, │ │ │ - int angleRange)│ │ │ -
src
- automatically generateddst
- automatically generateddstMatDepth
- automatically generatedangleRange
- automatically generatedalpha
- automatically generatedomega
- automatically generatedpublic static void FastHoughTransform(Mat src, │ │ │ - Mat dst, │ │ │ - int dstMatDepth)│ │ │ -
public static void GradientDericheX(Mat op, │ │ │ + Mat dst, │ │ │ + double alpha, │ │ │ + double omega)│ │ │ +
src
- automatically generatedop
- automatically generateddst
- automatically generateddstMatDepth
- automatically generatedpublic static void covarianceEstimation(Mat src, │ │ │ - Mat dst, │ │ │ - int windowRows, │ │ │ - int windowCols)│ │ │ -
src
- The source image. Input image must be of a complex type.dst
- The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).windowRows
- The number of rows in the window.windowCols
- The number of cols in the window.
│ │ │ - The window size parameters control the accuracy of the estimation.
│ │ │ - The sliding window moves over the entire image from the top-left corner
│ │ │ - to the bottom right corner. Each location of the window represents a sample.
│ │ │ - If the window is the size of the image, then this gives the exact covariance matrix.
│ │ │ - For all other cases, the sizes of the window will impact the number of samples
│ │ │ - and the number of elements in the estimated covariance matrix.public static void edgePreservingFilter(Mat src, │ │ │ - Mat dst, │ │ │ - int d, │ │ │ - double threshold)│ │ │ -
src
- Source 8-bit 3-channel image.dst
- Destination image of the same size and type as src.d
- Diameter of each pixel neighborhood that is used during filtering. Must be greater than or equal to 3.threshold
- Threshold, which distinguishes between noise, outliers, and data.public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore, │ │ │ - int maxBoxes, │ │ │ - float edgeMinMag, │ │ │ - float edgeMergeThr, │ │ │ - float clusterMinMag, │ │ │ - float maxAspectRatio, │ │ │ - float minBoxArea, │ │ │ - float gamma, │ │ │ - float kappa)│ │ │ -
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.maxAspectRatio
- max aspect ratio of boxes.minBoxArea
- minimum area of boxes.gamma
- affinity sensitivity.kappa
- scale sensitivity.public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore, │ │ │ - int maxBoxes, │ │ │ - float edgeMinMag, │ │ │ - float edgeMergeThr, │ │ │ - float clusterMinMag, │ │ │ - float maxAspectRatio, │ │ │ - float minBoxArea, │ │ │ - float gamma)│ │ │ -
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.maxAspectRatio
- max aspect ratio of boxes.minBoxArea
- minimum area of boxes.gamma
- affinity sensitivity.public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore, │ │ │ - int maxBoxes, │ │ │ - float edgeMinMag, │ │ │ - float edgeMergeThr, │ │ │ - float clusterMinMag, │ │ │ - float maxAspectRatio, │ │ │ - float minBoxArea)│ │ │ -
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.maxAspectRatio
- max aspect ratio of boxes.minBoxArea
- minimum area of boxes.alpha
- automatically generatedomega
- automatically generatedpublic static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore, │ │ │ - int maxBoxes, │ │ │ - float edgeMinMag, │ │ │ - float edgeMergeThr, │ │ │ - float clusterMinMag, │ │ │ - float maxAspectRatio)│ │ │ -
public static DisparityWLSFilter createDisparityWLSFilter(StereoMatcher matcher_left)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.maxAspectRatio
- max aspect ratio of boxes.matcher_left
- stereo matcher instance that will be used with the filterpublic static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore, │ │ │ - int maxBoxes, │ │ │ - float edgeMinMag, │ │ │ - float edgeMergeThr, │ │ │ - float clusterMinMag)│ │ │ -
public static StereoMatcher createRightMatcher(StereoMatcher matcher_left)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.matcher_left
- main stereo matcher instance that will be used with the filterpublic static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore, │ │ │ - int maxBoxes, │ │ │ - float edgeMinMag, │ │ │ - float edgeMergeThr)│ │ │ -
public static DisparityWLSFilter createDisparityWLSFilterGeneric(boolean use_confidence)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.use_confidence
- filtering with confidence requires two disparity maps (for the left and right views) and is
│ │ │ + approximately two times slower. However, quality is typically significantly better.public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore, │ │ │ - int maxBoxes, │ │ │ - float edgeMinMag)│ │ │ -
public static int readGT(java.lang.String src_path, │ │ │ + Mat dst)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.src_path
- path to the image, containing ground-truth disparity mapdst
- output disparity map, CV_16S depthpublic static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore, │ │ │ - int maxBoxes)│ │ │ -
public static double computeMSE(Mat GT, │ │ │ + Mat src, │ │ │ + Rect ROI)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.GT
- ground truth disparity mapsrc
- disparity map to evaluateROI
- region of interestpublic static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta, │ │ │ - float minScore)│ │ │ -
public static double computeBadPixelPercent(Mat GT, │ │ │ + Mat src, │ │ │ + Rect ROI, │ │ │ + int thresh)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.GT
- ground truth disparity mapsrc
- disparity map to evaluateROI
- region of interestthresh
- threshold used to determine "bad" pixelspublic static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta, │ │ │ - float eta)│ │ │ -
public static double computeBadPixelPercent(Mat GT, │ │ │ + Mat src, │ │ │ + Rect ROI)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.GT
- ground truth disparity mapsrc
- disparity map to evaluateROI
- region of interestpublic static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ - float beta)│ │ │ -
public static void getDisparityVis(Mat src, │ │ │ + Mat dst, │ │ │ + double scale)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.src
- input disparity map (CV_16S depth)dst
- output visualizationscale
- disparity map will be multiplied by this value for visualizationpublic static EdgeBoxes createEdgeBoxes(float alpha)│ │ │ -
public static void getDisparityVis(Mat src, │ │ │ + Mat dst)│ │ │ +
alpha
- step size of sliding window search.src
- input disparity map (CV_16S depth)dst
- output visualizationpublic static EdgeBoxes createEdgeBoxes()│ │ │ -
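Taken together, the createDisparityWLSFilter, createRightMatcher, and getDisparityVis entries above form the usual ximgproc disparity post-filtering pipeline. A minimal sketch in Java, assuming the OpenCV native library is already loaded, that left.png/right.png are a rectified stereo pair (both names hypothetical), and that the 4-argument DisparityWLSFilter.filter overload generated from the C++ defaults is available:

    import org.opencv.calib3d.StereoBM;
    import org.opencv.calib3d.StereoMatcher;
    import org.opencv.core.Mat;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.ximgproc.DisparityWLSFilter;
    import org.opencv.ximgproc.Ximgproc;

    public class WlsDemo {
        public static void main(String[] args) {
            Mat left = Imgcodecs.imread("left.png", Imgcodecs.IMREAD_GRAYSCALE);
            Mat right = Imgcodecs.imread("right.png", Imgcodecs.IMREAD_GRAYSCALE);

            // Left matcher, plus a right matcher derived from it as createRightMatcher describes.
            StereoBM leftMatcher = StereoBM.create(64, 15);
            StereoMatcher rightMatcher = Ximgproc.createRightMatcher(leftMatcher);
            DisparityWLSFilter wls = Ximgproc.createDisparityWLSFilter(leftMatcher);

            Mat dispLeft = new Mat(), dispRight = new Mat(), filtered = new Mat();
            leftMatcher.compute(left, right, dispLeft);
            rightMatcher.compute(right, left, dispRight);
            wls.filter(dispLeft, left, filtered, dispRight);

            // getDisparityVis scales the CV_16S disparity into a viewable image.
            Mat vis = new Mat();
            Ximgproc.getDisparityVis(filtered, vis, 1.0);
            Imgcodecs.imwrite("disparity_vis.png", vis);
        }
    }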
public static EdgeDrawing createEdgeDrawing()│ │ │ +
public static EdgeDrawing createEdgeDrawing()│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore, │ │ │ + int maxBoxes, │ │ │ + float edgeMinMag, │ │ │ + float edgeMergeThr, │ │ │ + float clusterMinMag, │ │ │ + float maxAspectRatio, │ │ │ + float minBoxArea, │ │ │ + float gamma, │ │ │ + float kappa)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.maxAspectRatio
- max aspect ratio of boxes.minBoxArea
- minimum area of boxes.gamma
- affinity sensitivity.kappa
- scale sensitivity.public static DisparityWLSFilter createDisparityWLSFilter(StereoMatcher matcher_left)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore, │ │ │ + int maxBoxes, │ │ │ + float edgeMinMag, │ │ │ + float edgeMergeThr, │ │ │ + float clusterMinMag, │ │ │ + float maxAspectRatio, │ │ │ + float minBoxArea, │ │ │ + float gamma)│ │ │ +
matcher_left
- stereo matcher instance that will be used with the filteralpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.maxAspectRatio
- max aspect ratio of boxes.minBoxArea
- minimum area of boxes.gamma
- affinity sensitivity.public static StereoMatcher createRightMatcher(StereoMatcher matcher_left)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore, │ │ │ + int maxBoxes, │ │ │ + float edgeMinMag, │ │ │ + float edgeMergeThr, │ │ │ + float clusterMinMag, │ │ │ + float maxAspectRatio, │ │ │ + float minBoxArea)│ │ │ +
matcher_left
- main stereo matcher instance that will be used with the filteralpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.maxAspectRatio
- max aspect ratio of boxes.minBoxArea
- minimum area of boxes.public static DisparityWLSFilter createDisparityWLSFilterGeneric(boolean use_confidence)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore, │ │ │ + int maxBoxes, │ │ │ + float edgeMinMag, │ │ │ + float edgeMergeThr, │ │ │ + float clusterMinMag, │ │ │ + float maxAspectRatio)│ │ │ +
use_confidence
- filtering with confidence requires two disparity maps (for the left and right views) and is
│ │ │ - approximately two times slower. However, quality is typically significantly better.alpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.maxAspectRatio
- max aspect ratio of boxes.public static int readGT(java.lang.String src_path, │ │ │ - Mat dst)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore, │ │ │ + int maxBoxes, │ │ │ + float edgeMinMag, │ │ │ + float edgeMergeThr, │ │ │ + float clusterMinMag)│ │ │ +
src_path
- path to the image, containing ground-truth disparity mapdst
- output disparity map, CV_16S depthalpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.clusterMinMag
- cluster min magnitude. Increase to trade off accuracy for speed.public static double computeMSE(Mat GT, │ │ │ - Mat src, │ │ │ - Rect ROI)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore, │ │ │ + int maxBoxes, │ │ │ + float edgeMinMag, │ │ │ + float edgeMergeThr)│ │ │ +
GT
- ground truth disparity mapsrc
- disparity map to evaluateROI
- region of interestalpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.edgeMergeThr
- edge merge threshold. Increase to trade off accuracy for speed.public static double computeBadPixelPercent(Mat GT, │ │ │ - Mat src, │ │ │ - Rect ROI, │ │ │ - int thresh)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore, │ │ │ + int maxBoxes, │ │ │ + float edgeMinMag)│ │ │ +
GT
- ground truth disparity mapsrc
- disparity map to evaluateROI
- region of interestthresh
- threshold used to determine "bad" pixelsalpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.edgeMinMag
- edge min magnitude. Increase to trade off accuracy for speed.public static double computeBadPixelPercent(Mat GT, │ │ │ - Mat src, │ │ │ - Rect ROI)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore, │ │ │ + int maxBoxes)│ │ │ +
GT
- ground truth disparity mapsrc
- disparity map to evaluateROI
- region of interestalpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.maxBoxes
- max number of boxes to detect.public static void getDisparityVis(Mat src, │ │ │ - Mat dst, │ │ │ - double scale)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta, │ │ │ + float minScore)│ │ │ +
src
- input disparity map (CV_16S depth)dst
- output visualizationscale
- disparity map will be multiplied by this value for visualizationalpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.minScore
- min score of boxes to detect.public static void getDisparityVis(Mat src, │ │ │ - Mat dst)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta, │ │ │ + float eta)│ │ │ +
src
- input disparity map (CV_16S depth)dst
- output visualizationalpha
- step size of sliding window search.beta
- nms threshold for object proposals.eta
- adaptation rate for nms threshold.public static void GradientDericheY(Mat op, │ │ │ - Mat dst, │ │ │ - double alpha, │ │ │ - double omega)│ │ │ -
public static EdgeBoxes createEdgeBoxes(float alpha, │ │ │ + float beta)│ │ │ +
alpha
- step size of sliding window search.beta
- nms threshold for object proposals.public static EdgeBoxes createEdgeBoxes(float alpha)│ │ │ +
alpha
- step size of sliding window search.public static EdgeBoxes createEdgeBoxes()│ │ │ +
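The createEdgeBoxes variants above only construct the detector; EdgeBoxes itself consumes an edge map and an orientation map, typically obtained from createStructuredEdgeDetection (documented further below). A hedged sketch, where model.yml and input.png are hypothetical and the 3-argument getBoundingBoxes call is assumed from the wrapper's defaults:

    import org.opencv.core.CvType;
    import org.opencv.core.Mat;
    import org.opencv.core.MatOfRect;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.imgproc.Imgproc;
    import org.opencv.ximgproc.EdgeBoxes;
    import org.opencv.ximgproc.StructuredEdgeDetection;
    import org.opencv.ximgproc.Ximgproc;

    public class EdgeBoxesDemo {
        public static void main(String[] args) {
            // The structured edge model expects a float RGB image in [0, 1].
            Mat bgr = Imgcodecs.imread("input.png");
            Mat rgb = new Mat();
            Imgproc.cvtColor(bgr, rgb, Imgproc.COLOR_BGR2RGB);
            rgb.convertTo(rgb, CvType.CV_32FC3, 1.0 / 255.0);

            StructuredEdgeDetection sed = Ximgproc.createStructuredEdgeDetection("model.yml");
            Mat edges = new Mat(), orientation = new Mat();
            sed.detectEdges(rgb, edges);
            sed.computeOrientation(edges, orientation);

            EdgeBoxes eb = Ximgproc.createEdgeBoxes();   // all-default variant from the list above
            MatOfRect boxes = new MatOfRect();
            eb.getBoundingBoxes(edges, orientation, boxes);
            System.out.println("proposals: " + boxes.rows());
        }
    }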
public static void edgePreservingFilter(Mat src, │ │ │ + Mat dst, │ │ │ + int d, │ │ │ + double threshold)│ │ │ +
op
- automatically generatedsrc
- Source 8-bit 3-channel image.dst
- Destination image of the same size and type as src.d
- Diameter of each pixel neighborhood that is used during filtering. Must be greater than or equal to 3.threshold
- Threshold, which distinguishes between noise, outliers, and data.public static void covarianceEstimation(Mat src, │ │ │ + Mat dst, │ │ │ + int windowRows, │ │ │ + int windowCols)│ │ │ +
src
- The source image. Input image must be of a complex type.dst
- The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).windowRows
- The number of rows in the window.windowCols
- The number of cols in the window.
│ │ │ + The window size parameters control the accuracy of the estimation.
│ │ │ + The sliding window moves over the entire image from the top-left corner
│ │ │ + to the bottom right corner. Each location of the window represents a sample.
│ │ │ + If the window is the size of the image, then this gives the exact covariance matrix.
│ │ │ + For all other cases, the sizes of the window will impact the number of samples
│ │ │ + and the number of elements in the estimated covariance matrix.public static void FastHoughTransform(Mat src, │ │ │ + Mat dst, │ │ │ + int dstMatDepth, │ │ │ + int angleRange, │ │ │ + int op, │ │ │ + int makeSkew)│ │ │ +
src
- automatically generateddst
- automatically generatedalpha
- automatically generatedomega
- automatically generateddstMatDepth
- automatically generatedangleRange
- automatically generatedop
- automatically generatedmakeSkew
- automatically generatedpublic static void GradientDericheX(Mat op, │ │ │ - Mat dst, │ │ │ - double alpha, │ │ │ - double omega)│ │ │ -
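Of the entries just above, edgePreservingFilter and covarianceEstimation have documented parameters; a small sketch under the stated constraints (8-bit 3-channel input for the filter, a complex-typed input for the covariance estimator; the file name is hypothetical):

    import java.util.Arrays;
    import org.opencv.core.Core;
    import org.opencv.core.CvType;
    import org.opencv.core.Mat;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.ximgproc.Ximgproc;

    public class XimgprocFilterDemo {
        public static void main(String[] args) {
            Mat src = Imgcodecs.imread("input.png");   // 8-bit 3-channel
            Mat smoothed = new Mat();
            // d >= 3 is the neighborhood diameter; threshold separates noise from data.
            Ximgproc.edgePreservingFilter(src, smoothed, 9, 20.0);

            // covarianceEstimation wants a complex-typed input: build CV_32FC2 from a
            // grayscale float image plus a zero imaginary plane.
            Mat gray = Imgcodecs.imread("input.png", Imgcodecs.IMREAD_GRAYSCALE);
            gray.convertTo(gray, CvType.CV_32F);
            Mat complex = new Mat();
            Core.merge(Arrays.asList(gray, Mat.zeros(gray.size(), CvType.CV_32F)), complex);
            Mat cov = new Mat();   // output: (5*5) x (5*5) covariance matrix
            Ximgproc.covarianceEstimation(complex, cov, 5, 5);
        }
    }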
public static void FastHoughTransform(Mat src, │ │ │ + Mat dst, │ │ │ + int dstMatDepth, │ │ │ + int angleRange, │ │ │ + int op)│ │ │ +
src
- automatically generateddst
- automatically generateddstMatDepth
- automatically generatedangleRange
- automatically generatedop
- automatically generatedpublic static void FastHoughTransform(Mat src, │ │ │ + Mat dst, │ │ │ + int dstMatDepth, │ │ │ + int angleRange)│ │ │ +
src
- automatically generateddst
- automatically generatedalpha
- automatically generatedomega
- automatically generateddstMatDepth
- automatically generatedangleRange
- automatically generatedpublic static void createQuaternionImage(Mat img, │ │ │ - Mat qimg)│ │ │ -
public static void FastHoughTransform(Mat src, │ │ │ + Mat dst, │ │ │ + int dstMatDepth)│ │ │ +
img
- automatically generatedqimg
- automatically generatedsrc
- automatically generateddst
- automatically generateddstMatDepth
- automatically generatedpublic static void qconj(Mat qimg, │ │ │ - Mat qcimg)│ │ │ -
public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ + float distance_threshold, │ │ │ + double canny_th1, │ │ │ + double canny_th2, │ │ │ + int canny_aperture_size, │ │ │ + boolean do_merge)│ │ │ +
qimg
- automatically generatedqcimg
- automatically generatedlength_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ + segment than this will be regarded as an outliercanny_th1
- First threshold for hysteresis procedure in Canny()canny_th2
- Second threshold for hysteresis procedure in Canny()canny_aperture_size
- Aperture size for the Sobel operator in Canny().
│ │ │ + If zero, Canny() is not applied and the input image is taken as an edge image.do_merge
- If true, incremental merging of segments will be performedpublic static void qunitary(Mat qimg, │ │ │ - Mat qnimg)│ │ │ -
public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ + float distance_threshold, │ │ │ + double canny_th1, │ │ │ + double canny_th2, │ │ │ + int canny_aperture_size)│ │ │ +
qimg
- automatically generatedqnimg
- automatically generatedlength_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ + segment than this will be regarded as an outliercanny_th1
- First threshold for hysteresis procedure in Canny()canny_th2
- Second threshold for hysteresis procedure in Canny()canny_aperture_size
- Aperture size for the Sobel operator in Canny().
│ │ │ + If zero, Canny() is not applied and the input image is taken as an edge image.public static void qmultiply(Mat src1, │ │ │ - Mat src2, │ │ │ - Mat dst)│ │ │ -
public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ + float distance_threshold, │ │ │ + double canny_th1, │ │ │ + double canny_th2)│ │ │ +
src1
- automatically generatedsrc2
- automatically generatedlength_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ + segment than this will be regarded as an outliercanny_th1
- First threshold for hysteresis procedure in Canny()canny_th2
- Second threshold for hysteresis procedure in Canny()
│ │ │ + If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ + float distance_threshold, │ │ │ + double canny_th1)│ │ │ +
length_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ + segment than this will be regarded as an outliercanny_th1
- First threshold for hysteresis procedure in Canny()
│ │ │ + If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector(int length_threshold, │ │ │ + float distance_threshold)│ │ │ +
length_threshold
- Segments shorter than this will be discardeddistance_threshold
- A point placed farther from a hypothesis line
│ │ │ + segment than this will be regarded as an outlier
│ │ │ + If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector(int length_threshold)│ │ │ +
length_threshold
- Segments shorter than this will be discarded
│ │ │ + segment farther than this will be regarded as an outlier
│ │ │ + If zero, Canny() is not applied and the input image is taken as an edge image.public static FastLineDetector createFastLineDetector()│ │ │ +
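A minimal sketch of the createFastLineDetector variants above, using the full-argument factory; the parameter values are common illustrative choices, not mandated defaults, and input.png is hypothetical:

    import org.opencv.core.Mat;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.ximgproc.FastLineDetector;
    import org.opencv.ximgproc.Ximgproc;

    public class FldDemo {
        public static void main(String[] args) {
            Mat gray = Imgcodecs.imread("input.png", Imgcodecs.IMREAD_GRAYSCALE);
            // length_threshold=10, distance_threshold=1.41f, Canny thresholds 50/50,
            // aperture size 3, with incremental segment merging enabled.
            FastLineDetector fld = Ximgproc.createFastLineDetector(10, 1.41f, 50.0, 50.0, 3, true);
            Mat lines = new Mat();   // one row per segment: x1, y1, x2, y2
            fld.detect(gray, lines);
            fld.drawSegments(gray, lines);
            Imgcodecs.imwrite("segments.png", gray);
        }
    }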
public static void fourierDescriptor(Mat src, │ │ │ + Mat dst, │ │ │ + int nbElt, │ │ │ + int nbFD)│ │ │ +
src
- automatically generateddst
- automatically generatednbElt
- automatically generatednbFD
- automatically generatedpublic static void qdft(Mat img, │ │ │ - Mat qimg, │ │ │ - int flags, │ │ │ - boolean sideLeft)│ │ │ -
public static void fourierDescriptor(Mat src, │ │ │ + Mat dst, │ │ │ + int nbElt)│ │ │ +
img
- automatically generatedqimg
- automatically generatedflags
- automatically generatedsideLeft
- automatically generatedsrc
- automatically generateddst
- automatically generatednbElt
- automatically generatedpublic static void fourierDescriptor(Mat src, │ │ │ + Mat dst)│ │ │ +
src
- automatically generateddst
- automatically generatedpublic static void transformFD(Mat src, │ │ │ + Mat t, │ │ │ + Mat dst, │ │ │ + boolean fdContour)│ │ │ +
src
- automatically generatedt
- automatically generateddst
- automatically generatedfdContour
- automatically generatedpublic static void transformFD(Mat src, │ │ │ + Mat t, │ │ │ + Mat dst)│ │ │ +
src
- automatically generatedt
- automatically generateddst
- automatically generatedpublic static void contourSampling(Mat src, │ │ │ + Mat out, │ │ │ + int nbElt)│ │ │ +
src
- automatically generatedout
- automatically generatednbElt
- automatically generatedpublic static ContourFitting createContourFitting(int ctr, │ │ │ + int fd)│ │ │ +
ctr
- number of Fourier descriptors equal to number of contour points after resampling.fd
- number of Fourier descriptors used for the curve matching.public static ContourFitting createContourFitting(int ctr)│ │ │ +
ctr
- number of Fourier descriptors equal to number of contour points after resampling.public static ContourFitting createContourFitting()│ │ │ +
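The contourSampling/fourierDescriptor/createContourFitting entries above chain naturally: resample a contour, then describe it. A sketch assuming a binary input image and that the wrappers accept an integer contour Mat directly (the counts 256 and 16 are arbitrary):

    import java.util.ArrayList;
    import java.util.List;
    import org.opencv.core.Mat;
    import org.opencv.core.MatOfPoint;
    import org.opencv.imgproc.Imgproc;
    import org.opencv.ximgproc.ContourFitting;
    import org.opencv.ximgproc.Ximgproc;

    public class FourierDescriptorDemo {
        static void describe(Mat binaryImage) {
            List<MatOfPoint> contours = new ArrayList<>();
            Imgproc.findContours(binaryImage, contours, new Mat(),
                    Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);

            // Resample the first contour to 256 points, then keep 16 descriptors.
            Mat sampled = new Mat(), fd = new Mat();
            Ximgproc.contourSampling(contours.get(0), sampled, 256);
            Ximgproc.fourierDescriptor(sampled, fd, 256, 16);

            // A ContourFitting object matches two shapes through such descriptors.
            ContourFitting fitter = Ximgproc.createContourFitting(256, 16);
        }
    }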
public static SuperpixelLSC createSuperpixelLSC(Mat image, │ │ │ + int region_size, │ │ │ + float ratio)│ │ │ +
image
- Image to segmentregion_size
- Chooses an average superpixel size measured in pixelsratio
- Chooses the enforcement of the superpixel compactness factor
│ │ │ +
│ │ │ + The function initializes a SuperpixelLSC object for the input image. It sets the parameters of
│ │ │ + the superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ + computing iterations over the given image. An example of LSC is illustrated in the following picture.
│ │ │ + For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur
│ │ │ + using a small 3 x 3 kernel and additional conversion into CieLAB color space.
│ │ │ +
│ │ │ + ![image](pics/superpixels_lsc.png)public static SuperpixelLSC createSuperpixelLSC(Mat image, │ │ │ + int region_size)│ │ │ +
image
- Image to segmentregion_size
- Chooses an average superpixel size measured in pixels
│ │ │ +
│ │ │ + The function initializes a SuperpixelLSC object for the input image. It sets the parameters of
│ │ │ + the superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ + computing iterations over the given image. An example of LSC is illustrated in the following picture.
│ │ │ + For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur
│ │ │ + using a small 3 x 3 kernel and additional conversion into CieLAB color space.
│ │ │ +
│ │ │ + ![image](pics/superpixels_lsc.png)public static SuperpixelLSC createSuperpixelLSC(Mat image)│ │ │ +
image
- Image to segment
│ │ │ +
│ │ │ + The function initializes a SuperpixelLSC object for the input image. It sets the parameters of
│ │ │ + the superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ + computing iterations over the given image. An example of LSC is illustrated in the following picture.
│ │ │ + For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur
│ │ │ + using a small 3 x 3 kernel and additional conversion into CieLAB color space.
│ │ │ +
│ │ │ + ![image](pics/superpixels_lsc.png)public static void PeiLinNormalization(Mat I, │ │ │ + Mat T)│ │ │ +
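A sketch of the SuperpixelLSC workflow described above, including the recommended Gaussian blur and CieLAB conversion; region size 10 and ratio 0.075f mirror the library defaults, and the iteration count and file names are free choices:

    import org.opencv.core.Mat;
    import org.opencv.core.Size;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.imgproc.Imgproc;
    import org.opencv.ximgproc.SuperpixelLSC;
    import org.opencv.ximgproc.Ximgproc;

    public class LscDemo {
        public static void main(String[] args) {
            Mat bgr = Imgcodecs.imread("input.png");
            Mat lab = new Mat();
            Imgproc.GaussianBlur(bgr, bgr, new Size(3, 3), 0);
            Imgproc.cvtColor(bgr, lab, Imgproc.COLOR_BGR2Lab);

            SuperpixelLSC lsc = Ximgproc.createSuperpixelLSC(lab, 10, 0.075f);
            lsc.iterate(10);   // number of iterations is a free choice
            System.out.println("superpixels: " + lsc.getNumberOfSuperpixels());

            Mat contours = new Mat();
            lsc.getLabelContourMask(contours, true);
            Imgcodecs.imwrite("lsc_contours.png", contours);
        }
    }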
public static SuperpixelSEEDS createSuperpixelSEEDS(int image_width, │ │ │ + int image_height, │ │ │ + int image_channels, │ │ │ + int num_superpixels, │ │ │ + int num_levels, │ │ │ + int prior, │ │ │ + int histogram_bins, │ │ │ + boolean double_step)│ │ │ +
image_width
- Image width.image_height
- Image height.image_channels
- Number of channels of the image.num_superpixels
- Desired number of superpixels. Note that the actual number may be smaller
│ │ │ + due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
│ │ │ + get the actual number.num_levels
- Number of block levels. The more levels, the more accurate the segmentation,
│ │ │ + but the more memory and CPU time it needs.prior
- enable 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior
│ │ │ + must be in the range [0, 5].histogram_bins
- Number of histogram bins.double_step
- If true, iterate each block level twice for higher accuracy.
│ │ │ +
│ │ │ + The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
│ │ │ + the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
│ │ │ + superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
│ │ │ + double_step.
│ │ │ +
│ │ │ + The number of levels in num_levels defines the number of block levels that the algorithm uses in the
│ │ │ + optimization. The initialization is a grid, in which the superpixels are equally distributed through
│ │ │ + the width and the height of the image. The larger blocks correspond to the superpixel size, and the
│ │ │ + levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
│ │ │ + recursively down to the smallest block level. An example of initialization of 4 block levels is
│ │ │ + illustrated in the following figure.
│ │ │ +
│ │ │ + ![image](pics/superpixels_blocks.png)public static SuperpixelSEEDS createSuperpixelSEEDS(int image_width, │ │ │ + int image_height, │ │ │ + int image_channels, │ │ │ + int num_superpixels, │ │ │ + int num_levels, │ │ │ + int prior, │ │ │ + int histogram_bins)│ │ │ +
image_width
- Image width.image_height
- Image height.image_channels
- Number of channels of the image.num_superpixels
- Desired number of superpixels. Note that the actual number may be smaller
│ │ │ + due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
│ │ │ + get the actual number.num_levels
- Number of block levels. The more levels, the more accurate the segmentation,
│ │ │ + but the more memory and CPU time it needs.prior
- enable 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior
│ │ │ + must be in the range [0, 5].histogram_bins
- Number of histogram bins.
│ │ │ +
│ │ │ + The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
│ │ │ + the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
│ │ │ + superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
│ │ │ + double_step.
│ │ │ +
│ │ │ + The number of levels in num_levels defines the number of block levels that the algorithm uses in the
│ │ │ + optimization. The initialization is a grid, in which the superpixels are equally distributed through
│ │ │ + the width and the height of the image. The larger blocks correspond to the superpixel size, and the
│ │ │ + levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
│ │ │ + recursively down to the smallest block level. An example of initialization of 4 block levels is
│ │ │ + illustrated in the following figure.
│ │ │ +
│ │ │ + ![image](pics/superpixels_blocks.png)public static SuperpixelSEEDS createSuperpixelSEEDS(int image_width, │ │ │ + int image_height, │ │ │ + int image_channels, │ │ │ + int num_superpixels, │ │ │ + int num_levels, │ │ │ + int prior)│ │ │ +
image_width
- Image width.image_height
- Image height.image_channels
- Number of channels of the image.num_superpixels
- Desired number of superpixels. Note that the actual number may be smaller
│ │ │ + due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
│ │ │ + get the actual number.num_levels
- Number of block levels. The more levels, the more accurate the segmentation,
│ │ │ + but the more memory and CPU time it needs.prior
- enable 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior
│ │ │ + must be in the range [0, 5].
│ │ │ +
│ │ │ + The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
│ │ │ + the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
│ │ │ + superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
│ │ │ + double_step.
│ │ │ +
│ │ │ + The number of levels in num_levels defines the number of block levels that the algorithm uses in the
│ │ │ + optimization. The initialization is a grid, in which the superpixels are equally distributed through
│ │ │ + the width and the height of the image. The larger blocks correspond to the superpixel size, and the
│ │ │ + levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
│ │ │ + recursively down to the smallest block level. An example of initialization of 4 block levels is
│ │ │ + illustrated in the following figure.
│ │ │ +
│ │ │ + ![image](pics/superpixels_blocks.png)public static SuperpixelSEEDS createSuperpixelSEEDS(int image_width, │ │ │ + int image_height, │ │ │ + int image_channels, │ │ │ + int num_superpixels, │ │ │ + int num_levels)│ │ │ +
image_width
- Image width.image_height
- Image height.image_channels
- Number of channels of the image.num_superpixels
- Desired number of superpixels. Note that the actual number may be smaller
│ │ │ + due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
│ │ │ + get the actual number.num_levels
- Number of block levels. The more levels, the more accurate the segmentation,
│ │ │ + but the more memory and CPU time it needs.
│ │ │ + must be in the range [0, 5].
│ │ │ +
│ │ │ + The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
│ │ │ + the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
│ │ │ + superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
│ │ │ + double_step.
│ │ │ +
│ │ │ + The number of levels in num_levels defines the number of block levels that the algorithm uses in the
│ │ │ + optimization. The initialization is a grid, in which the superpixels are equally distributed through
│ │ │ + the width and the height of the image. The larger blocks correspond to the superpixel size, and the
│ │ │ + levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
│ │ │ + recursively down to the smallest block level. An example of initialization of 4 block levels is
│ │ │ + illustrated in the following figure.
│ │ │ +
│ │ │ + ![image](pics/superpixels_blocks.png)public static GraphSegmentation createGraphSegmentation(double sigma, │ │ │ + float k, │ │ │ + int min_size)│ │ │ +
sigma
- The sigma parameter, used to smooth the imagek
- The k parameter of the algorithmmin_size
- The minimum size of segmentspublic static GraphSegmentation createGraphSegmentation(double sigma, │ │ │ + float k)│ │ │ +
sigma
- The sigma parameter, used to smooth the imagek
- The k parameter of the algorithmpublic static GraphSegmentation createGraphSegmentation(double sigma)│ │ │ +
sigma
- The sigma parameter, used to smooth the imagepublic static GraphSegmentation createGraphSegmentation()│ │ │ +
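A short sketch of the graph-based segmentation factory above; the values passed are the library's documented defaults (sigma=0.5, k=300, min_size=100), and input.png is hypothetical:

    import org.opencv.core.Core;
    import org.opencv.core.Mat;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.ximgproc.GraphSegmentation;
    import org.opencv.ximgproc.Ximgproc;

    public class GraphSegDemo {
        public static void main(String[] args) {
            Mat img = Imgcodecs.imread("input.png");
            GraphSegmentation gs = Ximgproc.createGraphSegmentation(0.5, 300f, 100);
            Mat labels = new Mat();   // CV_32S, one segment id per pixel
            gs.processImage(img, labels);
            Core.MinMaxLocResult mm = Core.minMaxLoc(labels);
            System.out.println("segments: " + (int) (mm.maxVal + 1));
        }
    }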
public static SelectiveSearchSegmentationStrategyColor createSelectiveSearchSegmentationStrategyColor()│ │ │ +
public static SelectiveSearchSegmentationStrategySize createSelectiveSearchSegmentationStrategySize()│ │ │ +
public static SelectiveSearchSegmentationStrategyTexture createSelectiveSearchSegmentationStrategyTexture()│ │ │ +
public static SelectiveSearchSegmentationStrategyFill createSelectiveSearchSegmentationStrategyFill()│ │ │ +
public static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple()│ │ │ +
public static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(SelectiveSearchSegmentationStrategy s1)│ │ │ +
s1
- The first strategypublic static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(SelectiveSearchSegmentationStrategy s1, │ │ │ + SelectiveSearchSegmentationStrategy s2)│ │ │ +
s1
- The first strategys2
- The second strategypublic static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(SelectiveSearchSegmentationStrategy s1, │ │ │ + SelectiveSearchSegmentationStrategy s2, │ │ │ + SelectiveSearchSegmentationStrategy s3)│ │ │ +
s1
- The first strategys2
- The second strategys3
- The third strategypublic static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(SelectiveSearchSegmentationStrategy s1, │ │ │ + SelectiveSearchSegmentationStrategy s2, │ │ │ + SelectiveSearchSegmentationStrategy s3, │ │ │ + SelectiveSearchSegmentationStrategy s4)│ │ │ +
s1
- The first strategys2
- The second strategys3
- The third strategys4
- The fourth strategypublic static SelectiveSearchSegmentation createSelectiveSearchSegmentation()│ │ │ +
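The strategy factories above are combined internally by the selective-search presets; a minimal sketch of the usual flow (the fast preset wires up color/size/texture/fill strategies itself, and input.png is hypothetical):

    import org.opencv.core.Mat;
    import org.opencv.core.MatOfRect;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.ximgproc.SelectiveSearchSegmentation;
    import org.opencv.ximgproc.Ximgproc;

    public class SelectiveSearchDemo {
        public static void main(String[] args) {
            Mat img = Imgcodecs.imread("input.png");
            SelectiveSearchSegmentation ss = Ximgproc.createSelectiveSearchSegmentation();
            ss.setBaseImage(img);
            ss.switchToSelectiveSearchFast();
            MatOfRect proposals = new MatOfRect();
            ss.process(proposals);
            System.out.println("proposals: " + proposals.rows());
        }
    }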
public static SuperpixelSLIC createSuperpixelSLIC(Mat image, │ │ │ + int algorithm, │ │ │ + int region_size, │ │ │ + float ruler)│ │ │ +
image
- Image to segmentalgorithm
- Chooses the algorithm variant to use:
│ │ │ + SLIC segments the image using a desired region_size, and in addition SLICO will optimize using an adaptive compactness factor,
│ │ │ + while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.region_size
- Chooses an average superpixel size measured in pixelsruler
- Chooses the enforcement of the superpixel smoothness factor
│ │ │ +
│ │ │ + The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
│ │ │ + superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ + computing iterations over the given image. For enhanced results it is recommended for color images to
│ │ │ + preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and additional conversion into
│ │ │ + CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
│ │ │ +
│ │ │ + ![image](pics/superpixels_slic.png)public static SuperpixelSLIC createSuperpixelSLIC(Mat image, │ │ │ + int algorithm, │ │ │ + int region_size)│ │ │ +
image
- Image to segmentalgorithm
- Chooses the algorithm variant to use:
│ │ │ + SLIC segments the image using a desired region_size, and in addition SLICO will optimize using an adaptive compactness factor,
│ │ │ + while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.region_size
- Chooses an average superpixel size measured in pixels
│ │ │ +
│ │ │ + The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
│ │ │ + superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ + computing iterations over the given image. For enhanced results it is recommended for color images to
│ │ │ + preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and additional conversion into
│ │ │ + CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
│ │ │ +
│ │ │ + ![image](pics/superpixels_slic.png)public static SuperpixelSLIC createSuperpixelSLIC(Mat image, │ │ │ + int algorithm)│ │ │ +
image
- Image to segmentalgorithm
- Chooses the algorithm variant to use:
│ │ │ + SLIC segments the image using a desired region_size, and in addition SLICO will optimize using an adaptive compactness factor,
│ │ │ + while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.
│ │ │ +
│ │ │ + The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
│ │ │ + superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ + computing iterations over the given image. For enhanced results it is recommended for color images to
│ │ │ + preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and additional conversion into
│ │ │ + CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
│ │ │ +
│ │ │ + ![image](pics/superpixels_slic.png)public static SuperpixelSLIC createSuperpixelSLIC(Mat image)│ │ │ +
image
- Image to segment
│ │ │ + SLIC segments the image using a desired region_size, and in addition SLICO will optimize using an adaptive compactness factor,
│ │ │ + while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.
│ │ │ +
│ │ │ + The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
│ │ │ + superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
│ │ │ + computing iterations over the given image. For enhanced results it is recommended for color images to
│ │ │ + preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and additional conversion into
│ │ │ + CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
│ │ │ +
│ │ │ + ![image](pics/superpixels_slic.png)public static EdgeAwareInterpolator createEdgeAwareInterpolator()│ │ │ +
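A sketch mirroring the SuperpixelSLIC entries above; SEEDS (createSuperpixelSEEDS) is driven the same way once constructed. Region size 20 and ruler 10f are illustrative choices, and input.png is hypothetical:

    import org.opencv.core.Mat;
    import org.opencv.core.Size;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.imgproc.Imgproc;
    import org.opencv.ximgproc.SuperpixelSLIC;
    import org.opencv.ximgproc.Ximgproc;

    public class SlicDemo {
        public static void main(String[] args) {
            Mat bgr = Imgcodecs.imread("input.png");
            Mat lab = new Mat();
            Imgproc.GaussianBlur(bgr, bgr, new Size(3, 3), 0);
            Imgproc.cvtColor(bgr, lab, Imgproc.COLOR_BGR2Lab);

            // SLICO adapts the compactness factor on its own; SLIC/MSLIC use the ruler.
            SuperpixelSLIC slic = Ximgproc.createSuperpixelSLIC(lab, Ximgproc.SLICO, 20, 10f);
            slic.iterate(10);
            Mat mask = new Mat();
            slic.getLabelContourMask(mask, true);
            Imgcodecs.imwrite("slic_contours.png", mask);
        }
    }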
public static RICInterpolator createRICInterpolator()│ │ │ +
public static RFFeatureGetter createRFFeatureGetter()│ │ │ +
public static StructuredEdgeDetection createStructuredEdgeDetection(java.lang.String model, │ │ │ + RFFeatureGetter howToGetFeatures)│ │ │ +
public static StructuredEdgeDetection createStructuredEdgeDetection(java.lang.String model)│ │ │ +
public static void weightedMedianFilter(Mat joint, │ │ │ + Mat src, │ │ │ + Mat dst, │ │ │ + int r, │ │ │ + double sigma, │ │ │ + int weightType, │ │ │ + Mat mask)│ │ │ +
joint
- automatically generatedsrc
- automatically generateddst
- automatically generatedr
- automatically generatedsigma
- automatically generatedweightType
- automatically generatedmask
- automatically generatedpublic static void weightedMedianFilter(Mat joint, │ │ │ + Mat src, │ │ │ + Mat dst, │ │ │ + int r, │ │ │ + double sigma, │ │ │ + int weightType)│ │ │ +
joint
- automatically generatedsrc
- automatically generateddst
- automatically generatedr
- automatically generatedsigma
- automatically generatedweightType
- automatically generatedpublic static void weightedMedianFilter(Mat joint, │ │ │ + Mat src, │ │ │ + Mat dst, │ │ │ + int r, │ │ │ + double sigma)│ │ │ +
joint
- automatically generatedsrc
- automatically generateddst
- automatically generatedr
- automatically generatedsigma
- automatically generatedpublic static void colorMatchTemplate(Mat img, │ │ │ - Mat templ, │ │ │ - Mat result)│ │ │ -
public static void weightedMedianFilter(Mat joint, │ │ │ + Mat src, │ │ │ + Mat dst, │ │ │ + int r)│ │ │ +
img
- automatically generatedtempl
- automatically generatedresult
- automatically generatedjoint
- automatically generatedsrc
- automatically generateddst
- automatically generatedr
- automatically generatedpublic static SimpleWB createSimpleWB()│ │ │ -
public static GrayworldWB createGrayworldWB()│ │ │ -
public static LearningBasedWB createLearningBasedWB(java.lang.String path_to_model)│ │ │ -
path_to_model
- Path to a .yml file with the model. If not specified, the default model is usedpublic static LearningBasedWB createLearningBasedWB()│ │ │ -
public static void applyChannelGains(Mat src, │ │ │ - Mat dst, │ │ │ - float gainB, │ │ │ - float gainG, │ │ │ - float gainR)│ │ │ -
src
- Input three-channel image in the BGR color space (either CV_8UC3 or CV_16UC3)dst
- Output image of the same size and type as src.gainB
- gain for the B channelgainG
- gain for the G channelgainR
- gain for the R channelpublic static TonemapDurand createTonemapDurand(float gamma, │ │ │ - float contrast, │ │ │ - float saturation, │ │ │ - float sigma_color, │ │ │ - float sigma_space)│ │ │ -
gamma
- gamma value for gamma correction. See createTonemapcontrast
- resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
│ │ │ - are maximum and minimum luminance values of the resulting image.saturation
- saturation enhancement value. See createTonemapDragosigma_color
- bilateral filter sigma in color spacesigma_space
- bilateral filter sigma in coordinate spacepublic static TonemapDurand createTonemapDurand(float gamma, │ │ │ - float contrast, │ │ │ - float saturation, │ │ │ - float sigma_color)│ │ │ -
gamma
- gamma value for gamma correction. See createTonemapcontrast
- resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
│ │ │ - are maximum and minimum luminance values of the resulting image.saturation
- saturation enhancement value. See createTonemapDragosigma_color
- bilateral filter sigma in color spacepublic static TonemapDurand createTonemapDurand(float gamma, │ │ │ - float contrast, │ │ │ - float saturation)│ │ │ -
gamma
- gamma value for gamma correction. See createTonemapcontrast
- resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
│ │ │ - are maximum and minimum luminance values of the resulting image.saturation
- saturation enhancement value. See createTonemapDragopublic static TonemapDurand createTonemapDurand(float gamma, │ │ │ - float contrast)│ │ │ -
gamma
- gamma value for gamma correction. See createTonemapcontrast
- resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
│ │ │ - are maximum and minimum luminance values of the resulting image.public static TonemapDurand createTonemapDurand(float gamma)│ │ │ -
gamma
- gamma value for gamma correction. See createTonemap
│ │ │ - are maximum and minimum luminance values of the resulting image.public static TonemapDurand createTonemapDurand()│ │ │ -
public static void oilPainting(Mat src, │ │ │ - Mat dst, │ │ │ - int size, │ │ │ - int dynRatio, │ │ │ - int code)│ │ │ -
src
- Input three-channel or one channel image (either CV_8UC3 or CV_8UC1)dst
- Output image of the same size and type as src.size
- neighbouring size is 2*size+1dynRatio
- image is divided by dynRatio before histogram processingcode
- automatically generatedpublic static void oilPainting(Mat src, │ │ │ - Mat dst, │ │ │ - int size, │ │ │ - int dynRatio)│ │ │ -
src
- Input three-channel or one channel image (either CV_8UC3 or CV_8UC1)dst
- Output image of the same size and type as src.size
- neighbouring size is 2*size+1dynRatio
- image is divided by dynRatio before histogram processingpublic static void inpaint(Mat src, │ │ │ - Mat mask, │ │ │ - Mat dst, │ │ │ - int algorithmType)│ │ │ -
src
- source image
│ │ │ - mask
- mask (#CV_8UC1), where non-zero pixels indicate valid image area, while zero pixels
│ │ │ - indicate area to be inpainteddst
- destination imagealgorithmType
- see xphoto::InpaintTypes
│ │ │ - public static void dctDenoising(Mat src, │ │ │ - Mat dst, │ │ │ - double sigma, │ │ │ - int psize)│ │ │ -
src
- source imagedst
- destination imagesigma
- expected noise standard deviationpsize
- size of block side where dct is computed
│ │ │ -
│ │ │ - SEE:
│ │ │ - fastNlMeansDenoisingpublic static void dctDenoising(Mat src, │ │ │ - Mat dst, │ │ │ - double sigma)│ │ │ -
src
- source imagedst
- destination imagesigma
- expected noise standard deviation
│ │ │ -
│ │ │ - SEE:
│ │ │ - fastNlMeansDenoisingpublic static void bm3dDenoising(Mat src, │ │ │ @@ -2392,15 +2052,15 @@ │ │ │ fastNlMeansDenoising │ │ │ │ │ │
public static void bm3dDenoising(Mat src, │ │ │ Mat dst)│ │ │
public static void dctDenoising(Mat src, │ │ │ + Mat dst, │ │ │ + double sigma, │ │ │ + int psize)│ │ │ +
src
- source imagedst
- destination imagesigma
- expected noise standard deviationpsize
- size of block side where dct is computed
│ │ │ +
│ │ │ + SEE:
│ │ │ + fastNlMeansDenoisingpublic static void dctDenoising(Mat src, │ │ │ + Mat dst, │ │ │ + double sigma)│ │ │ +
src
- source imagedst
- destination imagesigma
- expected noise standard deviation
│ │ │ +
│ │ │ + SEE:
│ │ │ + fastNlMeansDenoisingpublic static void inpaint(Mat src, │ │ │ + Mat mask, │ │ │ + Mat dst, │ │ │ + int algorithmType)│ │ │ +
src
- source image
│ │ │ + mask
- mask (#CV_8UC1), where non-zero pixels indicate valid image area, while zero pixels
│ │ │ + indicate area to be inpainteddst
- destination imagealgorithmType
- see xphoto::InpaintTypes
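A brief sketch of the dctDenoising and inpaint entries above; note that, as documented, the xphoto inpainting mask marks valid pixels with non-zero values, which is the opposite of the main photo module's convention (file names hypothetical):

    import org.opencv.core.Mat;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.xphoto.Xphoto;

    public class RestoreDemo {
        public static void main(String[] args) {
            Mat src = Imgcodecs.imread("damaged.png");
            Mat mask = Imgcodecs.imread("valid_mask.png", Imgcodecs.IMREAD_GRAYSCALE);
            Mat restored = new Mat();
            Xphoto.inpaint(src, mask, restored, Xphoto.INPAINT_SHIFTMAP);

            Mat denoised = new Mat();
            Xphoto.dctDenoising(src, denoised, 15.0);   // sigma = expected noise std-dev
        }
    }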
│ │ │ + public static void oilPainting(Mat src, │ │ │ + Mat dst, │ │ │ + int size, │ │ │ + int dynRatio, │ │ │ + int code)│ │ │ +
src
- Input three-channel or one channel image (either CV_8UC3 or CV_8UC1)dst
- Output image of the same size and type as src.size
- neighbouring size is 2*size+1dynRatio
- image is divided by dynRatio before histogram processingcode
- automatically generatedpublic static void oilPainting(Mat src, │ │ │ + Mat dst, │ │ │ + int size, │ │ │ + int dynRatio)│ │ │ +
src
- Input three-channel or one channel image (either CV_8UC3 or CV_8UC1)dst
- Output image of the same size and type as src.size
- neighbouring size is 2*size+1dynRatio
- image is divided by dynRatio before histogram processingpublic static TonemapDurand createTonemapDurand(float gamma, │ │ │ + float contrast, │ │ │ + float saturation, │ │ │ + float sigma_color, │ │ │ + float sigma_space)│ │ │ +
gamma
- gamma value for gamma correction. See createTonemapcontrast
- resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
│ │ │ + are maximum and minimum luminance values of the resulting image.saturation
- saturation enhancement value. See createTonemapDragosigma_color
- bilateral filter sigma in color spacesigma_space
- bilateral filter sigma in coordinate spacepublic static TonemapDurand createTonemapDurand(float gamma, │ │ │ + float contrast, │ │ │ + float saturation, │ │ │ + float sigma_color)│ │ │ +
gamma
- gamma value for gamma correction. See createTonemapcontrast
- resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
│ │ │ + are maximum and minimum luminance values of the resulting image.saturation
- saturation enhancement value. See createTonemapDragosigma_color
- bilateral filter sigma in color spacepublic static TonemapDurand createTonemapDurand(float gamma, │ │ │ + float contrast, │ │ │ + float saturation)│ │ │ +
gamma
- gamma value for gamma correction. See createTonemapcontrast
- resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
│ │ │ + are maximum and minimum luminance values of the resulting image.saturation
- saturation enhancement value. See createTonemapDragopublic static TonemapDurand createTonemapDurand(float gamma, │ │ │ + float contrast)│ │ │ +
gamma
- gamma value for gamma correction. See createTonemapcontrast
- resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
│ │ │ + are maximum and minimum luminance values of the resulting image.public static TonemapDurand createTonemapDurand(float gamma)│ │ │ +
gamma
- gamma value for gamma correction. See createTonemap
│ │ │ + are maximum and minimum luminance values of the resulting image.public static TonemapDurand createTonemapDurand()│ │ │ +
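A compact sketch of createTonemapDurand applied to a 32-bit HDR image; the five values are the documented defaults, scene.hdr is hypothetical, and the imread flags are one plausible way to load a float image:

    import org.opencv.core.CvType;
    import org.opencv.core.Mat;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.xphoto.TonemapDurand;
    import org.opencv.xphoto.Xphoto;

    public class TonemapDemo {
        public static void main(String[] args) {
            Mat hdr = Imgcodecs.imread("scene.hdr",
                    Imgcodecs.IMREAD_ANYDEPTH | Imgcodecs.IMREAD_COLOR);
            TonemapDurand tm = Xphoto.createTonemapDurand(1.0f, 4.0f, 1.0f, 2.0f, 2.0f);
            Mat ldr = new Mat();
            tm.process(hdr, ldr);                      // output is CV_32F in [0, 1]
            ldr.convertTo(ldr, CvType.CV_8U, 255.0);
            Imgcodecs.imwrite("tonemapped.png", ldr);
        }
    }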
public static SimpleWB createSimpleWB()│ │ │ +
public static GrayworldWB createGrayworldWB()│ │ │ +
public static LearningBasedWB createLearningBasedWB(java.lang.String path_to_model)│ │ │ +
path_to_model
- Path to a .yml file with the model. If not specified, the default model is usedpublic static LearningBasedWB createLearningBasedWB()│ │ │ +
public static void applyChannelGains(Mat src, │ │ │ + Mat dst, │ │ │ + float gainB, │ │ │ + float gainG, │ │ │ + float gainR)│ │ │ +
src
- Input three-channel image in the BGR color space (either CV_8UC3 or CV_16UC3)dst
- Output image of the same size and type as src.gainB
- gain for the B channelgainG
- gain for the G channelgainR
- gain for the R channel
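Finally, a sketch tying together the white-balance factories and applyChannelGains documented above; the gains are arbitrary illustration values and input.png is hypothetical:

    import org.opencv.core.Mat;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.xphoto.GrayworldWB;
    import org.opencv.xphoto.Xphoto;

    public class WbDemo {
        public static void main(String[] args) {
            Mat src = Imgcodecs.imread("input.png");   // BGR, CV_8UC3
            Mat balanced = new Mat();
            GrayworldWB wb = Xphoto.createGrayworldWB();
            wb.balanceWhite(src, balanced);

            // Or apply explicit per-channel gains directly:
            Mat gained = new Mat();
            Xphoto.applyChannelGains(src, gained, 1.1f, 1.0f, 0.9f);
        }
    }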