Commit 102b7a76 authored by Timm Schoening

Initial commit of the CoMoNoD algorithm with example file

#pragma once
#include "opencv2/opencv.hpp"
#include "oceancv/proc/fspice.hpp"
namespace ocv {
/**
* @brief: Computes the mass center of a contour.
* Computes the mass center of all pixels contained in a contour.
* Pixel intensities do not contribute!
* @author: Timm Schoening - tschoening [at] geomar [dot] de
* @date: 2017-04-20
*/
cv::Point contourMassCenter(const std::vector<cv::Point>& contour, int minx, int maxx, int miny, int maxy) {
size_t wx = 0;
size_t wy = 0;
size_t mm = 0;
for(int y = miny; y <= maxy; y++) {
for(int x = minx; x <= maxx; x++) {
if(cv::pointPolygonTest(contour,cv::Point(x,y),false) >= 0) {
wx += x;
wy += y;
mm++;
}
}
}
return cv::Point(wx / mm,wy / mm);
}
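/*
* Usage sketch (added for illustration, not part of the original commit): computes the mass
* center of a contour found by cv::findContours, taking the bounding box limits from
* cv::boundingRect. The variable names are hypothetical.
*
* cv::Rect box = cv::boundingRect(contour);
* cv::Point center = ocv::contourMassCenter(contour, box.x, box.x + box.width - 1, box.y, box.y + box.height - 1);
*/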
/**
* @brief: Computes characteristic points of a pixel contour
* Computes the min and max x and y values of a contour (list of cv::Points).
* The returned vector contains the min x and y in the first element,
* the max x and y in the second and the center between those in the third.
* When with_mass is true, the true center of all pixels inside the contour is
* additionally computed using the contourMassCenter function and stored in the fourth element.
* @author: Timm Schoening - tschoening [at] geomar [dot] de
* @date: 2017-04-20
*/
std::vector<cv::Point> contourInfo(const std::vector<cv::Point>& contour, bool with_mass = true) {
std::vector<cv::Point> ret(4,cv::Point(-1,-1));
int minx,maxx,miny,maxy;
minx = maxx = contour[0].x;
miny = maxy = contour[0].y;
for(size_t hh = 1; hh < contour.size(); hh++) {
minx = std::min(minx,contour[hh].x);
miny = std::min(miny,contour[hh].y);
maxx = std::max(maxx,contour[hh].x);
maxy = std::max(maxy,contour[hh].y);
}
ret[0] = cv::Point(minx,miny);
ret[1] = cv::Point(maxx,maxy);
ret[2] = cv::Point(minx + (maxx-minx)/2,miny + (maxy-miny)/2);
if(with_mass) {
ret[3] = ocv::contourMassCenter(contour,minx,maxx,miny,maxy);
}
return ret;
}
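/*
* Usage sketch (added for illustration, not part of the original commit): the four entries
* returned by contourInfo. The variable names are hypothetical.
*
* std::vector<cv::Point> info = ocv::contourInfo(contour);
* cv::Point min_xy = info[0];      // top left corner of the bounding box
* cv::Point max_xy = info[1];      // bottom right corner of the bounding box
* cv::Point box_center = info[2];  // center of the bounding box
* cv::Point mass_center = info[3]; // mass center of all pixels inside the contour
*/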
/**
* @brief: Computes a Gray Level Co-occurrence matrix
* Expects a CV_8UC3 image but analyses only the first (usually blue) channel.
* Compares each pixel to its two adjacent pixels at the next higher x and y only;
* both transition directions are counted, making the matrix symmetric.
* @author: Timm Schoening - tschoening [at] geomar [dot] de
* @date: 2017-04-20
*/
std::vector<std::vector<uint>> computeGLCM(const cv::Mat& tmp) {
assert(tmp.type() == CV_8UC3);
// Initialize GLCM
std::vector<std::vector<uint>> glcm(256,std::vector<uint>(256,0));
// Fill GLCM and histogram
for(int y = 0; y < tmp.rows-1; y++) {
for(int x = 0; x < tmp.cols-1; x++) {
glcm[tmp.at<cv::Vec3b>(y,x)[0]][tmp.at<cv::Vec3b>(y,x+1)[0]]++;
glcm[tmp.at<cv::Vec3b>(y,x+1)[0]][tmp.at<cv::Vec3b>(y,x)[0]]++;
glcm[tmp.at<cv::Vec3b>(y,x)[0]][tmp.at<cv::Vec3b>(y+1,x)[0]]++;
glcm[tmp.at<cv::Vec3b>(y+1,x)[0]][tmp.at<cv::Vec3b>(y,x)[0]]++;
}
}
return glcm;
}
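/*
* Usage sketch (added for illustration, not part of the original commit): computing the GLCM
* of an image and the total number of co-occurrence counts it holds. By construction of the
* loop above, the total equals 4 * (rows - 1) * (cols - 1). The file name is hypothetical.
*
* cv::Mat img = cv::imread("example.jpg"); // CV_8UC3
* std::vector<std::vector<uint>> glcm = ocv::computeGLCM(img);
* size_t total = 0;
* for(const auto& row : glcm)
*     for(uint count : row)
*         total += count;
* // total == 4 * (img.rows - 1) * (img.cols - 1)
*/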
/**
* @brief: Adaptive threshold for the CoMoNoD algorithm
* Computes an adaptive threshold from a given CV_8UC3 image such that compact
* objects are created in the result image. To this end, first a co-occurrence matrix
* of pixel intensities is computed. Did you notice how elegantly we switched from three
* channels to intensity? Correct, we actually use only the first channel.
* After the co-occurrence matrix is computed (1 px distance Moore-neighbour Haralick, if you like),
* the threshold is picked for which the fewest transitions between black and white
* pixels would result.
* @author: Timm Schoening - tschoening [at] geomar [dot] de
* @date: 2017-04-20
*/
uint adaptiveThreshold(const cv::Mat& tmp, float adaptive_threshold) {
// Compute Gray Level Co-occurrence matrix
auto glcm = ocv::computeGLCM(tmp);
// Compute initial compactness between the two classes
uint cc[2][2];
cc[0][0] = glcm[0][0];
cc[0][1] = 0;
cc[1][0] = 0;
cc[1][1] = (tmp.rows-1) * (tmp.cols-1) * 4 - glcm[0][0];
for(int t = 1; t < 256; t++) {
cc[0][1] += glcm[0][t];
cc[1][0] += glcm[t][0];
cc[1][1] -= glcm[0][t];
cc[1][1] -= glcm[t][0];
}
float gammas[256];
float gam, gam_chg, max_gamma = 0;
float prev_gam = 1.0 * (cc[0][0] + cc[1][1]) / (cc[0][0] + cc[0][1] + cc[1][0] + cc[1][1]);
uint max_gamma_t = 0;
// Increase threshold and compute the according compactness
for(int t = 1; t < 256; t++) {
// Move cooc's of current threshold from negative to positive class
cc[0][0] += glcm[t][t];
cc[1][1] -= glcm[t][t];
// Move cooc's of current threshold and t1 (<t) from mixed to positive class
for(int t1 = 0; t1 < t; t1++) {
cc[0][0] += glcm[t][t1];
cc[0][0] += glcm[t1][t];
cc[0][1] -= glcm[t1][t];
cc[1][0] -= glcm[t][t1];
}
// Move cooc's of current threshold and t1 (>t) from negative to mixed class
for(int t1 = t + 1; t1 < 256; t1++) {
cc[0][1] += glcm[t][t1];
cc[1][0] += glcm[t1][t];
cc[1][1] -= glcm[t][t1];
cc[1][1] -= glcm[t1][t];
}
// Compactness gamma (small for noisy images, larger for separate clusters of similar class)
gam = 1.0 * (cc[0][0] + cc[1][1]) / (cc[0][0] + cc[0][1] + cc[1][0] + cc[1][1]);
// Find maximum slope of gamma ("first derivative")
gam_chg = prev_gam - gam;
if(gam_chg > max_gamma) {
max_gamma = gam_chg;
max_gamma_t = t;
}
gammas[t] = gam_chg;
prev_gam = gam;
}
// Start from the compactness slope maximum and move to the left until the slope falls below adaptive_threshold times the maximum slope
for(int t = max_gamma_t; t > 0; t--) {
if(gammas[t] < adaptive_threshold * gammas[max_gamma_t]) {
return t;
}
}
return 0;
}
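/*
* Usage sketch (added for illustration, not part of the original commit): binarizing a
* CV_8UC3 patch with the compactness-based threshold, using the 0.1 starting value
* suggested for theta_gamma in contrastMaximization below. The variable names are hypothetical.
*
* uint t = ocv::adaptiveThreshold(patch, 0.1f);
* std::vector<cv::Mat> channels;
* cv::split(patch, channels);
* cv::Mat bin;
* cv::threshold(channels[0], bin, t, 255, cv::THRESH_BINARY_INV);
*/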
/**
* @brief: Contrast maximization of the CoMoNoD algorithm
* Takes an OpenCV Mat and performs several image processing steps on the GPU.
* Essential is a colour normalization step using the fSpice algorithm
* (see https://doi.org/10.1371/journal.pone.0038179). The function expects
* an input CV_8UC3 image and the top left and bottom right coordinates of the region
* of interest to analyse. theta_gamma defines the limit until which pixels are
* assigned to the nodule class (0 < theta_gamma < 1). Try 0.1 for starters.
* scale_fac defines how much the image needs to be resized to fit the median area
* in the data set. sigma is the fSpice parameter (try 701 for starters).
* binary, blob_index and contours are return values for the next CoMoNoD phase.
* @author: Timm Schoening - tschoening [at] geomar [dot] de
* @date: 2017-04-20
*/
void contrastMaximization(const cv::Mat& input, cv::Point top_left, cv::Point bottom_right, float theta_gamma, float scale_fac, uint sigma, cv::Mat& binary, cv::Mat& blob_index, std::vector<std::vector<cv::Point>>& contours) {
cv::Mat tmp_1;
cv::cuda::GpuMat input_g, tmp_1_g, tmp_2_g;
std::vector<cv::Vec4i> hierarchy;
// Median
cv::medianBlur(input,tmp_1,5);
// Switch to Gpu
input_g.upload(tmp_1);
// Scale to uniform size (pixel-to-cm ratio)
cv::cuda::resize(input_g, tmp_1_g, cv::Size(), scale_fac, scale_fac, cv::INTER_CUBIC);
// Gaussian
ocv::cuda::Gauss(tmp_1_g,tmp_2_g,3);
// Grayscale
cv::cuda::cvtColor(tmp_2_g,tmp_1_g,CV_BGR2GRAY);
cv::cuda::merge(std::vector<cv::cuda::GpuMat>({tmp_1_g,tmp_1_g,tmp_1_g}),tmp_2_g);
// fspice
ocv::cuda::fspice(tmp_2_g,tmp_1_g,sigma);
// Resize back to the original size so that top_left and bottom_right refer to the correct coordinate system
cv::cuda::resize(tmp_1_g, tmp_2_g, input.size(), -1, -1, cv::INTER_CUBIC);
// Pick center
tmp_1_g = tmp_2_g(cv::Rect(top_left.x,top_left.y,bottom_right.x-top_left.x-1,bottom_right.y-top_left.y-1));
tmp_1_g.download(tmp_1);
// Determine the binarization threshold adaptively through a compactness criterion
uint threshold = ocv::adaptiveThreshold(tmp_1,theta_gamma);
// Scale to uniform size again (pixel-to-cm ratio)
cv::cuda::resize(tmp_1_g, tmp_2_g, cv::Size(), scale_fac, scale_fac, cv::INTER_CUBIC);
// Convert to gray again
cv::cuda::cvtColor(tmp_2_g,tmp_1_g,CV_BGR2GRAY);
// Binarize
cv::cuda::threshold(tmp_1_g, tmp_2_g, threshold, 255, cv::THRESH_BINARY_INV);
// Return from Gpu
tmp_2_g.download(tmp_1);
// Plot contours in binary mat (these are the external contours where holes have been filled!)
cv::findContours(tmp_1,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
binary = cv::Mat(tmp_1.size(),CV_8UC1,cv::Scalar(0));
blob_index = cv::Mat(tmp_1.size(),CV_32SC1,cv::Scalar(0));
for(size_t n = 0; n < contours.size(); n++) {
cv::drawContours(binary,contours,n,cv::Scalar(255),CV_FILLED);
cv::drawContours(blob_index,contours,n,cv::Scalar(n),CV_FILLED);
}
}
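/*
* Usage sketch (added for illustration, not part of the original commit): running the first
* CoMoNoD phase on a full image with the region of interest spanning the whole frame.
* The 0.1 and 701 starting values follow the comment above; the file name and scale
* factor are hypothetical.
*
* cv::Mat img = cv::imread("seafloor.jpg"), binary, blob_index;
* std::vector<std::vector<cv::Point>> contours;
* ocv::contrastMaximization(img, cv::Point(0,0), cv::Point(img.cols,img.rows), 0.1f, 1.0f, 701, binary, blob_index, contours);
*/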
/**
* @brief: Cuts pixel blobs at contour bottlenecks
* Takes peak markers determined from the distance image of the given binary image and
* splits up blobs along blob contour bottlenecks to separate the peaks into
* distinct blobs.
* Expects the contours from the first CoMoNoD phase and an index image in which each
* pixel carries an increasing number identifying the blob it belongs to; the cut lines
* are drawn into the given binary image encoding the blob / non-blob assignment.
* @author: Timm Schoening - tschoening [at] geomar [dot] de
* @date: 2017-04-20
*/
void cutBlobBottlenecks(cv::Mat& peak_img, std::vector<std::vector<cv::Point>> contours, cv::Mat& blob_index, cv::Mat& binary) {
std::vector<std::vector<cv::Point>> peaks, new_contours, tmp_contours;
std::vector<cv::Vec4i> hierarchy, defects;
std::map<uint,std::vector<cv::Point>> tmp_blob_peaks, new_blob_peaks;
std::vector<std::pair<cv::Point,cv::Point>> cuts;
bool all_separated;
std::vector<int> hull2,in_v1,in_v2;
std::vector<cv::Point> virtual_contour_1,virtual_contour_2,min_v1,min_v2;
int sep_tries, sep_limit = 100;
int min_d1,min_d2;
float min_dist, tmp_dist;
cv::Point pp1,pp2;
// Get peak markers
cv::findContours(peak_img,peaks,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
// Find blob index for each peak
std::map<uint,std::vector<cv::Point>> blob_peaks = {};
for(uint n = 0; n < peaks.size(); n++) {
blob_peaks[blob_index.at<uint32_t>(peaks[n][0].y,peaks[n][0].x)].push_back(peaks[n][0]);
}
// Find contour bottlenecks such that the peaks are separated
for(uint n = 0; n < contours.size(); n++) {
if(blob_peaks[n].size() < 2)
continue;
tmp_contours = {contours[n]};
tmp_blob_peaks = {{0,blob_peaks[n]}};
cuts = {};
all_separated = false;
sep_tries = 0;
while(!all_separated) {
if(sep_tries++ > sep_limit)
break;
all_separated = true;
new_contours = {};
for(uint m = 0; m < tmp_contours.size(); m++) {
// Check whether this tmp blob is split already
if(tmp_blob_peaks[m].size() < 2) {
new_contours.push_back(tmp_contours[m]);
continue;
}
// Get convex hull and the defects (contour points that are far away from the hull)
cv::convexHull(tmp_contours[m],hull2);
cv::convexityDefects(tmp_contours[m],hull2,defects);
std::sort(defects.begin(),defects.end(),[](auto a,auto b) {return a[2] < b[2];});
// Find shortest line between two defects that would separate two peaks into separate blobs
min_dist = 10000000;
min_d1 = min_d2 = 0;
for(uint d1 = 0; d1 < defects.size(); d1++) {
for(uint d2 = d1 + 1; d2 < defects.size(); d2++) {
// Create two virtual blobs by slicing the current contour at the two defect locations
virtual_contour_1 = std::vector<cv::Point>(tmp_contours[m].begin(),tmp_contours[m].begin() + defects[d1][2]);
virtual_contour_2 = std::vector<cv::Point>(tmp_contours[m].begin() + defects[d1][2],tmp_contours[m].begin() + defects[d2][2]);
std::copy(tmp_contours[m].begin() + defects[d2][2],tmp_contours[m].end(),std::back_inserter(virtual_contour_1));
if(virtual_contour_1.size() == 0)
continue;
// Check which peaks lie in the first virtual blob
in_v1 = std::vector<int>(tmp_blob_peaks[m].size(),0);
for(uint p1 = 0; p1 < tmp_blob_peaks[m].size(); p1++) {
in_v1[p1] = (cv::pointPolygonTest(virtual_contour_1,tmp_blob_peaks[m][p1],false) > 0);
}
for(uint p1 = 0; p1 < tmp_blob_peaks[m].size(); p1++) {
for(uint p2 = p1+1; p2 < tmp_blob_peaks[m].size(); p2++) {
// Check whether two peaks really lie in separate blobs
if((in_v1[p1] && !in_v1[p2]) || (!in_v1[p1] && in_v1[p2])) {
pp1 = tmp_contours[m][defects[d1][2]];
pp2 = tmp_contours[m][defects[d2][2]];
// Find shortest cut to split the blob
tmp_dist = pow(pp1.x-pp2.x,2)+pow(pp1.y-pp2.y,2);
if(tmp_dist < min_dist) {
min_dist = tmp_dist;
min_d1 = d1;
min_d2 = d2;
min_v1 = virtual_contour_1;
min_v2 = virtual_contour_2;
}
}
}
}
}
}
// Check if there is a valid cut
if(min_d1 != min_d2) {
// Update contours
new_contours.push_back(min_v1);
new_contours.push_back(min_v2);
// Store cut
cuts.push_back(std::pair<cv::Point,cv::Point>(tmp_contours[m][defects[min_d1][2]],tmp_contours[m][defects[min_d2][2]]));
}
}
tmp_contours = new_contours;
// Update peak assignments
new_blob_peaks = {};
// Iterate the map entries directly (indexing with operator[] would insert missing keys and grow the map while looping)
for(auto& peak_group : tmp_blob_peaks) {
for(uint m2 = 0; m2 < peak_group.second.size(); m2++) {
for(uint m1 = 0; m1 < tmp_contours.size(); m1++) {
if(cv::pointPolygonTest(tmp_contours[m1],peak_group.second[m2],false) >= 0) {
new_blob_peaks[m1].push_back(peak_group.second[m2]);
// Check if we have to do the splitting again
if(new_blob_peaks[m1].size() > 1) {
all_separated = false;
}
break;
}
}
}
}
tmp_blob_peaks = new_blob_peaks;
}
// Draw cut lines in image
for(auto cut : cuts) {
cv::line(binary,cut.first,cut.second,cv::Scalar(0),2);
}
}
}
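// Note (added for illustration): each cv::Vec4i returned by cv::convexityDefects holds
// {start_index, end_index, farthest_point_index, fixpt_depth}. The function above uses
// index 2, i.e. the contour point farthest from the convex hull, as a candidate cut
// endpoint, and picks the pair of defects with the shortest squared distance that still
// separates two peaks.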
/**
* @brief: Second phase of the CoMoNoD algorithm for nodule delineation
* Takes a binary image from the first phase, cuts up connected pixel blobs,
* fuses small blobs in close vicinity and returns a binary image with final
* pixel classification in nodule-positive (255) and nodule-negative (0).
* @author: Timm Schoening - tschoening [at] geomar [dot] de
* @date: 2017-04-20
*/
void noduleDelineation(cv::Mat& binary, cv::Mat& blob_index, std::vector<std::vector<cv::Point>> contours, uint theta_r, float scale_fac) {
cv::Mat tmp_1, tmp_2, distance_img, peak_img, blob_sizes;
cv::cuda::GpuMat tmp_1_g, tmp_2_g, peaks_g;
std::vector<std::vector<cv::Point>> hull(1);
std::vector<cv::Vec4i> hierarchy;
std::map<uint,std::vector<cv::Point>> fuse_sets = {};
std::vector<cv::Point> contour_info,search_neighbors;
int min_peak_dist = 5 * theta_r;
uint max_neighbor_search_dist = 2 * theta_r;
uint max_neighbor_fuse_size = 3.14159 * theta_r * theta_r;
uint min_neighbor_fuse_size = 4 * max_neighbor_fuse_size;
int tx,ty;
uint max_neighbor_index, max_neighbor_size;
// A cuda filter pointer to erode / dilate with a 3x3 kernel
cv::Ptr<cv::cuda::Filter> erode = cv::cuda::createMorphologyFilter(cv::MORPH_ERODE,CV_8UC1,cv::getStructuringElement(cv::MORPH_RECT,cv::Size(3,3)));
cv::Ptr<cv::cuda::Filter> dilate = cv::cuda::createMorphologyFilter(cv::MORPH_DILATE,CV_8UC1,cv::getStructuringElement(cv::MORPH_RECT,cv::Size(3,3)));
// Split connected nodules
cv::distanceTransform(binary,tmp_1,CV_DIST_L2,3);
// Get briefly back to GPU to find peaks in distance image (200x speedup)
tmp_2_g.upload(tmp_1);
cv::cuda::threshold(tmp_2_g,tmp_1_g,theta_r,255,cv::THRESH_TOZERO);
tmp_1_g.download(distance_img);
tmp_1_g.convertTo(tmp_2_g,CV_8UC1);
dilate->apply(tmp_2_g,peaks_g);
cv::cuda::compare(tmp_2_g,peaks_g,peaks_g,cv::CMP_GE);
erode->apply(tmp_2_g,tmp_1_g);
cv::cuda::compare(tmp_2_g,tmp_1_g,tmp_1_g,cv::CMP_GT);
cv::cuda::bitwise_and(peaks_g,tmp_1_g,peaks_g);
// Final return to CPU
peaks_g.download(peak_img);
// Remove all peaks that have a larger neighbour in close vicinity
for(int y = 0; y < peak_img.rows; y++) {
for(int x = 0; x < peak_img.cols; x++) {
if(peak_img.at<uchar>(y,x) > 127) {
// Check pixel neighborhood for higher peaks
for(int dy = std::max(0,y-min_peak_dist); dy <= std::min(peak_img.rows-1,y+min_peak_dist); dy++) {
for(int dx = std::max(0,x-min_peak_dist); dx <= std::min(peak_img.cols-1,x+min_peak_dist); dx++) {
if(blob_index.at<uint32_t>(y,x) == blob_index.at<uint32_t>(dy,dx) && distance_img.at<float>(dy,dx) >= distance_img.at<float>(y,x) && peak_img.at<uchar>(dy,dx) > 127 && (y != dy || x != dx))
peak_img.at<uchar>(y,x) = 0;
}
}
}
}
}
// Cut connected blobs along bottlenecks
ocv::cutBlobBottlenecks(peak_img,contours,blob_index,binary);
// Fuse small blobs with locally biggest
cv::findContours(binary,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
// Redo blob index & size images
blob_sizes = cv::Mat(binary.size(),CV_32SC1,cv::Scalar(0));
blob_index = cv::Mat(binary.size(),CV_32SC1,cv::Scalar(0));
for(size_t n = 0; n < contours.size(); n++) {
cv::drawContours(blob_sizes,contours,n,cv::Scalar(cv::contourArea(contours[n])),CV_FILLED);
cv::drawContours(blob_index,contours,n,cv::Scalar(n),CV_FILLED);
}
// Create a list of neighbour pixel offsets to create a circular neighbourhood that can be traversed linearly
for(int y = -(int)max_neighbor_search_dist; y <= (int)max_neighbor_search_dist; y++) {
for(int x = -(int)max_neighbor_search_dist; x <= (int)max_neighbor_search_dist; x++) {
if(x == 0 && y == 0)
continue;
if(y*y + x*x > (int)(max_neighbor_search_dist*max_neighbor_search_dist))
continue;
search_neighbors.push_back(cv::Point(x,y));
}
}
for(size_t n = 0; n < contours.size(); n++) {
// For small nodules check if there is a bigger one close by
if(contours[n].size() < max_neighbor_fuse_size) {
// Get position and min/max of blob
contour_info = ocv::contourInfo(contours[n],true);
max_neighbor_size = 0;
max_neighbor_index = 0;
// Search around this nodule's mass center whether another nodule occurs (up to max_neighbor_search_dist = 2 * theta_r distance)
for(cv::Point tp : search_neighbors) {
tx = contour_info[3].x+tp.x;
ty = contour_info[3].y+tp.y;
if(tx < 0 || ty < 0 || tx >= binary.cols || ty >= binary.rows)
continue;
if(blob_sizes.at<uint32_t>(ty,tx) > max_neighbor_size) {
max_neighbor_size = blob_sizes.at<uint32_t>(ty,tx);
max_neighbor_index = blob_index.at<uint32_t>(ty,tx);
}
}
// If the neighbour is big enough, copy this contour's pixels to the bigger neighbour's contour
if(max_neighbor_size > min_neighbor_fuse_size) {
copy(contours[n].begin(),contours[n].end(),std::back_inserter(fuse_sets[max_neighbor_index]));
} else {
copy(contours[n].begin(),contours[n].end(),std::back_inserter(fuse_sets[n]));
}
} else {
copy(contours[n].begin(),contours[n].end(),std::back_inserter(fuse_sets[n]));
}
}
// Compute convex hulls of fused blobs and plot all
binary = cv::Scalar(0);
for(auto pp : fuse_sets) {
cv::convexHull(cv::Mat(pp.second),hull[0]);
// Paint it black (only the border, to prevent re-overlaps)
cv::drawContours(binary,hull,0,cv::Scalar(0),4);
// Paint it white
cv::drawContours(binary,hull,0,cv::Scalar(255),CV_FILLED);
}
// Rescale image back to original size
cv::resize(binary, tmp_1, cv::Size(), 1.0/scale_fac, 1.0/scale_fac, cv::INTER_CUBIC);
cv::threshold(tmp_1,tmp_2,127,255,CV_THRESH_BINARY);
// Final blob detection, filter by size and store results
cv::erode(tmp_2,binary,cv::Mat());
}
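/*
* Usage sketch (added for illustration, not part of the original commit): the two CoMoNoD
* phases chained, followed by the statistics step defined below. Parameter values, the
* region of interest and the pixel-to-cm ratio are hypothetical.
*
* cv::Mat img = cv::imread("seafloor.jpg"), binary, blob_index;
* std::vector<std::vector<cv::Point>> contours;
* cv::Point top_left(0,0), bottom_right(img.cols,img.rows);
* ocv::contrastMaximization(img, top_left, bottom_right, 0.1f, 1.0f, 701, binary, blob_index, contours);
* ocv::noduleDelineation(binary, blob_index, contours, 10, 1.0f);
* std::vector<double> phis = ocv::noduleStatistics(binary, 10, 0.01f, top_left, bottom_right);
*/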
/**
* @brief: Computes pixel blob size statistics
* Takes a binary image, determines separate blobs within and computes individual blob
* sizes (in pixel and cm^2). Different 'particle size' measures are computed
* that are used for geological (e.g. sediment grain size) analysis.
* Nodules can be filtered by setting the min / max parameters; the default parameters
* cause no filtering. You can change the target_values at which the nodule sizes
* are reported (the defaults are the values needed for Tukey plots), which might be
* handy when you want to do a particular particle size analysis.
* @author: Timm Schoening - tschoening [at] geomar [dot] de
* @date: 2017-04-20
*/
std::vector<double> noduleStatistics(const cv::Mat& binary, uint theta_r, float cm_per_pix, cv::Point top_left, cv::Point bottom_right, float min_nodule_size = 0, float max_nodule_size = -1, float max_ellipse_distortion = -1, float max_blob_distortion = -1, const std::vector<double>& orig_target_values = {0.01,0.10,0.25,0.50,0.75,0.90,0.99}) {
std::vector<cv::Vec4i> hierarchy;
std::vector<std::vector<cv::Point>> contours;
std::vector<double> nodule_sizes, nodule_pix_sizes, tmp_nodule_sizes, tmp_nodule_pix_sizes, target_values, nod_size_bins = {0,0.01,0.25,0.5,0.75,0.99};
cv::RotatedRect ellipse;
double nodule_area, ellipse_area, ellipse_size,sum_cov, nodule_size, target_value;
uint target_index;
cv::findContours(binary,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
// Check which nodule candidates match the given size limits and fit the retained ones with ellipses (further per-nodule measures like hulls or entropy could be added here)
nodule_sizes = {};
nodule_pix_sizes = {};
for(size_t n = 0; n < contours.size(); n++) {
nodule_area = cv::contourArea(contours[n]);
if(nodule_area < theta_r)
continue;
// Fit ellipse needs at least 5 points!
if(contours[n].size() < 5)
continue;
nodule_size = 1.0 * nodule_area * cm_per_pix;
// Fit contour with an ellipse
ellipse = cv::fitEllipse(cv::Mat(contours[n]));
if(max_ellipse_distortion > 0 && (ellipse.size.width > max_ellipse_distortion*ellipse.size.height || max_ellipse_distortion*ellipse.size.width < ellipse.size.height))
continue;
ellipse_area = 0.25 * ellipse.size.width * ellipse.size.height * 3.14159;
ellipse_size = 1.0 * cm_per_pix * ellipse_area;
if(ellipse_size < min_nodule_size || (max_nodule_size > 0 && ellipse_size > max_nodule_size))
continue;
if(max_blob_distortion > 0 && (ellipse_area > max_blob_distortion*nodule_area || ellipse_area*max_blob_distortion < nodule_area))
continue;
nodule_pix_sizes.push_back(ellipse_area);
nodule_sizes.push_back(ellipse_size);
}
// NOTE: Here you could add further analyses in case you need every individual nodule size!
std::sort(nodule_sizes.begin(),nodule_sizes.end());
std::sort(nodule_pix_sizes.begin(), nodule_pix_sizes.end());
std::vector<double> phis(nod_size_bins.size() + orig_target_values.size() + 3,0.0);
// Determine coverage from retained nodules (this is the number of nodule-positive pixels, not a percentage value!)
float coverage = std::accumulate(nodule_pix_sizes.begin(),nodule_pix_sizes.end(),0.0);
// Number of retained nodules
float num_nods = nodule_sizes.size();
// Store nodule number and seafloor coverage in percent
int phi_idx = 0;
phis[phi_idx++] = num_nods;
phis[phi_idx++] = round(100.0 * coverage / ((bottom_right.x-top_left.x) * (bottom_right.y-top_left.y)));
// Store nodule size and coverage bins
if(num_nods > 0) {
for(float nod_size_bin : nod_size_bins)
phis[phi_idx++] = nodule_sizes[nod_size_bin*num_nods];
phis[phi_idx++] = nodule_sizes[num_nods-1];
// Store ten nodule coverage bins; first find the bin thresholds (we have to pick those from the real data)
target_values = {};
for(double t : orig_target_values)
target_values.push_back(1.0 * t * coverage);
sum_cov = 0.0;
target_index = 0;
target_value = target_values[target_index];
for(double nod_size : nodule_pix_sizes) {
sum_cov += nod_size;
if(target_value <= sum_cov) {
phis[phi_idx++] = nod_size * cm_per_pix;
target_index++;
// Stop before reading past the end of target_values
if(target_index == target_values.size())
break;
target_value = target_values[target_index];
}
}
}
return phis;
}
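// Note (added for illustration): with the default nod_size_bins and orig_target_values the
// returned vector is laid out as follows:
// phis[0]     number of retained nodules
// phis[1]     seafloor coverage in percent of the region of interest
// phis[2..7]  nodule sizes at the quantiles in nod_size_bins (0, 0.01, 0.25, 0.5, 0.75, 0.99)
// phis[8]     size of the largest retained nodule
// phis[9..15] nodule sizes at which the cumulative coverage passes the orig_target_values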
/**
* @brief: Computes particle size statistics
* Remember the sorting and skewness from geological particle size analysis?
* Me neither. But there is literature on that (e.g. Füchtbauer "Sedimente und
* Sedimentgesteine" 1988, Trask "Origin and environment ..." 1932).
* Takes the phis from CoMoNoD and returns geologically valuable metrics:
* Average (Mean or Median), Sorting, Skewness, Kurtosis
* Different target values are required:
* - TRASK: {0.25,0.50,0.75}
* - INMAN: {0.05,0.16,0.50,0.84,0.95}
* - FRIEDMAN-SANDERS: {0.05,0.16,0.50,0.84,0.95}
* @author: Timm Schoening
* @date: 2017-04-20
*/
enum analysis_type {TRASK, INMAN, FRIEDMAN_SANDERS};
std::vector<double> particleSizeAnalysis(const std::vector<double>& sizes, analysis_type method = TRASK) {
switch(method) {
case TRASK:
return {sizes[1],0.5*(sizes[2]-sizes[0]),sizes[0] + sizes[2] - 2 * sizes[1],-1};
break;
case INMAN:
return {0.5*(sizes[1]+sizes[3]),0.5*(sizes[3]-sizes[1]),1.0*(sizes[3]+sizes[1]-2*sizes[2])/(sizes[3]-sizes[1]),1.0*((sizes[4]-sizes[1])-(sizes[3]-sizes[2]))/(sizes[3]-sizes[1])};
break;
case FRIEDMAN_SANDERS:
return {0.3333*(sizes[1]+sizes[2]+sizes[3]),0.5*(sizes[4]-sizes[0]),sizes[4]+sizes[0]-2*sizes[2]};
break;
}
}
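/*
* Worked example (added for illustration, not part of the original commit): with the TRASK
* quantiles sizes = {Q25, Q50, Q75}, the returned metrics are
* median = Q50, sorting = 0.5 * (Q75 - Q25), skewness = Q25 + Q75 - 2 * Q50,
* and -1 as a placeholder for the kurtosis, which is not defined for TRASK.
*
* std::vector<double> quartiles = {1.2, 2.5, 4.1}; // hypothetical nodule sizes in cm^2
* std::vector<double> metrics = ocv::particleSizeAnalysis(quartiles, ocv::TRASK);
* // metrics == {2.5, 1.45, 0.3, -1}
*/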
/**
* @brief: Executes the CoMoNoD algorithm on one image
* Main function for the "Compact Morphology Nodule Detection" (CoMoNoD) algorithm.
* Uses no machine learning, no pattern recognition, no deep learning, only image
* processing, fSpice (see https://doi.org/10.1371/journal.pone.0038179), and a