mirror of https://github.com/CGAL/cgal
Cast more rays for a facet when the fraction of rays accepted after outlier removal falls below a threshold.
Graph-cut experiments for optimization. Sampling methods are removed (other than Vogel). (a note to self: benchmark result-10)
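The change in calculate_sdf_value_of_facet below implements the first point: each facet is first sampled with the sparse ray set, and if the fraction of rays surviving outlier removal stays at or below CGAL_ACCEPTANCE_RATE_THRESHOLD, the facet is recomputed with the dense set (twice as many rays). Here is a minimal, self-contained C++ sketch of that control flow; the ray caster is a stand-in and the helper names (cast_rays, sdf_from_rays) are invented for illustration, so this is not the committed template code:

    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Stand-in for the AABB-tree ray casting: fake hit distances, some outliers.
    static std::vector<double> cast_rays(int ray_count) {
      std::vector<double> dist;
      for (int i = 0; i < ray_count; ++i) {
        double d = 1.0 + 0.05 * std::sin(static_cast<double>(i)); // hits near 1.0
        if (i % 3 == 0) d += 5.0; // every third ray hits something far away
        dist.push_back(d);
      }
      return dist;
    }

    // Average the distances after dropping rays more than one standard deviation
    // from the mean; also report the fraction of rays that survived.
    static std::pair<double, double> sdf_from_rays(const std::vector<double>& dist) {
      if (dist.empty()) return std::make_pair(0.0, 0.0);
      double mean = 0.0;
      for (std::size_t i = 0; i < dist.size(); ++i) mean += dist[i];
      mean /= dist.size();
      double var = 0.0;
      for (std::size_t i = 0; i < dist.size(); ++i) var += (dist[i] - mean) * (dist[i] - mean);
      const double st_dev = std::sqrt(var / dist.size());
      double total = 0.0;
      int kept = 0;
      for (std::size_t i = 0; i < dist.size(); ++i) {
        if (std::fabs(dist[i] - mean) > st_dev) continue; // outlier: drop the ray
        total += dist[i];
        ++kept;
      }
      const double sdf = kept ? total / kept : mean;
      return std::make_pair(sdf, kept / static_cast<double>(dist.size()));
    }

    int main() {
      const double acceptance_threshold = 0.5;       // CGAL_ACCEPTANCE_RATE_THRESHOLD
      const int sparse_count = 16, dense_count = 32; // dense = 2 * sparse, as in the constructor

      std::pair<double, double> r = sdf_from_rays(cast_rays(sparse_count));
      if (r.second <= acceptance_threshold) {
        // Too many rays were rejected: redo this facet with the dense sample set.
        r = sdf_from_rays(cast_rays(dense_count));
      }
      std::cout << "sdf = " << r.first << ", acceptance rate = " << r.second << "\n";
      return 0;
    }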
This commit is contained in:
parent c6b47c931c
commit acd80fa21e
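Of the four disk-sampling strategies in the previous revision (rejection, polar mapping, concentric mapping, Vogel), only Vogel's golden-angle spiral remains, now parameterized by a target sample list and a ray count so the sparse and dense sets share one implementation. A hedged, self-contained sketch of that sampling pattern, following the active #else branch of the diff (constant weight 1.0; the disabled #if 0 branch instead weights each ray by a Gaussian falloff of its deviation from the cone axis):

    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // One weighted sample on the unit disk, mirroring Disk_sample in the diff.
    struct DiskSample { double x, y, weight; };

    // Vogel / sunflower sampling: point i sits at angle i * (3 - sqrt(5)) * pi
    // and radius (i / ray_count)^custom_power, giving even spiral coverage.
    std::vector<DiskSample> vogel_disk_samples(int ray_count) {
      const double pi = 3.14159265358979323846; // CGAL_PI in the diff
      const double golden_ratio = 3.0 - std::sqrt(5.0);
      const double custom_power = 8.0 / 8.0;    // i.e. 1.0, kept as written in the diff
      std::vector<DiskSample> samples;
      for (int i = 0; i < ray_count; ++i) {
        const double Q = i * golden_ratio * pi; // spiral angle
        const double R = std::pow(static_cast<double>(i) / ray_count, custom_power);
        const DiskSample s = { R * std::cos(Q), R * std::sin(Q), 1.0 };
        samples.push_back(s);
      }
      return samples;
    }

    int main() {
      // 25 rays corresponds to number_of_rays_sqrt = 5 in the segmentation class.
      const std::vector<DiskSample> s = vogel_disk_samples(25);
      for (std::size_t i = 0; i < s.size(); ++i)
        std::printf("% .4f % .4f\n", s[i].x, s[i].y);
      return 0;
    }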
@@ -42,9 +42,10 @@
 //AF: macros must be prefixed with "CGAL_"
 //IOY: done
-#define CGAL_NORMALIZATION_ALPHA 6.0
+#define CGAL_NORMALIZATION_ALPHA 5.0
 #define CGAL_ANGLE_ST_DEV_DIVIDER 2.0
 #define CGAL_ST_DEV_MULTIPLIER 0.75
+#define CGAL_ACCEPTANCE_RATE_THRESHOLD 0.5
 
 //IOY: these are going to be removed at the end (no CGAL_ pref)
 #define ACTIVATE_SEGMENTATION_DEBUG
 
@@ -99,14 +100,14 @@ protected:
 
 template <typename ValueTypeName>
 struct Compare_second_element {
-  bool operator()(const ValueTypeName& v1,const ValueTypeName& v2) const {
+  bool operator()(const ValueTypeName& v1, const ValueTypeName& v2) const {
     return v1.second < v2.second;
   }
 };
 
 template <typename ValueTypeName>
 struct Compare_third_element {
-  bool operator()(const ValueTypeName& v1,const ValueTypeName& v2) const {
+  bool operator()(const ValueTypeName& v1, const ValueTypeName& v2) const {
     return v1.third > v2.third; //descending
   }
 };
 
@@ -126,8 +127,8 @@ public:
 double smoothing_lambda;
 
 /*Store sampled points from disk, just for once */
-Disk_samples_list disk_samples;
-
+Disk_samples_list disk_samples_sparse;
+Disk_samples_list disk_samples_dense;
 //these are going to be removed...
 std::ofstream log_file;
 
@@ -146,7 +147,7 @@ public:
 void calculate_sdf_values();
 
 double calculate_sdf_value_of_facet (const Facet_handle& facet,
-                                     const Tree& tree) const;
+                                     const Tree& tree, const Disk_samples_list& samples) const;
 template <class Query>
 boost::tuple<bool, bool, double> cast_and_return_minimum(const Query& ray,
     const Tree& tree, const Facet_handle& facet) const;
@@ -154,8 +155,8 @@ public:
 boost::tuple<bool, bool, double> cast_and_return_minimum_use_closest(
   const Query& ray, const Tree& tree, const Facet_handle& facet) const;
 
-double calculate_sdf_value_from_rays(std::vector<double>& ray_distances,
-                                     std::vector<double>& ray_weights) const;
+boost::tuple<double, double> calculate_sdf_value_from_rays(
+  std::vector<double>& ray_distances, std::vector<double>& ray_weights) const;
 
 void arrange_center_orientation(const Plane& plane, const Vector& unit_normal,
                                 Point& center) const;
@@ -163,10 +164,7 @@ public:
 double calculate_dihedral_angle_of_edge(const Halfedge_handle& edge) const;
 double calculate_dihedral_angle_of_edge_2(const Halfedge_handle& edge) const;
 
-void disk_sampling_rejection();
-void disk_sampling_polar_mapping();
-void disk_sampling_concentric_mapping();
-void disk_sampling_vogel_method();
+void disk_sampling_vogel_method(Disk_samples_list& samples, int ray_count);
 
 void select_cluster_number();
 
@@ -216,7 +214,11 @@ inline Surface_mesh_segmentation<Polyhedron>::Surface_mesh_segmentation(
   use_minimum_segment(false), multiplier_for_segment(1), smoothing_lambda(23.0)
 {
-  //disk_sampling_concentric_mapping();
-  disk_sampling_vogel_method();
+  int sparse_ray_count = number_of_rays_sqrt * number_of_rays_sqrt;
+  int dense_ray_count = sparse_ray_count * 2;
+  disk_sampling_vogel_method(disk_samples_sparse, sparse_ray_count);
+  disk_sampling_vogel_method(disk_samples_dense, dense_ray_count);
 
 #ifdef SEGMENTATION_PROFILE
   profile("profile.txt");
 #else
@@ -244,7 +246,7 @@ inline void Surface_mesh_segmentation<Polyhedron>::calculate_sdf_values()
       facet_it != mesh->facets_end(); ++facet_it) {
     CGAL_precondition(facet_it->is_triangle()); //Mesh should contain triangles.
 
-    double sdf = calculate_sdf_value_of_facet(facet_it, tree);
+    double sdf = calculate_sdf_value_of_facet(facet_it, tree, disk_samples_sparse);
     sdf_values.insert(std::pair<Facet_handle, double>(facet_it, sdf));
   }
   check_zero_sdf_values();
@@ -255,7 +257,8 @@ inline void Surface_mesh_segmentation<Polyhedron>::calculate_sdf_values()
 template <class Polyhedron>
 inline double
 Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_of_facet(
-  const Facet_handle& facet, const Tree& tree) const
+  const Facet_handle& facet, const Tree& tree,
+  const Disk_samples_list& samples) const
 {
   // AF: Use const Point&
   //IOY: Done, and I am going to change other places (where it is approprite) like this.
@@ -271,8 +274,8 @@ Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_of_facet(
   Plane plane(center, normal);
   Vector v1 = plane.base1();
   Vector v2 = plane.base2();
-  v1 = v1 / sqrt(v1.squared_length());
-  v2 = v2 / sqrt(v2.squared_length());
+  v1 = v1 / std::sqrt(v1.squared_length());
+  v2 = v2 / std::sqrt(v2.squared_length());
 
   //arrange_center_orientation(plane, normal, center);
 
@@ -293,8 +296,8 @@ Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_of_facet(
 #ifndef SHOOT_ONLY_RAYS
   boost::optional<double> segment_distance;
 #endif
-  for(Disk_samples_list::const_iterator sample_it = disk_samples.begin();
-      sample_it != disk_samples.end(); ++sample_it) {
+  for(Disk_samples_list::const_iterator sample_it = samples.begin();
+      sample_it != samples.end(); ++sample_it) {
     bool is_intersected, intersection_is_acute;
     double min_distance;
     Vector disk_vector = v1 * sample_it->first + v2 * sample_it->second;
@@ -318,7 +321,7 @@ Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_of_facet(
       }
       segment_distance = min_distance; //first assignment of the segment_distance
     } else { // use segment_distance to limit rays as segments
-      ray_direction = ray_direction / sqrt(ray_direction.squared_length());
+      ray_direction = ray_direction / std::sqrt(ray_direction.squared_length());
       ray_direction = ray_direction * (*segment_distance * multiplier_for_segment);
 
       Segment segment(center, CGAL::operator+(center, ray_direction));
@@ -332,7 +335,7 @@ Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_of_facet(
       if(!intersection_is_acute) {
         continue;
       }
-      min_distance += CGAL::sqrt(
+      min_distance += std::sqrt(
                         segment.squared_length()); // since our source is segment.target()
     } else if(
       !intersection_is_acute) { // intersection is found, but it is not acceptable (so, do not continue ray-segment casting)
@@ -355,7 +358,15 @@ Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_of_facet(
     ray_weights.push_back(sample_it->third);
     ray_distances.push_back(min_distance);
   }
-  return calculate_sdf_value_from_rays(ray_distances, ray_weights);
+  double sdf_result, acceptance_rate;
+  boost::tie(sdf_result, acceptance_rate) = calculate_sdf_value_from_rays(
+        ray_distances, ray_weights);
+
+  if(acceptance_rate > CGAL_ACCEPTANCE_RATE_THRESHOLD
+      || samples.size() == disk_samples_dense.size()) {
+    return sdf_result;
+  }
+  return calculate_sdf_value_of_facet(facet, tree, disk_samples_dense);
 }
 
@@ -419,7 +430,7 @@ Surface_mesh_segmentation<Polyhedron>::cast_and_return_minimum(
     return min_distance;
   }
   min_distance.get<1>() = true; // founded intersection is acceptable.
-  min_distance.get<2>() = sqrt(min_distance.get<2>());
+  min_distance.get<2>() = std::sqrt(min_distance.get<2>());
   return min_distance;
 }
 
@@ -471,16 +482,18 @@ Surface_mesh_segmentation<Polyhedron>::cast_and_return_minimum_use_closest (
     return min_distance;
   }
   min_distance.get<1>() = true;
-  min_distance.get<2>() = sqrt(min_i_ray.squared_length());
+  min_distance.get<2>() = std::sqrt(min_i_ray.squared_length());
   return min_distance;
 }
 
 template <class Polyhedron>
-inline double
+inline boost::tuple<double, double>
 Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_from_rays(
   std::vector<double>& ray_distances,
   std::vector<double>& ray_weights) const
 {
+  // get<0> : sdf value
+  // get<1> : not outlier ray count / total ray count
   int accepted_ray_count = ray_distances.size();
   if(accepted_ray_count == 0) {
     return 0.0;
@@ -518,8 +531,9 @@ Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_from_rays(
     double dif = (*dist_it) - mean_sdf;
     st_dev += dif * dif;
   }
-  st_dev = sqrt(st_dev / ray_distances.size());
+  st_dev = std::sqrt(st_dev / ray_distances.size());
   /* Calculate sdf, accept rays : ray_dist - median < st dev */
+  int not_outlier_count = 0;
   w_it = ray_weights.begin();
   for(std::vector<double>::iterator dist_it = ray_distances.begin();
       dist_it != ray_distances.end();
@@ -530,11 +544,16 @@ Surface_mesh_segmentation<Polyhedron>::calculate_sdf_value_from_rays(
     }
     total_distance += (*dist_it) * (*w_it);
     total_weights += (*w_it);
+    not_outlier_count++;
  }
 
  if(total_distance == 0.0) {
    return median_sdf; // no ray is accepted, return median.
  }
-  return total_distance / total_weights;
+  double sdf_res = total_distance / total_weights;
+  double acceptance_rate = not_outlier_count / static_cast<double>
+                           (accepted_ray_count);
+  return boost::tuple<double, double>(sdf_res, acceptance_rate);
 }
 
 /* Slightly move center towards inverse normal of facet.
@@ -627,12 +646,13 @@ Surface_mesh_segmentation<Polyhedron>::calculate_dihedral_angle_of_edge(
     dot = -1.0;
   }
   double angle = acos(dot) / CGAL_PI; // [0-1] normalize
-  if(!concave) {
-    angle *= 0.08;
-  }
 
   if(angle < epsilon) {
     angle = epsilon;
   }
+  if(!concave) {
+    angle *= 0.05;
+  }
   return angle;
 }
 
@@ -641,7 +661,7 @@ inline double
 Surface_mesh_segmentation<Polyhedron>::calculate_dihedral_angle_of_edge_2(
   const Halfedge_handle& edge) const
 {
-  double epsilon = 1e-8; // not sure but should not return zero for log(angle)...
+  double epsilon = 1e-5; // not sure but should not return zero for log(angle)...
   const Point& a = edge->vertex()->point();
   const Point& b = edge->prev()->vertex()->point();
   const Point& c = edge->next()->vertex()->point();
@@ -649,14 +669,16 @@ Surface_mesh_segmentation<Polyhedron>::calculate_dihedral_angle_of_edge_2(
   // As far as I check: if, say, dihedral angle is 5, this returns 175,
   // if dihedral angle is -5, this returns -175.
   double n_angle = CGAL::Mesh_3::dihedral_angle(a, b, c, d) / 180.0;
-  bool n_concave = n_angle > 0;
-  double folded_angle = 1 + ((n_concave ? -1 : +1) * n_angle);
-  folded_angle = (std::max)(folded_angle, epsilon);
+  bool concave = n_angle > 0;
+  double angle = 1 + ((concave ? -1 : +1) * n_angle);
 
-  if(!n_concave) {
-    return epsilon; // we may want to also penalize convex angles as well...
+  if(!concave) {
+    angle *= 0.05;
   }
-  return folded_angle;
+  if(angle < epsilon) {
+    angle = epsilon;
+  }
+  return angle;
 
   //Facet_handle f1 = edge->facet();
   //Facet_handle f2 = edge->opposite()->facet();
@@ -695,115 +717,27 @@ Surface_mesh_segmentation<Polyhedron>::calculate_dihedral_angle_of_edge_2(
 }
 
-template <class Polyhedron>
-inline void Surface_mesh_segmentation<Polyhedron>::disk_sampling_rejection()
-{
-  int number_of_points_sqrt = number_of_rays_sqrt;
-  double length_of_normal = 1.0 / tan(cone_angle / 2);
-  double mid_point = (number_of_points_sqrt-1) / 2.0;
-  double angle_st_dev = cone_angle / CGAL_ANGLE_ST_DEV_DIVIDER;
-
-  for(int i = 0; i < number_of_points_sqrt; ++i)
-    for(int j = 0; j < number_of_points_sqrt; ++j) {
-      double w1 = (i - mid_point)/(mid_point);
-      double w2 = (j - mid_point)/(mid_point);
-      double R = sqrt(w1*w1 + w2*w2);
-      if(R > 1.0) {
-        continue;
-      }
-      double angle = atan(R / length_of_normal);
-      angle = exp(-0.5 * (pow(angle / angle_st_dev, 2))); // weight
-      disk_samples.push_back(Disk_sample(w1, w2, angle));
-    }
-}
-
-template <class Polyhedron>
-inline void Surface_mesh_segmentation<Polyhedron>::disk_sampling_polar_mapping()
-{
-  int number_of_points_sqrt = number_of_rays_sqrt;
-  double length_of_normal = 1.0 / tan(cone_angle / 2);
-  double angle_st_dev = cone_angle / CGAL_ANGLE_ST_DEV_DIVIDER;
-
-  for(int i = 0; i < number_of_points_sqrt; ++i)
-    for(int j = 0; j < number_of_points_sqrt; ++j) {
-      double w1 = i / (double) (number_of_points_sqrt-1);
-      double w2 = j / (double) (number_of_points_sqrt-1);
-      double R = w1;
-      double Q = 2 * w1 * CGAL_PI;
-      double angle = atan(R / length_of_normal);
-      angle = exp(-0.5 * (pow(angle / angle_st_dev, 2))); // weight
-      disk_samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), angle));
-    }
-}
-
-template <class Polyhedron>
-inline void
-Surface_mesh_segmentation<Polyhedron>::disk_sampling_concentric_mapping()
-{
-  int number_of_points_sqrt = number_of_rays_sqrt;
-  double length_of_normal = 1.0 / tan(cone_angle / 2);
-  double fraction = 2.0 / (number_of_points_sqrt -1);
-  double angle_st_dev = cone_angle / CGAL_ANGLE_ST_DEV_DIVIDER;
-
-  for(int i = 0; i < number_of_points_sqrt; ++i)
-    for(int j = 0; j < number_of_points_sqrt; ++j) {
-      double w1 = -1 + i * fraction;
-      double w2 = -1 + j * fraction;
-      double R, Q;
-      if(w1 == 0 && w2 == 0) {
-        R = 0;
-        Q = 0;
-      } else if(w1 > -w2) {
-        if(w1 > w2) {
-          R = w1;
-          Q = (w2 / w1);
-        } else {
-          R = w2;
-          Q = (2 - w1 / w2);
-        }
-      } else {
-        if(w1 < w2) {
-          R = -w1;
-          Q = (4 + w2 / w1);
-        } else {
-          R = -w2;
-          Q = (6 - w1 / w2);
-        }
-      }
-      Q *= (0.25 * CGAL_PI);
-      double angle = atan(R / length_of_normal);
-      angle = exp(-0.5 * (pow(angle / angle_st_dev, 2))); // weight
-      disk_samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), angle));
-    }
-  //Sorting sampling
-  //The sampling that are closer to center comes first.
-  //More detailed: weights are inverse proportional with distance to center,
-  //use weights to sort in descending order.
-  std::sort(disk_samples.begin(), disk_samples.end(),
-            Compare_third_element<CGAL::Triple<double, double, double> >());
-}
-
 template <class Polyhedron>
-inline void Surface_mesh_segmentation<Polyhedron>::disk_sampling_vogel_method()
+inline void Surface_mesh_segmentation<Polyhedron>::disk_sampling_vogel_method(
+  Disk_samples_list& samples, int ray_count)
 {
   double length_of_normal = 1.0 / tan(cone_angle / 2);
   double angle_st_dev = cone_angle / CGAL_ANGLE_ST_DEV_DIVIDER;
 
-  int number_of_points = number_of_rays_sqrt * number_of_rays_sqrt;
-  double golden_ratio = 3.0 - sqrt(5.0);
+  double golden_ratio = 3.0 - std::sqrt(5.0);
 #if 0
   for(int i = 0; i < number_of_points; ++i) {
     double Q = i * golden_ratio * CGAL_PI;
-    double R = sqrt(static_cast<double>(i)) / number_of_rays_sqrt;
+    double R = std::sqrt(static_cast<double>(i) / ray_count);
     double angle = atan(R / length_of_normal);
-    angle = exp(-0.5 * (pow(angle / angle_st_dev, 2))); // weight
-    disk_samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), angle));
+    angle = exp(-0.5 * (std::pow(angle / angle_st_dev, 2))); // weight
+    samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), angle));
   }
 #else
   double custom_power = 8.0 / 8.0;
-  for(int i = 0; i < number_of_points; ++i) {
+  for(int i = 0; i < ray_count; ++i) {
     double Q = i * golden_ratio * CGAL_PI;
-    double R = pow(static_cast<double>(i) / number_of_points, custom_power);
-    disk_samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), 1.0));
+    double R = std::pow(static_cast<double>(i) / ray_count, custom_power);
+    samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), 1.0));
   }
 #endif
 }
 
@@ -868,8 +802,8 @@ Surface_mesh_segmentation<Polyhedron>::smooth_sdf_values_with_gaussian()
   double total_weight = 0.0;
   for(std::map<Facet_handle, int>::iterator it = neighbors.begin();
       it != neighbors.end(); ++it) {
-    double weight = exp(-0.5 * (pow(it->second / (window_size/2.0),
-                                2))); // window_size => 2*sigma
+    double weight = exp(-0.5 * (std::pow(it->second / (window_size/2.0),
+                                2))); // window_size => 2*sigma
     total_sdf_value += sdf_values[it->first] * weight;
     total_weight += weight;
   }
@@ -944,7 +878,7 @@ Surface_mesh_segmentation<Polyhedron>::smooth_sdf_values_with_bilateral()
   double deviation = 0.0;
   for(std::map<Facet_handle, int>::iterator it = neighbors.begin();
       it != neighbors.end(); ++it) {
-    deviation += pow(sdf_values[it->first] - current_sdf_value, 2);
+    deviation += std::pow(sdf_values[it->first] - current_sdf_value, 2);
   }
   deviation = std::sqrt(deviation / neighbors.size());
   if(deviation == 0.0) {
@@ -952,10 +886,10 @@ Surface_mesh_segmentation<Polyhedron>::smooth_sdf_values_with_bilateral()
   }
   for(std::map<Facet_handle, int>::iterator it = neighbors.begin();
       it != neighbors.end(); ++it) {
-    double spatial_weight = exp(-0.5 * (pow(it->second / (window_size/2.0),
+    double spatial_weight = exp(-0.5 * (std::pow(it->second / (window_size/2.0),
                                         2))); // window_size => 2*sigma
-    double domain_weight = exp(-0.5 * (pow((sdf_values[it->first] -
-                                       current_sdf_value) / (2*deviation), 2)));
+    double domain_weight = exp(-0.5 * (std::pow((sdf_values[it->first] -
+                                       current_sdf_value) / (std::sqrt(2.0)*deviation), 2)));
     double weight = spatial_weight * domain_weight;
     total_sdf_value += sdf_values[it->first] * weight;
     total_weight += weight;
@@ -1407,7 +1341,7 @@ inline void Surface_mesh_segmentation<Polyhedron>::select_cluster_number()
     number_of_centers = i;
     apply_GMM_fitting_with_K_means_init();
     double distortion = fitter.calculate_distortion();
-    distortions[i-(min_cluster_count -1)] = pow(distortion, -0.5);
+    distortions[i-(min_cluster_count -1)] = std::pow(distortion, -0.5);
   }
   double max_jump = 0.0;
   for(int i = 1; i < range+1; ++i) {
 
@@ -47,8 +47,8 @@ public:
     const std::vector<double>& edge_weights,
     const std::vector<std::vector<double> >& probability_matrix,
     std::vector<int>& labels, double* result = NULL) {
-    double min_cut = apply_alpha_expansion(edges, edge_weights, probability_matrix,
-                                           labels);
+    double min_cut = apply_alpha_expansion_2(edges, edge_weights,
+                                             probability_matrix, labels);
     if(result != NULL) {
       *result = min_cut;
     }
@@ -82,12 +82,18 @@ public:
     int number_of_clusters = probability_matrix.size();
     double min_cut = (std::numeric_limits<double>::max)();
     bool success;
+    CGAL::Timer gt;
+    gt.start();
     do {
+      CGAL::Timer t;
+      t.start();
       success = false;
       for(int alpha = 0; alpha < number_of_clusters; ++alpha) {
+        CGAL::Timer t;
+        t.start();
         Graph graph;
 #if 0
         graph.m_vertices.reserve(edges.size() + labels.size()); //not documented!
         // in order to see effect of pre-allocation of vector with maximum size
 #endif
         Vertex_descriptor cluster_source = boost::add_vertex(graph);
         Vertex_descriptor cluster_sink = boost::add_vertex(graph);
         std::vector<Vertex_descriptor> inserted_vertices;
@@ -108,6 +114,8 @@ public:
           add_edge_and_reverse(cluster_source, new_vertex, source_weight, 0.0, graph);
           add_edge_and_reverse(new_vertex, cluster_sink, sink_weight, 0.0, graph);
         }
+        std::cout << "vertex time: " << t.time() << std::endl;
+        t.reset();
         // For E-Smooth
         // add edge between every vertex,
         std::vector<double>::const_iterator weight_it = edge_weights.begin();
@@ -132,11 +140,11 @@ public:
             add_edge_and_reverse(inbetween, cluster_sink, *weight_it, 0.0, graph);
           }
         }
-        //std::cout << "construction time: " << t.time() << std::endl;
-        //t.reset();
+        std::cout << "edge time: " << t.time() << std::endl;
+        t.reset();
         double flow = boost::boykov_kolmogorov_max_flow(graph, cluster_source,
                       cluster_sink);
-        //std::cout << "flow time: " << t.time() << std::endl;
+        std::cout << "flow time: " << t.time() << std::endl;
         if(min_cut - flow < flow * 1e-10) {
           continue;
         }
@@ -155,6 +163,105 @@ public:
 
       }
     } while(success);
+    std::cout << "tot time: " << gt.time() << std::endl;
     return min_cut;
   }
 
+  double apply_alpha_expansion_2(const std::vector<std::pair<int, int> >& edges,
+    const std::vector<double>& edge_weights,
+    const std::vector<std::vector<double> >& probability_matrix,
+    std::vector<int>& labels) {
+    int number_of_clusters = probability_matrix.size();
+    double min_cut = (std::numeric_limits<double>::max)();
+    bool success;
+    CGAL::Timer gt;
+    gt.start();
+    double total_time = 0.0;
+    do {
+      success = false;
+      for(int alpha = 0; alpha < number_of_clusters; ++alpha) {
+        CGAL::Timer t;
+        t.start();
+        Graph graph(edges.size() + labels.size() +
+                    2); // allocate using maximum possible size.
+
+        Vertex_descriptor cluster_source = 0;
+        Vertex_descriptor cluster_sink = 1;
+
+        // For E-Data
+        // add every facet as a vertex to graph, put edges to source-sink vertices
+        for(std::size_t vertex_i = 0; vertex_i < probability_matrix[0].size();
+            ++vertex_i) {
+          Vertex_descriptor new_vertex = vertex_i + 2;
+
+          double source_weight = probability_matrix[alpha][vertex_i];
+          // since it is expansion move, current alpha labeled vertices will be assigned to alpha again,
+          // making sink_weight 'infinity' guarantee this.
+          double sink_weight = (labels[vertex_i] == alpha) ?
+                               (std::numeric_limits<double>::max)() :
+                               probability_matrix[labels[vertex_i]][vertex_i];
+
+          add_edge_and_reverse(cluster_source, new_vertex, source_weight, 0.0, graph);
+          add_edge_and_reverse(new_vertex, cluster_sink, sink_weight, 0.0, graph);
+        }
+        total_time += t.time();
+        std::cout << "vertex time: " << t.time() << std::endl;
+        t.reset();
+        // For E-Smooth
+        // add edge between every vertex,
+        int b_index = labels.size() + 2;
+        std::vector<double>::const_iterator weight_it = edge_weights.begin();
+        for(std::vector<std::pair<int, int> >::const_iterator edge_it = edges.begin();
+            edge_it != edges.end();
+            ++edge_it, ++weight_it) {
+          Vertex_descriptor v1 = edge_it->first + 2, v2 = edge_it->second + 2;
+          int label_1 = labels[edge_it->first], label_2 = labels[edge_it->second];
+          if(label_1 == label_2) {
+            // actually no need for this, since two alpha labeled vertices will not be seperated
+            // (their edges between sink is infitinity)
+            double w1 = (label_1 == alpha) ? 0 : *weight_it;
+            add_edge_and_reverse(v1, v2, w1, w1, graph);
+          } else {
+            Vertex_descriptor inbetween = b_index++;
+
+            double w1 = (label_1 == alpha) ? 0 : *weight_it;
+            double w2 = (label_2 == alpha) ? 0 : *weight_it;
+            add_edge_and_reverse(inbetween, v1, w1, w1, graph);
+            add_edge_and_reverse(inbetween, v2, w2, w2, graph);
+            add_edge_and_reverse(inbetween, cluster_sink, *weight_it, 0.0, graph);
+          }
+        }
+        total_time += t.time();
+        std::cout << "edge time: " << t.time() << std::endl;
+        t.reset();
+
+        double flow = boost::boykov_kolmogorov_max_flow(graph, cluster_source,
+                      cluster_sink);
+
+        total_time += t.time();
+        std::cout << "flow time: " << t.time() << std::endl;
+        t.reset();
+        if(min_cut - flow < flow * 1e-10) {
+          continue;
+        }
+        std::cout << "prev flow: " << min_cut << " new flow: " << flow << std::endl;
+        min_cut = flow;
+        success = true;
+        //update labeling
+
+        for(std::size_t vertex_i = 0; vertex_i < labels.size(); ++vertex_i) {
+          Vertex_descriptor v = vertex_i + 2;
+          boost::default_color_type color = boost::get(boost::vertex_color, graph, v);
+          if(labels[vertex_i] != alpha
+              && color == ColorTraits::white()) { //new comers (expansion occurs)
+            labels[vertex_i] = alpha;
+          }
+        }
+        std::cout << "label time: " << t.time() << std::endl;
+        t.reset();
+      }
+    } while(success);
+    std::cout << "tot time: " << gt.time() << " " << total_time << std::endl;
+    return min_cut;
+  }
 };
 
@@ -48,7 +48,7 @@ public:
     : mean(mean), deviation(deviation), mixing_coefficient(mixing_coefficient) {
   }
   double probability(double x) const {
-    double e_over = -0.5 * pow((x - mean) / deviation, 2);
+    double e_over = -0.5 * std::pow((x - mean) / deviation, 2);
     return exp(e_over) / deviation;
   }
   double probability_with_coef(double x) const {
@@ -56,7 +56,7 @@ public:
   }
   double log_probability_with_coef(double x) const {
     return log(mixing_coefficient) - log(deviation)
-           -0.5 * pow((x - mean) / deviation, 2);
+           -0.5 * std::pow((x - mean) / deviation, 2);
   }
   bool operator < (const Gaussian_center& center) const {
     return mean < center.mean;
@@ -92,6 +92,7 @@ public:
     final_likelihood(-(std::numeric_limits<double>::max)()) {
     initiate_centers_uniformly(number_of_centers);
     fit();
+    sort(centers.begin(), centers.end());
   }
   /* For initialization with provided center ids per point, with one run */
   Expectation_maximization(int number_of_centers, const std::vector<double>& data,
@@ -105,7 +106,7 @@ public:
     membership_matrix = std::vector<std::vector<double> >(number_of_centers,
                         std::vector<double>(points.size()));
     fit();
-    //write_center_parameters("centers_param.txt");
+    sort(centers.begin(), centers.end());
   }
   /* For initialization with random center selection (Forgy), with multiple run */
   Expectation_maximization(int number_of_centers, const std::vector<double>& data,
@@ -123,6 +124,7 @@ public:
     srand(DEF_SEED);
 #endif
     fit_with_multiple_run(number_of_centers, number_of_runs);
+    sort(centers.begin(), centers.end());
     //write_center_parameters("centers_param.txt");
   }
 
@@ -152,9 +154,9 @@ public:
     for(std::vector<double>::iterator point_it = points.begin();
         point_it != points.end(); ++point_it) {
       double max_likelihood = 0.0;
-      int max_center = 0, center_counter = 0;
+      int max_center = -1, center_counter = 0;
       for(std::vector<Gaussian_center>::iterator center_it = centers.begin();
-          center_it != centers.end(); ++center_it, center_counter++) {
+          center_it != centers.end(); ++center_it, ++center_counter) {
         double likelihood = center_it->probability_with_coef(*point_it);
         if(max_likelihood < likelihood) {
           max_likelihood = likelihood;
@@ -180,6 +182,8 @@ public:
     for(std::size_t center_i = 0; center_i < centers.size(); ++center_i) {
       double probability = probabilities[center_i][point_i] / total_probability;
       probability = (std::max)(probability, epsilon);
+      //probability += epsilon;
+      //probability = (std::min)(probability, 1.0);
       probability = -log(probability);
       probabilities[center_i][point_i] = (std::max)(probability,
                                          std::numeric_limits<double>::epsilon());
@@ -222,13 +226,13 @@ public:
     for(int i = 0; i < number_of_points; ++i) {
       int center_id = initial_center_ids[i];
       double data = points[i];
-      centers[center_id].deviation += pow(data - centers[center_id].mean, 2);
+      centers[center_id].deviation += std::pow(data - centers[center_id].mean, 2);
     }
     for(int i = 0; i < number_of_centers; ++i) {
       if(!cluster_exist[i]) {
         continue;
       }
-      centers[i].deviation = sqrt(centers[i].deviation / member_count[i]);
+      centers[i].deviation = std::sqrt(centers[i].deviation / member_count[i]);
     }
 
     double epsilon = 1e
@@ -293,7 +297,7 @@ public:
                 centers[closest_center].deviation;
       distortion += distance * distance;
     }
-    return sqrt(distortion / points.size());
+    return std::sqrt(distortion / points.size());
   }
 
 protected:
@@ -318,7 +322,7 @@ protected:
     for(std::size_t i = 0; i < centers.size(); ++i) {
       CGAL_assertion(member_count[i] !=
                      0); // There shouldn't be such case, unless same point is selected as a center twice (it is checked!)
-      centers[i].deviation = sqrt(centers[i].deviation / member_count[i]);
+      centers[i].deviation = std::sqrt(centers[i].deviation / member_count[i]);
     }
   }
   // Initialization functions for centers.
@@ -340,8 +344,6 @@ protected:
       }
     }
     calculate_initial_deviations();
-    sort(centers.begin(), centers.end());
-    //write_random_centers("center_indexes.txt");
   }
 
   void initiate_centers_uniformly(int number_of_centers) {
@@ -355,7 +357,6 @@ protected:
                                        initial_mixing_coefficient));
     }
     calculate_initial_deviations();
-    sort(centers.begin(), centers.end());
   }
 
   void initiate_centers_plus_plus(int number_of_centers) {
@@ -373,7 +374,7 @@ protected:
     for(int i = 1; i < number_of_centers; ++i) {
       double cumulative_distance_square = 0.0;
       for(std::size_t j = 0; j < points.size(); ++j) {
-        double new_distance = pow(centers.back().mean - points[j], 2);
+        double new_distance = std::pow(centers.back().mean - points[j], 2);
         if(new_distance < distance_square[j]) {
           distance_square[j] = new_distance;
         }
@@ -396,7 +397,6 @@ protected:
       }
     }
     calculate_initial_deviations();
-    sort(centers.begin(), centers.end());
   }
 
   bool is_already_center(const Gaussian_center& center) const {
@@ -451,12 +451,12 @@ protected:
     /* Calculate deviation */
     for(int i = 0; i < number_of_points; ++i) {
       int center_id = initial_center_ids[i];
-      centers[center_id].deviation += pow(points[i] - centers[center_id].mean, 2);
+      centers[center_id].deviation += std::pow(points[i] - centers[center_id].mean,
+                                               2);
     }
     for(int i = 0; i < number_of_centers; ++i) {
-      centers[i].deviation = sqrt(centers[i].deviation / member_count[i]);
+      centers[i].deviation = std::sqrt(centers[i].deviation / member_count[i]);
     }
     sort(centers.begin(), centers.end());
     return number_of_centers;
   }
 
@@ -477,9 +477,9 @@ protected:
       double new_deviation = 0.0;
       for(std::size_t point_i = 0; point_i < points.size(); ++point_i) {
        double membership = membership_matrix[center_i][point_i];
-        new_deviation += membership * pow(points[point_i] - new_mean, 2);
+        new_deviation += membership * std::pow(points[point_i] - new_mean, 2);
       }
-      new_deviation = sqrt(new_deviation/total_membership);
+      new_deviation = std::sqrt(new_deviation/total_membership);
       // Assign new parameters
       centers[center_i].mixing_coefficient = total_membership / points.size();
       centers[center_i].deviation = new_deviation;
 
|
|||
for(int i = 1; i < number_of_centers; ++i) {
|
||||
double cumulative_distance_square = 0.0;
|
||||
for(std::size_t j = 0; j < points.size(); ++j) {
|
||||
double new_distance = pow(centers.back().mean - points[j].data, 2);
|
||||
double new_distance = std::pow(centers.back().mean - points[j].data, 2);
|
||||
if(new_distance < distance_square[j]) {
|
||||
distance_square[j] = new_distance;
|
||||
}
|
||||
|
|
@@ -235,7 +235,7 @@ public:
         center_it != centers.end(); ++center_it) {
       for(std::vector<K_means_point>::const_iterator point_it = points.begin();
           point_it != points.end(); ++point_it) {
-        sum += pow(center_it->mean - point_it->data, 2);
+        sum += std::pow(center_it->mean - point_it->data, 2);
       }
     }
     return sum;
@@ -245,7 +245,7 @@ public:
     double sum = 0;
     for(std::vector<K_means_point>::const_iterator point_it = points.begin();
         point_it != points.end(); ++point_it) {
-      sum += pow(centers[point_it->center_id].mean - point_it->data, 2);
+      sum += std::pow(centers[point_it->center_id].mean - point_it->data, 2);
    }
    return sum;
  }