Code review.

Ílker Yaz 2012-08-28 02:44:30 +00:00
parent 6956642f14
commit ab9cb4b250
8 changed files with 145 additions and 261 deletions

View File

@ -106,7 +106,7 @@ These functions expect a manifold triangulated polyhedron without boundary as i
polyhedron with boundaries, but considering how the SDF values are computed, using a polyhedron with large holes is likely to result in
meaningless SDF values, and therefore unreliable segmentation.
The current implementation of the computation of the SDF values is based on the AABB_tree package.
The current implementation of the computation of the SDF values relies on the AABB_tree package.
This operation is robust when the AABBTraits model provides exact predicates.
##The SDF Computation##
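For orientation, a minimal usage sketch of the internal API touched by this commit (the kernel and property-map choices below are illustrative assumptions; the class and member names are taken from the diffs that follow):

#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Polyhedron_3.h>
#include <CGAL/internal/Surface_mesh_segmentation/Surface_mesh_segmentation.h>
#include <boost/property_map/property_map.hpp>
#include <map>

typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel;
typedef CGAL::Polyhedron_3<Kernel> Polyhedron;
typedef std::map<Polyhedron::Facet_const_handle, double> Facet_double_map;

// given a pure-triangle Polyhedron `mesh`:
Facet_double_map internal_map;
boost::associative_property_map<Facet_double_map> sdf_pmap(internal_map);
CGAL::internal::Surface_mesh_segmentation<Polyhedron> seg(mesh);
// a cone angle of 2*pi/3 and 25 rays are plausible values, not taken from this diff
std::pair<double, double> min_max_sdf =
    seg.calculate_sdf_values(2.0 / 3.0 * CGAL_PI, 25, sdf_pmap);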

View File

@ -5,15 +5,14 @@
* @brief This file contains 3 graph-cut algorithms, each of which can be used as a template parameter for CGAL::internal::Surface_mesh_segmentation.
*
* The main differences between the implementations are the underlying max-flow algorithm and the graph type (i.e. the results are the same, only performance differs).
*
* The algorithms can also be used on their own to apply alpha-expansion graph-cut to any graph.
*/
#include <CGAL/assertions.h>
#include <CGAL/Timer.h>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/boykov_kolmogorov_max_flow.hpp>
#include <iostream>
#include <fstream>
#include <vector>
//#define CGAL_USE_BOYKOV_KOLMOGOROV_MAXFLOW_SOFTWARE
@ -95,8 +94,14 @@ private:
typedef Traits::edge_descriptor Edge_descriptor;
typedef Traits::edge_iterator Edge_iterator;
/**
* Adds two directed edges between @a v1 and @a v2.
* @param v1 first vertex
* @param v2 second vertex
* @param w1 weight for edge from v1 to v2 (v1->v2)
* @param w2 weight for edge from v2 to v1 (v2->v1)
* @param graph graph to which the edges are added
* @return pair of added edges, first: v1->v2 and second: v2->v1
*/
boost::tuple<Edge_descriptor, Edge_descriptor>
add_edge_and_reverse(Vertex_descriptor& v1, Vertex_descriptor& v2, double w1,
@ -120,15 +125,20 @@ private:
}
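/*
 * Sketch of what add_edge_and_reverse has to do for boost::boykov_kolmogorov_max_flow
 * (the body is elided in this hunk, so the property setup below is an assumption):
 * BK max-flow needs every edge to carry a capacity and a handle to its reverse edge.
 *
 *   Edge_descriptor v1_v2 = boost::add_edge(v1, v2, graph).first;
 *   Edge_descriptor v2_v1 = boost::add_edge(v2, v1, graph).first;
 *   boost::put(boost::edge_capacity, graph, v1_v2, w1);
 *   boost::put(boost::edge_capacity, graph, v2_v1, w2);
 *   boost::put(boost::edge_reverse, graph, v1_v2, v2_v1);
 *   boost::put(boost::edge_reverse, graph, v2_v1, v1_v2);
 *   return boost::make_tuple(v1_v2, v2_v1);
 */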
public:
/**
* Applies alpha-expansion graph-cut for energy minimization.
* @param edges contains incident vertex-id pairs for each edge (vertex-ids should be between [0, number of vertices -1])
* @param edge_weights contains the weight of each edge in @a edges (matched by order)
* @param probability_matrix contains the probability (responsibility) of each center for each vertex, accessed as probability_matrix[center][vertex]
* @param[in, out] labels as input it contains initial labeling of vertices (i.e. a center-id between [0, number of centers -1]),
* and as output it returns final labeling of vertices
*/
double operator()(const std::vector<std::pair<int, int> >& edges,
const std::vector<double>& edge_weights,
const std::vector<std::vector<double> >& probability_matrix,
std::vector<int>& labels) const {
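// Restated for clarity (not code from this commit): the functor minimizes the
// usual data + smoothness energy
//   E(labels) = sum_v probability_matrix[labels[v]][v]
//             + sum_{(u,v) in edges} edge_weight(u,v) * [labels[u] != labels[v]]
// by solving one binary min-cut subproblem per label ("alpha") until no
// expansion move decreases the energy.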
double min_cut = (std::numeric_limits<double>::max)();
bool success;
Timer gt;
gt.start();
do {
success = false;
int alpha = 0;
@ -189,11 +199,9 @@ public:
double flow = boost::boykov_kolmogorov_max_flow(graph, cluster_source,
cluster_sink);
if(min_cut - flow < flow * 1e-10) {
continue;
}
std::cout << "prev flow: " << min_cut << " new flow: " << flow << std::endl;
min_cut = flow;
success = true;
@ -209,11 +217,16 @@ public:
}
} while(success);
std::cout << "Graph-cut time: " << gt.time() << std::endl;
return min_cut;
}
};
/**
* @brief Implements alpha-expansion graph cut algorithm.
*
* To represent the graph, it uses adjacency_list with OutEdgeList = vecS, VertexList = vecS.
* It also preallocates the vertex list using the maximum possible number of nodes.
*/
class Alpha_expansion_graph_cut_boost_with_preallocate
{
private:
@ -241,6 +254,15 @@ private:
typedef Traits::edge_descriptor Edge_descriptor;
typedef Traits::edge_iterator Edge_iterator;
/**
* Adds two directed edges between @a v1 and @a v2.
* @param v1 first vertex
* @param v2 second vertex
* @param w1 weight for edge from v1 to v2 (v1->v2)
* @param w2 weight for edge from v2 to v1 (v2->v1)
* @param graph graph to which the edges are added
* @return pair of added edges, first: v1->v2 and second: v2->v1
*/
boost::tuple<Edge_descriptor, Edge_descriptor>
add_edge_and_reverse(Vertex_descriptor& v1, Vertex_descriptor& v2, double w1,
double w2, Graph& graph) const {
@ -263,14 +285,18 @@ private:
}
public:
/**
* Applies alpha-expansion graph-cut for energy minimization.
* @param edges contains incident vertex-id pairs for each edge (vertex-ids should be between [0, number of vertices -1])
* @param edge_weights contains the weight of each edge in @a edges (matched by order)
* @param probability_matrix contains the probability (responsibility) of each center for each vertex, accessed as probability_matrix[center][vertex]
* @param[in, out] labels as input it contains initial labeling of vertices (i.e. a center-id between [0, number of centers -1]),
* and as output it returns final labeling of vertices
*/
double operator()(const std::vector<std::pair<int, int> >& edges,
const std::vector<double>& edge_weights,
const std::vector<std::vector<double> >& probability_matrix,
std::vector<int>& labels) const {
Timer gt;
gt.start();
double min_cut = (std::numeric_limits<double>::max)();
bool success;
do {
@ -339,25 +365,34 @@ public:
}
} // end of for loop on labels
} while(success);
std::cout << "Graph-cut time: " << gt.time() << std::endl;
return min_cut;
}
};
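// Hypothetical call site, identical for all three functors in this file
// (every number below is made up for illustration):
//
//   std::vector<std::pair<int, int> > edges;
//   edges.push_back(std::make_pair(0, 1)); // a 3-vertex path: 0 - 1 - 2
//   edges.push_back(std::make_pair(1, 2));
//   std::vector<double> edge_weights(2, 1.0); // smoothness weights
//   std::vector<std::vector<double> > probability_matrix(
//       2, std::vector<double>(3, 0.5)); // 2 centers x 3 vertices
//   std::vector<int> labels(3, 0); // initial labeling
//   double energy = Alpha_expansion_graph_cut_boost_with_preallocate()(
//       edges, edge_weights, probability_matrix, labels);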
#ifdef CGAL_USE_BOYKOV_KOLMOGOROV_MAXFLOW_SOFTWARE
/**
* @brief Implements alpha-expansion graph cut algorithm.
*
* For the underlying max-flow algorithm, it uses the MAXFLOW software implemented by Boykov & Kolmogorov.
* No preallocation is made.
*/
class Alpha_expansion_graph_cut_boykov_kolmogorov
{
public:
/**
* Applies alpha-expansion graph-cut for energy minimization.
* @param edges contains incident vertex-id pairs for each edge (vertex-ids should be between [0, number of vertices -1])
* @param edge_weights contains the weight of each edge in @a edges (matched by order)
* @param probability_matrix contains the probability (responsibility) of each center for each vertex, accessed as probability_matrix[center][vertex]
* @param[in, out] labels as input it contains initial labeling of vertices (i.e. a center-id between [0, number of centers -1]),
* and as output it returns final labeling of vertices
*/
double operator()(const std::vector<std::pair<int, int> >& edges,
const std::vector<double>& edge_weights,
const std::vector<std::vector<double> >& probability_matrix,
std::vector<int>& labels) const {
double min_cut = (std::numeric_limits<double>::max)();
bool success;
Timer gt;
gt.start();
do {
success = false;
int alpha = 0;
@ -410,7 +445,7 @@ public:
if(min_cut - flow < flow * 1e-10) {
continue;
}
std::cout << "prev flow: " << min_cut << " new flow: " << flow << std::endl;
min_cut = flow;
success = true;
//update labeling
@ -422,8 +457,6 @@ public:
}
}
} while(success);
std::cout << "Graph-cut time: " << gt.time() << std::endl;
return min_cut;
}
};

View File

@ -4,10 +4,6 @@
* @file Disk_samplers.h
* @brief This file contains 3 sampling methods, which can be used as a template parameter for CGAL::internal::SDF_calculation.
*/
#include <CGAL/number_type_basic.h>
#include <boost/tuple/tuple.hpp>
#include <vector>
#include <cmath>
#define CGAL_ANGLE_ST_DEV_DIVIDER 3.0
@ -43,29 +39,29 @@ namespace internal
// (figure captions: Uniform | Custom power, biased to center)
/**
* @brief Uses Vogel's method to sample points from unit-disk.
*
* Template parameter @a Tuple should have a constructor which takes 3 double parameters.
* @see Disk_samplers.h, SDF_calculation
*/
template<class Tuple>
class Vogel_disk_sampling
{
private:
bool uniform;
public:
Vogel_disk_sampling() : uniform(false) { }
/**
* Samples points from unit-disk.
* @param number_of_points number of points to be picked
* @param cone_angle opening angle of cone (might be necessary for weighting)
* @param[out] samples sampled points from disk, each point is tuple of:
* - get<0> = coordinate-x
* - get<1> = coordinate-y
* - get<2> = weight (proportional to angle between cone-normal)
* @param[out] out_it sampled points from disk, each point is tuple of:
* - first = coordinate-x
* - second = coordinate-y
* - third = weight (proportional to the angle between the ray and the cone normal)
* @param uniform if false, a custom power is used and the sampled points are biased toward the center
*/
void operator()(int number_of_points, double cone_angle,
std::vector<boost::tuple<double, double, double> >& samples) const {
typedef boost::tuple<double, double, double> Disk_sample;
typedef std::vector<Disk_sample> Disk_samples_list;
template<class OutputIterator>
void operator()(int number_of_points,
double cone_angle,
OutputIterator out_it,
bool uniform = false) const {
const double golden_ratio = 3.0 - std::sqrt(5.0);
if(uniform) {
@ -76,7 +72,7 @@ public:
double R = std::sqrt(static_cast<double>(i) / number_of_points);
double angle = atan(R / length_of_normal);
double weight = exp(-0.5 * (std::pow(angle / angle_st_dev, 2)));
samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), weight));
*out_it++ = Tuple(R * cos(Q), R * sin(Q), weight);
}
} else {
const double custom_power = 8.0 /
@ -86,7 +82,7 @@ public:
double Q = i * golden_ratio * CGAL_PI;
double R = std::pow(static_cast<double>(i) / number_of_points, custom_power);
// use uniform weights, since we already give importance to locations that are close to center.
samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), 1.0));
*out_it++ = Tuple(R * cos(Q), R * sin(Q), 1.0);
}
}
}
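// Usage after this change, with an output iterator instead of a vector
// reference (the numbers are illustrative):
//
//   typedef boost::tuple<double, double, double> Disk_sample;
//   std::vector<Disk_sample> samples;
//   Vogel_disk_sampling<Disk_sample>()(64, 2.0 / 3.0 * CGAL_PI,
//                                      std::back_inserter(samples));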
@ -119,7 +115,10 @@ public:
/////////////////////////////////////////////////////////
/**
* @brief Uses polar mapping to sample points from unit-disk.
*
* Template parameter @a Tuple should have a constructor which takes 3 double parameters.
*/
template<class Tuple>
class Polar_disk_sampling
{
public:
@ -127,19 +126,18 @@ public:
* Samples points from unit-disk.
* @param number_of_points number of points to be picked
* @param cone_angle opening angle of cone (might be necessary for weighting)
* @param[out] samples sampled points from disk, each point is tuple of:
* - get<0> = coordinate-x
* - get<1> = coordinate-y
* - get<2> = weight (proportional to angle between cone-normal)
* @param[out] out_it sampled points from disk, each point is tuple of:
* - first = coordinate-x
* - second = coordinate-y
* - third = weight (proportional to the angle between the ray and the cone normal)
*
* Note:
* Returned samples size = \f$ \lfloor \sqrt {number\_of\_points} \rfloor ^ 2 \f$
*/
void operator()(int number_of_points, double cone_angle,
std::vector<boost::tuple<double, double, double> >& samples) const {
typedef boost::tuple<double, double, double> Disk_sample;
typedef std::vector<Disk_sample> Disk_samples_list;
template<class OutputIterator>
void operator()(int number_of_points,
double cone_angle,
OutputIterator out_it) const {
const int number_of_points_sqrt = static_cast<int>(std::sqrt(
static_cast<double>(number_of_points)));
const double length_of_normal = 1.0 / tan(cone_angle / 2.0);
@ -154,7 +152,7 @@ public:
double Q = 2 * w2 * CGAL_PI;
double angle = atan(R / length_of_normal);
double weight = exp(-0.5 * (pow(angle / angle_st_dev, 2)));
samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), weight));
*out_it++ = Tuple(R * cos(Q), R * sin(Q), weight);
}
}
};
@ -184,7 +182,10 @@ public:
/////////////////////////////////////////////////////////
/**
* @brief Uses concentric mapping to sample points from unit-disk.
*
* Template parameter @a Tuple should have a constructor which takes 3 double parameters.
*/
template<class Tuple>
class Concentric_disk_sampling
{
public:
@ -192,19 +193,18 @@ public:
* Samples points from unit-disk.
* @param number_of_points number of points to be picked
* @param cone_angle opening angle of cone (might be necessary for weighting)
* @param[out] samples sampled points from disk, each point is tuple of:
* - get<0> = coordinate-x
* - get<1> = coordinate-y
* - get<2> = weight (proportional to angle between cone-normal)
* @param[out] out_it sampled points from disk, each point is tuple of:
* - first = coordinate-x
* - second = coordinate-y
* - third = weight (proportional to the angle between the ray and the cone normal)
*
* Note:
* Returned samples size = \f$ \lfloor \sqrt {number\_of\_points} \rfloor ^ 2 \f$
*/
void operator()(int number_of_points, double cone_angle,
std::vector<boost::tuple<double, double, double> >& samples) const {
typedef boost::tuple<double, double, double> Disk_sample;
typedef std::vector<Disk_sample> Disk_samples_list;
template<class OutputIterator>
void operator()(int number_of_points,
double cone_angle,
OutputIterator out_it) const {
const int number_of_points_sqrt = static_cast<int>(std::sqrt(
static_cast<double>(number_of_points)));
const double length_of_normal = 1.0 / tan(cone_angle / 2.0);
@ -240,7 +240,7 @@ public:
Q *= (CGAL_PI / 4.0);
double angle = atan(R / length_of_normal);
double weight = exp(-0.5 * (pow(angle / angle_st_dev, 2)));
samples.push_back(Disk_sample(R * cos(Q), R * sin(Q), weight));
*out_it++ = Tuple(R * cos(Q), R * sin(Q), weight);
}
}
};

View File

@ -1,22 +1,11 @@
#ifndef CGAL_SURFACE_MESH_SEGMENTATION_EXPECTATION_MAXIMIZATION_H
#define CGAL_SURFACE_MESH_SEGMENTATION_EXPECTATION_MAXIMIZATION_H
/* NEED TO BE DONE */
/* About implementation:
* + There are a lot of parameters (with default values) and initialization types,
* so I am planning to use either the 'Named Parameter Idiom' or the Boost Parameter Library...
*
*/
#include <vector>
#include <cmath>
#include <algorithm>
#include <ctime>
#include <cstdlib>
#include <limits>
#include <fstream>
#include <iostream>
#include <CGAL/assertions.h>
#include <CGAL/internal/Surface_mesh_segmentation/K_means_clustering.h>
#define CGAL_DEFAULT_MAXIMUM_ITERATION 15
@ -24,13 +13,6 @@
#define CGAL_DEFAULT_NUMBER_OF_RUN 20
#define CGAL_DEFAULT_SEED 1340818006
#define ACTIVATE_SEGMENTATION_EM_DEBUG
#ifdef ACTIVATE_SEGMENTATION_EM_DEBUG
#define SEG_DEBUG(x) x;
#else
#define SEG_DEBUG(x)
#endif
namespace CGAL
{
namespace internal
@ -102,8 +84,6 @@ private:
Initialization_types init_type;
public:
/** Going to be removed */
Expectation_maximization() { }
/**
* Constructs structures and runs the algorithm.
*
@ -124,11 +104,11 @@ public:
double threshold = CGAL_DEFAULT_THRESHOLD,
int maximum_iteration = CGAL_DEFAULT_MAXIMUM_ITERATION )
:
points(data), init_type(init_type), threshold(threshold),
maximum_iteration(maximum_iteration),
final_likelihood(-(std::numeric_limits<double>::max)()),
points(data), responsibility_matrix(std::vector<std::vector<double> >
(number_of_centers, std::vector<double>(points.size()))),
threshold(threshold), maximum_iteration(maximum_iteration),
init_type(init_type) {
responsibility_matrix(std::vector<std::vector<double> >(number_of_centers,
std::vector<double>(points.size()))) {
// For initialization with k-means, with one run
if(init_type == K_MEANS_INITIALIZATION) {
K_means_clustering k_means(number_of_centers, data);
@ -210,7 +190,7 @@ private:
centers[closest_center].deviation += min_distance * min_distance;
}
for(std::size_t i = 0; i < centers.size(); ++i) {
// There shouldn't be such case, unless same point is selected as a center twice (it is checked!)
// There shouldn't be such a case unless the same point is selected as a center twice (which is also checked!)
CGAL_assertion(member_count[i] != 0);
centers[i].deviation = std::sqrt(centers[i].deviation / member_count[i]);
}
@ -361,6 +341,7 @@ private:
total_membership += membership;
}
new_mean /= total_membership;
// Calculate new deviation
double new_deviation = 0.0;
for(std::size_t point_i = 0; point_i < points.size(); ++point_i) {
@ -368,6 +349,7 @@ private:
new_deviation += membership * std::pow(points[point_i] - new_mean, 2);
}
new_deviation = std::sqrt(new_deviation/total_membership);
// Assign new parameters
centers[center_i].mixing_coefficient = total_membership / points.size();
centers[center_i].deviation = new_deviation;
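// For reference, the assignments above implement the standard GMM M-step
// (a restatement of the code, not new behavior), with r_ik the responsibility
// of center k for point i and N the number of points:
//   mixing_coefficient_k = (sum_i r_ik) / N
//   mean_k = (sum_i r_ik * x_i) / (sum_i r_ik)
//   deviation_k = sqrt( (sum_i r_ik * (x_i - mean_k)^2) / (sum_i r_ik) )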

View File

@ -54,7 +54,6 @@ public:
NeighborSelector()(facet_it, window_size,
neighbors); // gather neighbors in the window
double total_sdf_value = 0.0, total_weight = 0.0;
double current_sdf_value = values[facet_it];
// calculate deviation for range weighting.
double deviation = 0.0;
@ -69,6 +68,8 @@ public:
smoothed_values.push_back(current_sdf_value);
continue;
}
// smooth
double total_sdf_value = 0.0, total_weight = 0.0;
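// The loop below is a bilateral filter over the gathered neighborhood
// (restating the visible pieces; gaussian_function's body is outside this hunk):
// each neighbor contributes
//   w = gaussian(level in window) * gaussian(|sdf(neighbor) - sdf(facet)|)
// and the smoothed value is sum(w * sdf(neighbor)) / sum(w).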
for(typename std::map<Facet_const_handle, int>::iterator it = neighbors.begin();
it != neighbors.end(); ++it) {
double spatial_weight = gaussian_function(it->second,
@ -170,7 +171,8 @@ public:
* @param max_level maximum allowed distance (number of levels) between root facet and visited facet
* @param[out] neighbors visited facets and their distances to root facet
*/
void operator()(Facet_const_handle& facet, int max_level,
void operator()(Facet_const_handle& facet,
int max_level,
std::map<Facet_const_handle, int>& neighbors) const {
typedef std::pair<Facet_const_handle, int> Facet_level_pair;
std::queue<Facet_level_pair> facet_queue;
@ -210,7 +212,8 @@ public:
* @param max_level maximum allowed distance (number of levels) between root facet and visited facet
* @param[out] neighbors visited facets and their distances to root facet
*/
void operator()(Facet_const_handle& facet, int max_level,
void operator()(Facet_const_handle& facet,
int max_level,
std::map<Facet_const_handle, int>& neighbors) const {
typedef std::pair<Facet_const_handle, int> Facet_level_pair;
std::queue<Facet_level_pair> facet_queue;

View File

@ -3,8 +3,6 @@
#include <vector>
#include <cmath>
#include <ctime>
#include <cstdlib>
#include <limits>
#include <algorithm>

View File

@ -6,12 +6,11 @@
#include <CGAL/AABB_polyhedron_triangle_primitive.h>
#include <CGAL/internal/Surface_mesh_segmentation/AABB_traversal_traits.h>
#include <CGAL/internal/Surface_mesh_segmentation/Disk_samplers.h>
#include <map>
#include <vector>
#include <algorithm>
#include <boost/tuple/tuple.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/optional.hpp>
#define CGAL_ACCEPTANCE_RATE_THRESHOLD 0.5
#define CGAL_ST_DEV_MULTIPLIER 0.75
@ -25,10 +24,9 @@ namespace internal
* @brief Responsible for calculating the Shape Diameter Function over the surface of the mesh.
* @see Disk_samplers.h
*/
template <class Polyhedron, class DiskSampling = Vogel_disk_sampling>
template <class Polyhedron, class DiskSampling = Vogel_disk_sampling<boost::tuple<double, double, double> > >
class SDF_calculation
{
//type definitions
private:
typedef typename Polyhedron::Traits Kernel;
@ -89,13 +87,17 @@ public:
const int sparse_ray_count = number_of_rays;
const int dense_ray_count = sparse_ray_count * 2;
DiskSampling()(sparse_ray_count, cone_angle, disk_samples_sparse);
DiskSampling()(dense_ray_count, cone_angle, disk_samples_dense);
DiskSampling()(sparse_ray_count, cone_angle,
std::back_inserter(disk_samples_sparse));
DiskSampling()(dense_ray_count, cone_angle,
std::back_inserter(disk_samples_dense));
Tree tree(mesh.facets_begin(), mesh.facets_end());
for(Facet_const_iterator facet_it = mesh.facets_begin();
facet_it != mesh.facets_end(); ++facet_it) {
double sdf = calculate_sdf_value_of_facet(facet_it, tree, disk_samples_sparse);
// Note that calculate_sdf_value_of_facet calls itself again with disk_samples_dense if
// number of accepted rays after outlier removal is below CGAL_ACCEPTANCE_RATE_THRESHOLD
sdf_values[facet_it] = sdf;
}
}
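// Recap of the per-facet computation, pieced together from this diff (helper
// bodies are elided): rays are sampled on a disk via DiskSampling, cast inside
// a cone opposite the facet normal using the AABB tree, outliers are rejected
// around the median distance (see below), and the SDF value is the weighted
// average of the accepted ray distances.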
@ -246,7 +248,7 @@ private:
intersections.begin();
op_it != intersections.end() ; ++op_it) {
Object object = op_it->first;
Primitive_id id = op_it->second;
if(id == facet) {
continue; // since the ray center is located on this facet, skip any intersection with it
}
@ -377,7 +379,7 @@ private:
st_dev += dif * dif;
}
st_dev = std::sqrt(st_dev / accepted_ray_count);
/* Calculate sdf, accept rays : ray_dist - median < st dev */
/* Calculate sdf, accept rays if ray_dist - median < st dev */
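/* e.g. (made-up numbers, assuming dif is measured from the median): with ray
 * distances {1.0, 1.1, 5.0}, median = 1.1 and st_dev ~ 2.25, so the 5.0 ray
 * (dif = 3.9) is rejected before the weighted average. */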
int not_outlier_count = 0;
w_it = ray_weights.begin();
for(std::vector<double>::iterator dist_it = ray_distances.begin();

View File

@ -1,57 +1,25 @@
#ifndef CGAL_SURFACE_MESH_SEGMENTATION_H
#define CGAL_SURFACE_MESH_SEGMENTATION_H
/* NEED TO BE DONE
* About implementation:
* +) I am not using BGL; as far as I checked, there is progress on a BGL redesign
* (https://cgal.geometryfactory.com/CGAL/Members/wiki/Features/BGL) which introduces some features
* for face-based traversal / manipulation by FaceGraphs
* +) Deciding on which parameters will be taken from user
*
* About paper (and correctness / efficiency etc.):
* +) Weighting ray distances with inverse of their angles: not sure how to weight exactly
* +) Anisotropic smoothing: have no idea what it is exactly, should read some material (google search is not enough)
* +) Deciding how to generate rays in cone: for now using "polar angle" and "accept-reject (square)" and "concentric mapping" techniques
*/
#include <CGAL/internal/Surface_mesh_segmentation/Expectation_maximization.h>
#include <CGAL/internal/Surface_mesh_segmentation/K_means_clustering.h>
#include <CGAL/internal/Surface_mesh_segmentation/Filters.h>
//#include "Alpha_expansion_graph_cut.h"
#include <CGAL/internal/Surface_mesh_segmentation/Alpha_expansion_graph_cut.h>
#include <CGAL/internal/Surface_mesh_segmentation/SDF_calculation.h>
#include <CGAL/utility.h>
#include <CGAL/Timer.h>
#include <CGAL/Mesh_3/dihedral_angle_3.h>
#include <boost/optional.hpp>
#include <boost/property_map/property_map.hpp>
#include <iostream>
#include <fstream>
#include <cmath>
#include <vector>
#include <algorithm>
#include <utility>
#include <queue>
#include <map>
#define CGAL_NORMALIZATION_ALPHA 5.0
#define CGAL_CONVEX_FACTOR 0.08
#define CGAL_SMOOTHING_LAMBDA_MULTIPLIER 100.0
//IOY: these are going to be removed at the end (no CGAL_ prefix)
#define ACTIVATE_SEGMENTATION_DEBUG
#ifdef ACTIVATE_SEGMENTATION_DEBUG
#define SEG_DEBUG(x) x;
#else
#define SEG_DEBUG(x)
#endif
// If defined then profile function becomes active, and called from constructor.
//#define SEGMENTATION_PROFILE
namespace CGAL
{
namespace internal
@ -79,45 +47,14 @@ class Polyhedron,
>
class Surface_mesh_segmentation
{
private:
/**
* An adaptor for an Lvalue property map. It stores a pointer to a vector as the underlying data structure,
* and also stores another property map which maps each `key` to a vector index.
*/
template<class AnyPolyhedron, class ValueType, class FacetIdPropertyMap>
struct Polyhedron_property_map_for_facet
: public boost::put_get_helper<ValueType&,
Polyhedron_property_map_for_facet<AnyPolyhedron, ValueType, FacetIdPropertyMap>
> {
public:
typedef typename AnyPolyhedron::Facet_const_handle key_type;
typedef ValueType value_type;
typedef value_type& reference;
typedef boost::writable_property_map_tag category;
Polyhedron_property_map_for_facet() : internal_vector(NULL) { }
Polyhedron_property_map_for_facet(std::vector<ValueType>* internal_vector,
FacetIdPropertyMap facet_id_map)
: internal_vector(internal_vector), facet_id_map(facet_id_map) { }
reference operator[](key_type key) const {
return (*internal_vector)[facet_id_map[key]];
}
private:
std::vector<ValueType>* internal_vector;
FacetIdPropertyMap facet_id_map;
};
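// Hypothetical use of the adaptor (FacetIdMap is an assumed facet-to-index map):
//
//   std::vector<double> values(mesh.size_of_facets());
//   Polyhedron_property_map_for_facet<Polyhedron, double, FacetIdMap>
//       pmap(&values, facet_id_map);
//   pmap[some_facet] = 0.5; // writes values[facet_id_map[some_facet]]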
//type definitions
public:
typedef typename Polyhedron::Facet_const_handle Facet_const_handle;
private:
typedef typename Polyhedron::Traits Kernel;
typedef typename Polyhedron::Facet Facet;
typedef typename Polyhedron::Vertex Vertex;
typedef typename Kernel::Vector_3 Vector;
typedef typename Kernel::Point_3 Point;
typedef typename Kernel::Plane_3 Plane;
typedef typename Polyhedron::Facet Facet;
typedef typename Polyhedron::Edge_const_iterator Edge_const_iterator;
typedef typename Polyhedron::Halfedge_const_handle Edge_const_handle;
@ -134,7 +71,7 @@ private:
// member functions
public:
/**
* @pre parameter @a mesh should consist of triangles.
* @pre @a mesh.is_pure_triangle()
* @param mesh `CGAL Polyhedron` on which other functions operate.
*/
Surface_mesh_segmentation(const Polyhedron& mesh)
@ -145,22 +82,17 @@ public:
// Use these two functions together
template <class SDFPropertyMap>
std::pair<double, double>
calculate_sdf_values(double cone_angle,
int number_of_rays, SDFPropertyMap sdf_pmap) {
SEG_DEBUG(Timer t)
SEG_DEBUG(t.start())
calculate_sdf_values(double cone_angle, int number_of_rays,
SDFPropertyMap sdf_pmap) {
// calculate sdf values
SDF_calculation_class().calculate_sdf_values(mesh, cone_angle, number_of_rays,
sdf_pmap);
SEG_DEBUG(std::cout << t.time() << std::endl)
// apply post-processing steps
check_zero_sdf_values(sdf_pmap);
Filter()(mesh, get_window_size(), sdf_pmap);
std::pair<double, double> min_max_sdf_values = linear_normalize_sdf_values(
sdf_pmap);
SEG_DEBUG(std::cout << t.time() << std::endl)
// return minimum and maximum sdf values before normalization
return min_max_sdf_values;
}
@ -175,6 +107,7 @@ public:
// log normalize sdf values
std::vector<double> sdf_values;
log_normalize_sdf_values(sdf_pmap, sdf_values);
// soft clustering using GMM-fitting initialized with k-means
Expectation_maximization fitter(number_of_centers, sdf_values,
Expectation_maximization::K_MEANS_INITIALIZATION, 1);
@ -201,53 +134,12 @@ public:
segment_pmap[facet_it] = *label_it;
}
// assign a segment id for each facet
//return number_of_centers;
int number_of_segments = assign_segments(number_of_centers, sdf_pmap,
segment_pmap);
//std::cout << "ne : " <<number_of_segments << std::endl;
return number_of_segments;
}
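// Pipeline recap for the function above (the graph-cut call itself is elided
// in this hunk): log_normalize_sdf_values -> Expectation_maximization
// (k-means initialized GMM fitting) -> alpha-expansion graph cut over facet
// adjacencies -> assign_segments, which splits clusters into connected segments.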
private:
/**
* Going to be removed
*/
double calculate_dihedral_angle_of_edge(Edge_const_handle& edge) const {
CGAL_precondition(!edge->is_border_edge());
Facet_const_handle f1 = edge->facet();
Facet_const_handle f2 = edge->opposite()->facet();
const Point& f2_v1 = f2->halfedge()->vertex()->point();
const Point& f2_v2 = f2->halfedge()->next()->vertex()->point();
const Point& f2_v3 = f2->halfedge()->prev()->vertex()->point();
/*
* As far as I see from the results, segment boundaries occur in 'concave valleys'.
* It is not (clearly) stated in the paper, but should we just penalize 'concave' edges (not convex edges)?
* Actually that is what I understood from 'positive dihedral angle'.
*/
const Point& unshared_point_on_f1 = edge->next()->vertex()->point();
Plane p2(f2_v1, f2_v2, f2_v3);
bool concave = p2.has_on_positive_side(unshared_point_on_f1);
const Point& f1_v1 = f1->halfedge()->vertex()->point();
const Point& f1_v2 = f1->halfedge()->next()->vertex()->point();
const Point& f1_v3 = f1->halfedge()->prev()->vertex()->point();
Vector f1_normal = unit_normal(f1_v1, f1_v2, f1_v3);
Vector f2_normal = unit_normal(f2_v1, f2_v2, f2_v3);
double dot = f1_normal * f2_normal;
if(dot > 1.0) {
dot = 1.0;
} else if(dot < -1.0) {
dot = -1.0;
}
double angle = acos(dot) / CGAL_PI; // [0-1] normalize
if(!concave) {
angle *= CGAL_CONVEX_FACTOR;
}
return angle;
}
/**
* Calculates dihedral angle between facets and normalize them between [0-1] from [0 - 2*pi].
@ -256,7 +148,7 @@ private:
* @param edge whose dihedral angle is computed using incident facets
* @return computed dihedral angle
*/
double calculate_dihedral_angle_of_edge_2(Edge_const_handle& edge) const {
double calculate_dihedral_angle_of_edge(Edge_const_handle& edge) const {
CGAL_precondition(!edge->is_border_edge());
const Point& a = edge->vertex()->point();
const Point& b = edge->prev()->vertex()->point();
@ -265,7 +157,8 @@ private:
// As far as I checked: if, say, the dihedral angle is 5, this returns 175,
// and if the dihedral angle is -5, it returns -175.
// In other words, this function returns the angle between the planes.
double n_angle = Mesh_3::dihedral_angle(a, b, c, d) / 180.0;
double n_angle = Mesh_3::dihedral_angle(a, b, c, d);
n_angle /= 180.0;
bool concave = n_angle > 0;
double angle = 1 + ((concave ? -1 : +1) * n_angle);
@ -273,41 +166,6 @@ private:
angle *= CGAL_CONVEX_FACTOR;
}
return angle;
//Facet_const_handle f1 = edge->facet();
//Facet_const_handle f2 = edge->opposite()->facet();
//
//const Point& f2_v1 = f2->halfedge()->vertex()->point();
//const Point& f2_v2 = f2->halfedge()->next()->vertex()->point();
//const Point& f2_v3 = f2->halfedge()->prev()->vertex()->point();
///*
//  * As far as I see from the results, segment boundaries occur in 'concave valleys'.
// * There is no such thing written (clearly) in the paper but should we just penalize 'concave' edges (not convex edges) ?
// * Actually that is what I understood from 'positive dihedral angle'.
// */
//const Point& unshared_point_on_f1 = edge->next()->vertex()->point();
//Plane p2(f2_v1, f2_v2, f2_v3);
//bool concave = p2.has_on_positive_side(unshared_point_on_f1);
////std::cout << n_angle << std::endl;
////if(!concave) { return epsilon; } // So no penalties for convex dihedral angle ? Not sure...
//const Point& f1_v1 = f1->halfedge()->vertex()->point();
//const Point& f1_v2 = f1->halfedge()->next()->vertex()->point();
//const Point& f1_v3 = f1->halfedge()->prev()->vertex()->point();
//Vector f1_normal = unit_normal(f1_v1, f1_v2, f1_v3);
//Vector f2_normal = unit_normal(f2_v1, f2_v2, f2_v3);
//
//double dot = f1_normal * f2_normal;
//if(dot > 1.0) { dot = 1.0; }
//else if(dot < -1.0) { dot = -1.0; }
//double angle = acos(dot) / CGAL_PI; // [0-1] normalize
//std::cout << angle << " " << n_angle << " " << (concave ? "concave": "convex") << std::endl;
//if(std::abs(angle - folded_angle) > 1e-6)
//{
//
//}
//if(angle < epsilon) { angle = epsilon; }
//return angle;
}
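// Worked example of the normalization above (Mesh_3::dihedral_angle returns
// degrees): +90 (concave) gives n_angle = 0.5, so angle = 1 - 0.5 = 0.5;
// -90 (convex) also gives 0.5, but is then multiplied by CGAL_CONVEX_FACTOR
// (0.08), yielding 0.04. After the subsequent -log weighting, cutting through
// convex edges becomes more expensive, so boundaries prefer concave valleys.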
/**
@ -315,6 +173,7 @@ private:
* normalized_sdf = log( alpha * ( current_sdf - min_sdf ) / ( max_sdf - min_sdf ) + 1 ) / log( alpha + 1 )
* @param sdf_values `ReadablePropertyMap` with `Polyhedron::Facet_const_handle` as key and `double` as value type
* @param[out] normalized_sdf_values normalized values stored in facet iteration order
* Important note: @a sdf_values should contain linearly normalized values between [0-1]
*/
template<class SDFPropertyMap>
void log_normalize_sdf_values(SDFPropertyMap sdf_values,
@ -330,6 +189,8 @@ private:
/**
* Normalize sdf values between [0-1].
* @param sdf_values `ReadWritePropertyMap` with `Polyhedron::Facet_const_handle` as key and `double` as value type
* @return minimum and maximum SDF values before normalization
*/
template<class SDFPropertyMap>
std::pair<double, double> linear_normalize_sdf_values(SDFPropertyMap
@ -361,7 +222,6 @@ private:
* - ...
*/
int get_window_size() {
double facet_sqrt = std::sqrt(mesh.size_of_facets() / 2000.0);
return static_cast<int>(facet_sqrt) + 1;
}
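// e.g. (plugging numbers into the formula above): 2000 facets -> sqrt(1.0) + 1
// -> window size 2; 8000 facets -> sqrt(4.0) = 2 -> window size 3.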
@ -426,10 +286,10 @@ private:
it_i != probabilities.end(); ++it_i) {
for(std::vector<double>::iterator it = it_i->begin(); it != it_i->end(); ++it) {
double probability = (std::max)(*it,
epsilon); // give any facet a little probability to be in any cluster
epsilon); // give every facet a little probability to be in any cluster
probability = -log(probability);
*it = (std::max)(probability,
std::numeric_limits<double>::epsilon()); // zero values are not accepted in max-flow
*it = (std::max)(probability, std::numeric_limits<double>::epsilon());
// zero values are not accepted in max-flow as weights for edges that connect a vertex with the Source or Sink (in boost::boykov_kolmogorov_max_flow)
}
}
}
@ -443,6 +303,10 @@ private:
void calculate_and_log_normalize_dihedral_angles(double smoothing_lambda,
std::vector<std::pair<int, int> >& edges,
std::vector<double>& edge_weights) const {
// associate each facet with an id
// important note: ids should be compatible with iteration order of facets:
// [0 <- facet_begin(),...., size_of_facets() -1 <- facet_end()]
// Why? They are sent to the graph-cut algorithm, where other data associated with facets is also sorted according to iteration order.
std::map<Facet_const_handle, int> facet_index_map;
int facet_index = 0;
for(Facet_const_iterator facet_it = mesh.facets_begin();
@ -452,7 +316,7 @@ private:
}
const double epsilon = 1e-5;
// edges and their weights. pair<int, int> stores facet-id pairs (see above) (maybe boost::tuple would be more suitable)
for(Edge_const_iterator edge_it = mesh.edges_begin();
edge_it != mesh.edges_end(); ++edge_it) {
if(edge_it->is_border_edge()) {
@ -462,7 +326,7 @@ private:
const int index_f2 = facet_index_map[edge_it->opposite()->facet()];
edges.push_back(std::pair<int, int>(index_f1, index_f2));
double angle = calculate_dihedral_angle_of_edge_2(edge_it);
double angle = calculate_dihedral_angle_of_edge(edge_it);
if(angle < epsilon) {
angle = epsilon;
@ -502,6 +366,7 @@ private:
template<class SegmentPropertyMap, class SDFProperyMap>
int assign_segments(int number_of_clusters, SDFProperyMap sdf_values,
SegmentPropertyMap segments) {
// assign a segment-id to each facet
int segment_id = number_of_clusters;
std::vector<std::pair<int, double> > segments_with_average_sdf_values;
@ -511,6 +376,7 @@ private:
number_of_clusters) { // not visited by depth_first_traversal
std::pair<double, int> sdf_facet_count_pair = depth_first_traversal(facet_it,
segment_id, sdf_values, segments);
double average_sdf_value_for_segment = sdf_facet_count_pair.first /
sdf_facet_count_pair.second;
segments_with_average_sdf_values.push_back(std::pair<int, double>(segment_id,