update svn to r733 (3)

main
Razvan Mihalyi 12 years ago
parent 5aed26340d
commit b03c2a3e0c

@ -0,0 +1,50 @@
/**
* Point Cloud Segmentation using Felzenszwalb-Huttenlocher Algorithm
*
* Copyright (C) Jacobs University Bremen
*
* Released under the GPL version 3.
*
* @author Mihai-Cotizo Sima
*/
#ifndef __FHGRAPH_H_
#define __FHGRAPH_H_
#include <vector>
#include <list>
#include <slam6d/point.h>
#include <slam6d/scan.h>
#include <segmentation/segment-graph.h>
#include <ANN/ANN.h>
/**
 * @brief Weighted neighborhood graph over a point cloud; produces the edge
 *        list consumed by segment_graph() (Felzenszwalb-Huttenlocher).
 */
class FHGraph {
public:
/**
 * Build the graph.
 * @param ps        input points (held by reference -- must outlive this object)
 * @param weight    function computing the weight of the edge between two points
 * @param sigma     Gaussian smoothing width; values <= 0.01 disable smoothing
 * @param eps       approximation factor for the ANN nearest-neighbor searches
 * @param neighbors number of neighbors per point (k), or a cap for radius search
 * @param radius    if >= 0 use fixed-radius search, otherwise kNN search
 */
FHGraph(std::vector< Point >& ps, double weight(Point, Point), double sigma, double eps, int neighbors, float radius);
/// Edge array (owned by this graph) to hand to segment_graph().
edge* getGraph();
/// Access the index-th input point.
Point operator[](int index);
/// Number of vertices (points).
int getNumPoints();
/// Number of edges in the graph.
int getNumEdges();
/// Release internal storage.
void dispose();
private:
/// Fill adjency_list with the (approximate) neighbors of every point.
void compute_neighbors(double weight(Point, Point), double eps);
/// Turn the adjacency list into edges with Gaussian-smoothed weights.
void do_gauss(double sigma);
/// Turn the adjacency list into edges using the raw weights.
void without_gauss();
std::vector<edge> edges;      // final edge list handed to the segmenter
std::vector<Point>& points;   // reference to the caller-owned point set
int V;                        // number of vertices
int E;                        // number of edges
int nr_neighbors;             // k for kNN search, or cap for radius search
float radius;                 // search radius; < 0 selects kNN search
struct he{ int x; float w; }; // half-edge: target vertex index and weight
std::vector< std::list<he> > adjency_list; // per-vertex half-edge lists
};
#endif

@ -0,0 +1,48 @@
/*
Copyright (C) 2006 Pedro Felzenszwalb
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef DISJOINT_SET
#define DISJOINT_SET
// disjoint-set forests using union-by-rank and path compression (sort of).
// One element of the disjoint-set forest.
typedef struct {
int rank; // union-by-rank heuristic: upper bound on the tree height (valid at a root)
int p;    // parent index; a root points to itself
int size; // number of elements in the set (valid at the root)
} uni_elt;
// Disjoint-set forest (union-find) over the elements 0..elements-1.
class universe {
public:
// Create `elements` singleton sets.
universe(int elements);
~universe();
// Return the representative (root) of the set containing x.
int find(int x);
// Merge the sets containing x and y.
void join(int x, int y);
// Size of the set rooted at x (x should be a representative).
int size(int x) const {
return elts[x].size;
}
// Number of disjoint sets currently remaining.
int num_sets() const {
return num;
}
private:
uni_elt *elts; // one entry per element
int num;       // number of remaining sets
};
#endif

@ -0,0 +1,49 @@
/*
Copyright (C) 2006 Pedro Felzenszwalb
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef SEGMENT_GRAPH
#define SEGMENT_GRAPH
#include <algorithm>
#include <cmath>
#include "disjoint-set.h"
// threshold function: per-component merging threshold, decays as the
// component grows (c is the user-supplied constant)
#define THRESHOLD(size, c) (c/size)
// Weighted, undirected edge between vertices a and b.
typedef struct {
float w;  // edge weight
int a, b; // endpoint vertex indices
} edge;
// Orders edges by ascending weight (segmentation sorts edges first).
bool operator<(const edge &a, const edge &b);
/*
* Segment a graph
*
* Returns a disjoint-set forest representing the segmentation.
*
* num_vertices: number of vertices in graph.
* num_edges: number of edges in graph
* edges: array of edges.
* c: constant for threshold function.
*/
universe *segment_graph(int num_vertices, int num_edges, edge *edges,
float c);
#endif

@ -0,0 +1,238 @@
/** @file
* @brief Representation of the optimized k-d tree.
* @author Remus Dumitru. Jacobs University Bremen, Germany
* @author Corneliu-Claudiu Prodescu. Jacobs University Bremen, Germany
* @author Andreas Nuechter. Institute of Computer Science, University of Osnabrueck, Germany.
* @author Kai Lingemann. Institute of Computer Science, University of Osnabrueck, Germany.
* @author Thomas Escher. Institute of Computer Science, University of Osnabrueck, Germany.
*/
#ifndef __KD_TREE_IMPL_H__
#define __KD_TREE_IMPL_H__
#include "slam6d/kdparams.h"
#include "globals.icc"
#ifdef _MSC_VER
#if !defined _OPENMP && defined OPENMP
#define _OPENMP
#endif
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
/**
* @brief The optimized k-d tree.
*
* A kD tree for points, with limited
* capabilities (find nearest point to
* a given point, or to a ray).
**/
template<class PointData, class AccessorData, class AccessorFunc>
class KDTreeImpl {
public:
  // BUGFIX: initialize the node. Previously npts and the union's child
  // pointers were left uninitialized, so destroying a KDTreeImpl on which
  // create() was never called read garbage in the destructor (UB).
  inline KDTreeImpl() : npts(0) {
    node.child1 = 0;
    node.child2 = 0;
  }

  virtual inline ~KDTreeImpl() {
    if (!npts) {
      // interior node: recursively delete the two subtrees
#ifdef WITH_OPENMP_KD
      omp_set_num_threads(OPENMP_NUM_THREADS);
#pragma omp parallel for schedule(dynamic)
#endif
      for (int i = 0; i < 2; i++) {
        if (i == 0 && node.child1) delete node.child1;
        if (i == 1 && node.child2) delete node.child2;
      }
    } else {
      // leaf node: release the index array
      if (leaf.p) delete [] leaf.p;
    }
  }

  /**
   * Recursively build the tree over the n points selected by indices.
   *
   * @param pts     point storage, dereferenced through AccessorFunc
   * @param indices indices into pts; reordered in place while partitioning
   * @param n       number of entries in indices
   */
  virtual void create(PointData pts, AccessorData *indices, size_t n) {
    AccessorFunc point;

    // Find bbox of the current point set
    double xmin = point(pts, indices[0])[0], xmax = point(pts, indices[0])[0];
    double ymin = point(pts, indices[0])[1], ymax = point(pts, indices[0])[1];
    double zmin = point(pts, indices[0])[2], zmax = point(pts, indices[0])[2];
    for(unsigned int i = 1; i < n; i++) {
      xmin = min(xmin, point(pts, indices[i])[0]);
      xmax = max(xmax, point(pts, indices[i])[0]);
      ymin = min(ymin, point(pts, indices[i])[1]);
      ymax = max(ymax, point(pts, indices[i])[1]);
      zmin = min(zmin, point(pts, indices[i])[2]);
      zmax = max(zmax, point(pts, indices[i])[2]);
    }

    // Leaf nodes hold at most 10 points
    if ((n > 0) && (n <= 10)) {
      npts = n;
      leaf.p = new AccessorData[n];
      // fill leaf index array with indices
      for(unsigned int i = 0; i < n; ++i) {
        leaf.p[i] = indices[i];
      }
      return;
    }

    // Else, interior node: store the voxel and split on its longest axis
    npts = 0;
    node.center[0] = 0.5 * (xmin+xmax);
    node.center[1] = 0.5 * (ymin+ymax);
    node.center[2] = 0.5 * (zmin+zmax);
    node.dx = 0.5 * (xmax-xmin);
    node.dy = 0.5 * (ymax-ymin);
    node.dz = 0.5 * (zmax-zmin);
    node.r2 = sqr(node.dx) + sqr(node.dy) + sqr(node.dz);

    // Find longest axis
    if (node.dx > node.dy) {
      if (node.dx > node.dz) {
        node.splitaxis = 0;
      } else {
        node.splitaxis = 2;
      }
    } else {
      if (node.dy > node.dz) {
        node.splitaxis = 1;
      } else {
        node.splitaxis = 2;
      }
    }

    // Partition about the center of the longest axis. A (near-)degenerate
    // bounding box is turned into a (possibly oversized) leaf instead, so
    // the partition below always makes progress.
    double splitval = node.center[node.splitaxis];
    if ( fabs(max(max(node.dx,node.dy),node.dz)) < 0.01 ) {
      npts = n;
      leaf.p = new AccessorData[n];
      // fill leaf index array with indices
      for(unsigned int i = 0; i < n; ++i) {
        leaf.p[i] = indices[i];
      }
      return;
    }

    // Hoare-style partition: indices < splitval to the left, >= to the right
    AccessorData* left = indices, * right = indices + n - 1;
    while(true) {
      while(point(pts, *left)[node.splitaxis] < splitval)
        left++;
      while(point(pts, *right)[node.splitaxis] >= splitval)
        right--;
      if(right < left)
        break;
      std::swap(*left, *right);
    }

    // Build subtrees
    int i;
#ifdef WITH_OPENMP_KD // does anybody know the reason why this is slower ?? --Andreas
    omp_set_num_threads(OPENMP_NUM_THREADS);
#pragma omp parallel for schedule(dynamic)
#endif
    for (i = 0; i < 2; i++) {
      if (i == 0) {
        node.child1 = new KDTreeImpl();
        node.child1->create(pts, indices, left - indices);
      }
      if (i == 1) {
        node.child2 = new KDTreeImpl();
        node.child2->create(pts, left, n - (left - indices));
      }
    }
  }

protected:
  /**
   * storing the parameters of the k-d tree, i.e., the current closest point,
   * the distance to the current closest point and the point itself.
   * These global variable are needed in this search.
   *
   * Padded in the parallel case.
   */
#ifdef _OPENMP
#ifdef __INTEL_COMPILER
  __declspec (align(16)) static KDParams params[MAX_OPENMP_NUM_THREADS];
#else
  static KDParams params[MAX_OPENMP_NUM_THREADS];
#endif //__INTEL_COMPILER
#else
  static KDParams params[MAX_OPENMP_NUM_THREADS];
#endif

  /**
   * number of points. If this is 0: intermediate node. If nonzero: leaf.
   */
  int npts;

  /**
   * Cue the standard rant about anon unions but not structs in C++
   */
  union {
    /**
     * in case of internal node...
     */
    struct {
      double center[3]; ///< storing the center of the voxel (R^3)
      double dx,        ///< defining the voxel itself
             dy,        ///< defining the voxel itself
             dz,        ///< defining the voxel itself
             r2;        ///< defining the voxel itself
      int splitaxis;    ///< defining the kind of splitaxis
      KDTreeImpl *child1; ///< pointers to the childs
      KDTreeImpl *child2; ///< pointers to the childs
    } node;
    /**
     * in case of leaf node ...
     */
    struct {
      /**
       * store the value itself
       * Here we store just a pointer to the data
       */
      AccessorData* p;
    } leaf;
  };

  /**
   * Recursive nearest-neighbor query. The query point and the best match
   * found so far live in params[threadNum]; the result is left there too.
   */
  void _FindClosest(const PointData& pts, int threadNum) const {
    AccessorFunc point;

    // Leaf nodes: scan all stored points
    if (npts) {
      for (int i = 0; i < npts; i++) {
        double myd2 = Dist2(params[threadNum].p, point(pts, leaf.p[i]));
        if (myd2 < params[threadNum].closest_d2) {
          params[threadNum].closest_d2 = myd2;
          params[threadNum].closest = point(pts, leaf.p[i]);
        }
      }
      return;
    }

    // Quick check of whether to abort: lower bound on the distance from the
    // query point to this voxel must beat the best distance found so far
    double approx_dist_bbox = max(max(fabs(params[threadNum].p[0]-node.center[0])-node.dx,
                                      fabs(params[threadNum].p[1]-node.center[1])-node.dy),
                                  fabs(params[threadNum].p[2]-node.center[2])-node.dz);
    if (approx_dist_bbox >= 0 && sqr(approx_dist_bbox) >= params[threadNum].closest_d2)
      return;

    // Recursive case: descend into the nearer child first, then the other
    // child only if the splitting plane is closer than the best match
    double myd = node.center[node.splitaxis] - params[threadNum].p[node.splitaxis];
    if (myd >= 0.0) {
      node.child1->_FindClosest(pts, threadNum);
      if (sqr(myd) < params[threadNum].closest_d2) {
        node.child2->_FindClosest(pts, threadNum);
      }
    } else {
      node.child2->_FindClosest(pts, threadNum);
      if (sqr(myd) < params[threadNum].closest_d2) {
        node.child1->_FindClosest(pts, threadNum);
      }
    }
  }
};
#endif

@ -0,0 +1,7 @@
if(WITH_NORMALS)
  find_package(OpenCV REQUIRED)

  add_executable(calculateNormals
    calculate_normals.cc
    ../slam6d/fbr/fbr_global.cc
    ../slam6d/fbr/panorama.cc)

  target_link_libraries(calculateNormals
    scan ANN newmat fbr_panorama fbr_cv_io
    ${Boost_LIBRARIES} ${OpenCV_LIBS})
endif()

@ -0,0 +1,643 @@
/*
* calculateNormals implementation
*
* Copyright (C) Johannes Schauer, Razvan Mihaly
*
* Released under the GPL version 3
*
*/
#include "ANN/ANN.h"
#include "newmat/newmat.h"
#include "newmat/newmatap.h"
#include "newmat/newmatio.h"
using namespace NEWMAT;
#include "slam6d/point.h"
#include "normals/pointNeighbor.h"
#include "slam6d/scan.h"
#include "slam6d/globals.icc"
#include "slam6d/fbr/panorama.h"
#include "normals/point.h"
#include "normals/SRI.h"
#include <string>
using std::string;
#include <iostream>
using std::cout;
using std::endl;
using std::vector;
#include <algorithm>
#include <boost/program_options.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/fstream.hpp>
namespace po = boost::program_options;
enum normal_method {KNN_PCA, AKNN_PCA, PANO_PCA, PANO_SRI};
/**
 * Require <option> to be present whenever the selected normal method is
 * ntype; throws std::logic_error otherwise.
 */
void normal_option_dependency(const po::variables_map & vm, normal_method ntype, const char *option)
{
    // only relevant when this method was actually selected
    if (!vm.count("normalMethod") || vm["normalMethod"].as<normal_method>() != ntype)
        return;
    if (vm.count(option) == 0)
        throw std::logic_error (string("this normal method needs ")+option+" to be set");
}
/**
 * Reject <option> whenever the selected normal method is ntype; throws
 * std::logic_error if it was supplied anyway.
 */
void normal_option_conflict(const po::variables_map & vm, normal_method ntype, const char *option)
{
    // only relevant when this method was actually selected
    if (!vm.count("normalMethod") || vm["normalMethod"].as<normal_method>() != ntype)
        return;
    if (vm.count(option) != 0)
        throw std::logic_error (string("this normal method is incompatible with ")+option);
}
/*
* validates input type specification
*/
// Boost.ProgramOptions custom validator: parse an IOType from its name.
void validate(boost::any& v, const std::vector<std::string>& values,
              IOType*, int) {
    if (values.empty())
        throw std::runtime_error("Invalid model specification");
    const string& arg = values.at(0);
    try {
        v = formatname_to_io_type(arg.c_str());
    } catch (...) { // formatname_to_io_type signals unknown names by throwing
        throw std::runtime_error("Format " + arg + " unknown.");
    }
}
// Boost.ProgramOptions custom validator: parse a normal_method from its
// (case-insensitive) name.
void validate(boost::any& v, const std::vector<std::string>& values,
              normal_method*, int) {
    if (values.empty())
        throw std::runtime_error("Invalid model specification");
    const string& arg = values.at(0);
    static const struct { const char* name; normal_method method; } table[] = {
        { "KNN_PCA",  KNN_PCA  },
        { "AKNN_PCA", AKNN_PCA },
        { "PANO_PCA", PANO_PCA },
        { "PANO_SRI", PANO_SRI },
    };
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); ++i) {
        if (strcasecmp(arg.c_str(), table[i].name) == 0) {
            v = table[i].method;
            return;
        }
    }
    throw std::runtime_error(std::string("normal method ") + arg + std::string(" is unknown"));
}
/*
* parse commandline options, fill arguments
*/
/**
 * Parse the command line and fill all output arguments; prints usage and
 * exits on --help, throws std::logic_error on inconsistent/missing options.
 */
void parse_options(int argc, char **argv, int &start, int &end,
    bool &scanserver, string &dir, IOType &iotype,
    int &maxDist, int &minDist, normal_method &normalMethod, int &knn,
    int &kmin, int &kmax, double& alpha, int &width, int &height,
    bool &flipnormals, double &factor)
{
  po::options_description generic("Generic options");
  generic.add_options()
    ("help,h", "output this help message");

  po::options_description input("Input options");
  input.add_options()
    ("start,s", po::value<int>(&start)->default_value(0),
     "start at scan <arg> (i.e., neglects the first <arg> scans) "
     "[ATTENTION: counting naturally starts with 0]")
    ("end,e", po::value<int>(&end)->default_value(-1),
     "end after scan <arg>")
    ("format,f", po::value<IOType>(&iotype)->default_value(UOS),
     "using shared library <arg> for input. (chose F from {uos, uos_map, "
     "uos_rgb, uos_frames, uos_map_frames, old, rts, rts_map, ifp, "
     "riegl_txt, riegl_rgb, riegl_bin, zahn, ply})")
    ("max,M", po::value<int>(&maxDist)->default_value(-1),
     "neglegt all data points with a distance larger than <arg> 'units")
    ("min,m", po::value<int>(&minDist)->default_value(-1),
     "neglegt all data points with a distance smaller than <arg> 'units")
    ("scanserver,S", po::bool_switch(&scanserver),
     "Use the scanserver as an input method and handling of scan data")
    ;

  po::options_description normal("Normal options");
  normal.add_options()
    ("normalMethod,N", po::value<normal_method>(&normalMethod)->default_value(KNN_PCA),
     "choose the method for computing normals:\n"
     "KNN_PCA -- use kNN and PCA\n"
     "AKNN_PCA -- use adaptive kNN and PCA\n"
     "PANO_PCA -- use panorama image neighbors and PCA\n"
     "PANO_SRI -- use panorama image neighbors and spherical range image differentiation\n")
    ("knn,K", po::value<int>(&knn),
     "select the k in kNN search")
    ("kmin,1", po::value<int>(&kmin),
     "select k_min in adaptive kNN search")
    ("kmax,2", po::value<int>(&kmax),
     "select k_max in adaptive kNN search")
    ("alpha,a", po::value<double>(&alpha),
     "select the alpha parameter for detecting an ill-conditioned neighborhood")
    ("width,w", po::value<int>(&width),
     "width of panorama")
    // BUGFIX: "height" previously also used the short name 'h', colliding
    // with "help,h" and making "-h" ambiguous; it now has only a long form.
    ("height", po::value<int>(&height),
     "height of panorama")
    ("flipnormals,F", po::bool_switch(&flipnormals),
     "flip orientation of normals towards scan pose")
    ("factor,c", po::value<double>(&factor),
     "factor for SRI computation")
    ;

  po::options_description hidden("Hidden options");
  hidden.add_options()
    ("input-dir", po::value<string>(&dir), "input dir");

  // all options
  po::options_description all;
  all.add(generic).add(input).add(normal).add(hidden);

  // options visible with --help
  po::options_description cmdline_options;
  cmdline_options.add(generic).add(input).add(normal);

  // positional argument
  po::positional_options_description pd;
  pd.add("input-dir", 1);

  // process options
  po::variables_map vm;
  po::store(po::command_line_parser(argc, argv).
            options(all).positional(pd).run(), vm);
  po::notify(vm);

  // display help
  if (vm.count("help")) {
    cout << cmdline_options;
    exit(0);
  }

  // per-method consistency checks
  normal_option_dependency(vm, KNN_PCA, "knn");
  normal_option_conflict(vm, KNN_PCA, "kmin");
  normal_option_conflict(vm, KNN_PCA, "kmax");
  normal_option_conflict(vm, KNN_PCA, "alpha");
  normal_option_conflict(vm, KNN_PCA, "width");
  normal_option_conflict(vm, KNN_PCA, "height");
  normal_option_conflict(vm, KNN_PCA, "factor");

  normal_option_conflict(vm, AKNN_PCA, "knn");
  normal_option_dependency(vm, AKNN_PCA, "kmin");
  normal_option_dependency(vm, AKNN_PCA, "kmax");
  normal_option_dependency(vm, AKNN_PCA, "alpha");
  normal_option_conflict(vm, AKNN_PCA, "width");
  normal_option_conflict(vm, AKNN_PCA, "height");
  normal_option_conflict(vm, AKNN_PCA, "factor");

  // knn is intentionally neither required nor forbidden for PANO_PCA: it is
  // optional there (used to filter the panorama neighborhood when > 0).
  // BUGFIX: a stray duplicate of normal_option_dependency(vm, KNN_PCA, "knn")
  // used to sit here; it is already enforced above.
  normal_option_conflict(vm, PANO_PCA, "kmin");
  normal_option_conflict(vm, PANO_PCA, "kmax");
  normal_option_conflict(vm, PANO_PCA, "alpha");
  normal_option_dependency(vm, PANO_PCA, "width");
  normal_option_dependency(vm, PANO_PCA, "height");
  normal_option_conflict(vm, PANO_PCA, "factor");

  normal_option_conflict(vm, PANO_SRI, "knn");
  normal_option_conflict(vm, PANO_SRI, "kmin");
  normal_option_conflict(vm, PANO_SRI, "kmax");
  normal_option_conflict(vm, PANO_SRI, "alpha");
  normal_option_conflict(vm, PANO_SRI, "width");
  normal_option_conflict(vm, PANO_SRI, "height");
  normal_option_dependency(vm, PANO_SRI, "factor");

  // BUGFIX: guard against a missing positional argument -- indexing an empty
  // string below was undefined behavior.
  if (dir.empty())
    throw std::logic_error("no input directory given");

  // add trailing slash to directory if not present yet
  if (dir[dir.length()-1] != '/') dir = dir + "/";
}
/*
* retrieve a cv::Mat with x,y,z,r from a scan object
* functionality borrowed from scan_cv::convertScanToMat but this function
* does not allow a scanserver to be used, prints to stdout and can only
* handle a single scan
*/
void scan2mat(Scan* scan, cv::Mat& scan_cv) {
DataXYZ xyz = scan->get("xyz");
unsigned int nPoints = xyz.size();
// one row per point, 4 float channels; only x, y, z are written below,
// the remaining channel stays at the zero set by cv::Scalar::all(0)
scan_cv.create(nPoints,1,CV_32FC(4));
scan_cv = cv::Scalar::all(0);
// NOTE(review): a Vec3f iterator is used over a 4-channel matrix -- confirm
// this advances by the full 4-float element as fbr::panorama expects
cv::MatIterator_<cv::Vec3f> it = scan_cv.begin<cv::Vec3f>();
for(unsigned int i = 0; i < nPoints; i++){
(*it)[0] = xyz[i][0];
(*it)[1] = xyz[i][1];
(*it)[2] = xyz[i][2];
++it;
}
}
/**
* Helper function that maps x, y, z to R, G, B using a linear function
*/
void mapNormalToRGB(const Point& normal, Point& rgb)
{
// map x and y linearly from [-1,1] onto [0,255]
rgb.x = 127.5 * normal.x + 127.5;
rgb.y = 127.5 * normal.y + 127.5;
// z uses its absolute value, so the blue channel ignores the normal's sign
rgb.z = 255.0 * fabs(normal.z);
}
/**
* Write normals to .3d files using the uos_rgb format
*/
void writeNormals(const Scan* scan, const string& dir,
        const vector<Point>& points, const vector<Point>& normals)
{
    // both output files share the "<dir>scan<id>" prefix
    const string base = dir + "scan" + string(scan->getIdentifier());

    // one "x y z r g b" line per point, normals encoded as colors
    ofstream scan_file((base + ".3d").c_str());
    for(size_t i = 0; i < points.size(); ++i) {
        Point rgb;
        mapNormalToRGB(normals[i], rgb);
        scan_file
            << points[i].x << " " << points[i].y << " " << points[i].z << " "
            << (unsigned int) rgb.x << " " << (unsigned int) rgb.y << " "
            << (unsigned int) rgb.z << "\n";
    }
    scan_file.close();

    // matching pose file with an all-zero pose
    ofstream pose_file((base + ".pose").c_str());
    pose_file << "0 0 0\n" << "0 0 0\n";
    pose_file.close();
}
/**
* Compute eigen decomposition of a point and its neighbors using the NEWMAT library
* @param point - input points with corresponding neighbors
* @param e_values - out parameter returns the eigenvalues
* @param e_vectors - out parameter returns the eigenvectors
*/
void computeEigenDecomposition(const PointNeighbor& point,
DiagonalMatrix& e_values, Matrix& e_vectors)
{
Point centroid(0, 0, 0);
vector<Point> neighbors = point.neighbors;
for (size_t j = 0; j < neighbors.size(); ++j) {
centroid.x += neighbors[j].x;
centroid.y += neighbors[j].y;
centroid.z += neighbors[j].z;
}
centroid.x /= (double) neighbors.size();
centroid.y /= (double) neighbors.size();
centroid.z /= (double) neighbors.size();
Matrix S(3, 3);
S = 0.0;
for (size_t j = 0; j < neighbors.size(); ++j) {
ColumnVector point_prime(3);
point_prime(1) = neighbors[j].x - centroid.x;
point_prime(2) = neighbors[j].y - centroid.y;
point_prime(3) = neighbors[j].z - centroid.z;
S = S + point_prime * point_prime.t();
}
// normalize S
for (int j = 0; j < 3; ++j)
for (int k = 0; k < 3; ++k)
S(j+1, k+1) /= (double) neighbors.size();
SymmetricMatrix C;
C << S;
// the decomposition
Jacobi(C, e_values, e_vectors);
#ifdef DEBUG
// Print the result
cout << "The eigenvalues matrix:" << endl;
cout << e_values << endl;
#endif
}
/**
* Compute neighbors using kNN search
* @param points - input set of points
* @param points_neighbors - output set of points with corresponding neighbors
* @param knn - k constant in kNN search
* @param kmax - to be used in adaptive knn search as the upper bound on adapting the k constant, defaults to -1 for regular kNN search
* @param alpha - to be used in adaptive knn search for detecting ill-conditioned neighborhoods
* @param eps - parameter required by the ANN library in kNN search
*/
void computeKNearestNeighbors(const vector<Point>& points,
vector<PointNeighbor>& points_neighbors, int knn, int kmax=-1,
double alpha=1000.0, double eps=1.0)
{
ANNpointArray point_array = annAllocPts(points.size(), 3);
for (size_t i = 0; i < points.size(); ++i) {
point_array[i] = new ANNcoord[3];
point_array[i][0] = points[i].x;
point_array[i][1] = points[i].y;
point_array[i][2] = points[i].z;
}
ANNkd_tree t(point_array, points.size(), 3);
ANNidxArray n;
ANNdistArray d;
if (kmax < 0) {
/// regular kNN search, allocate memory for knn
n = new ANNidx[knn];
d = new ANNdist[knn];
} else {
/// adaptive kNN search, allocate memory for kmax
n = new ANNidx[kmax];
d = new ANNdist[kmax];
}
for (size_t i = 0; i < points.size(); ++i) {
vector<Point> neighbors;
ANNpoint p = point_array[i];
t.annkSearch(p, knn, n, d, eps);
neighbors.push_back(points[i]);
for (int j = 0; j < knn; ++j) {
if ( n[j] != (int)i )
neighbors.push_back(points[n[j]]);
}
PointNeighbor current_point(points[i], neighbors);
points_neighbors.push_back( current_point );
Matrix e_vectors(3,3); e_vectors = 0.0;
DiagonalMatrix e_values(3); e_values = 0.0;
computeEigenDecomposition( current_point, e_values, e_vectors );
if (kmax > 0) {
/// detecting an ill-conditioned neighborhood
if (e_values(3) / e_values(2) > alpha && e_values(2) > 0.0) {
if (knn < kmax)
cout << "Increasing kmin to " << ++knn << endl;
}
}
}
delete[] n;
delete[] d;
}
/**
 * Filter a point's neighborhood down to its k nearest neighbors using kNN search
 * @param point - input point with neighbors
 * @param new_point - output point with the filtered neighbors
 * @param knn - k constant in kNN search
 * @param eps - parameter required by the ANN library in kNN search
 */
void computeKNearestNeighbors(const PointNeighbor& point,
PointNeighbor& new_point, int knn,
double eps=1.0)
{
/// allocate memory for all neighbors of point plus the point itself
ANNpointArray point_array = annAllocPts(point.neighbors.size()+1, 3);
for (size_t i = 0; i < point.neighbors.size(); ++i) {
point_array[i] = new ANNcoord[3];
point_array[i][0] = point.neighbors[i].x;
point_array[i][1] = point.neighbors[i].y;
point_array[i][2] = point.neighbors[i].z;
}
int last = point.neighbors.size();
point_array[last] = new ANNcoord[3];
point_array[last][0] = point.point.x;
point_array[last][1] = point.point.y;
point_array[last][2] = point.point.z;
ANNkd_tree t(point_array, point.neighbors.size()+1, 3);
ANNidxArray n;
ANNdistArray d;
/// regular kNN search, allocate memory for knn
n = new ANNidx[knn];
d = new ANNdist[knn];
vector<Point> new_neighbors;
/// last point in the array is the current point
ANNpoint p = point_array[point.neighbors.size()];
t.annkSearch(p, knn, n, d, eps);
new_neighbors.push_back(point.point);
for (int j = 0; j < knn; ++j) {
if ( n[j] != (int) point.neighbors.size() )
new_neighbors.push_back(point.neighbors[n[j]]);
}
new_point.point = point.point;
new_point.neighbors = new_neighbors;
delete[] n;
delete[] d;
}
/**
 * Compute neighbors using panorama images
 * @param scan - input scan from which the panorama image is created
 * @param points_neighbors - output set of points with corresponding neighbors
 * @param width, height - dimensions of the panorama image
 * @param knn - if > 0, each neighborhood is filtered down to its k nearest
 */
void computePanoramaNeighbors(Scan* scan,
vector<PointNeighbor>& points_neighbors, int width, int height, int knn)
{
// project the scan into an equirectangular panorama of the requested size
cv::Mat scan_cv;
scan2mat(scan, scan_cv);
fbr::panorama fPanorama(width, height, fbr::EQUIRECTANGULAR, 1, 0, fbr::EXTENDED);
fPanorama.createPanorama(scan_cv);
cv::Mat img = fPanorama.getRangeImage();
// extended map: per pixel, the bucket of all 3D points projecting onto it
vector<vector<vector<cv::Vec3f> > > extended_map = fPanorama.getExtendedMap();
for (int row = 0; row < height; ++row) {
for (int col = 0; col < width; ++col) {
vector<cv::Vec3f> points_panorama = extended_map[row][col];
/// if no points found, skip pixel
if (points_panorama.size() < 1) continue;
/// foreach point from panorama consider all points in the bucket as its neighbors
for (size_t point_idx = 0; point_idx < points_panorama.size(); ++point_idx) {
Point point;
point.x = points_panorama[point_idx][0];
point.y = points_panorama[point_idx][1];
point.z = points_panorama[point_idx][2];
// same-bucket neighbors (the point itself excluded)
vector<Point> neighbors;
for (size_t i = 0; i < points_panorama.size(); ++i) {
if (i != point_idx)
neighbors.push_back(Point (points_panorama[i][0], points_panorama[i][1], points_panorama[i][2]) );
}
/// add neighbors from adjacent pixels and buckets
// 3x3 pixel window clipped at the panorama borders
for (int i = -1; i <= 1; ++i) {
for (int j = -1; j <= 1; ++j) {
if (!(i==0 && j==0) && !(row+i < 0 || col+j < 0)
&& !(row+i >= height || col+j >= width) ) {
vector<cv::Vec3f> neighbors_panorama = extended_map[row+i][col+j];
for (size_t k = 0; k < neighbors_panorama.size(); ++k)
neighbors.push_back(Point (neighbors_panorama[k][0],
neighbors_panorama[k][1],
neighbors_panorama[k][2]) );
}
}
}
/// filter the point by kNN search
// knn <= 0 keeps the full pixel-window neighborhood unfiltered
PointNeighbor current_point(point, neighbors);
if (knn > 0) {
PointNeighbor filtered_point;
computeKNearestNeighbors(current_point, filtered_point, knn);
points_neighbors.push_back(filtered_point);
} else {
points_neighbors.push_back(current_point);
}
}
}
}
}
/**
* Compute normals using PCA given a set of points and their neighbors
* @param scan - pointer to current scan, used to compute the position vectors
* @param points - input set of points with corresponding neighbors
* @param normals - output set of normals
*/
void computePCA(const Scan* scan, const vector<PointNeighbor>& points,
    vector<Point>& normals, bool flipnormals)
{
  // scanner position, used to orient the normals
  ColumnVector origin(3);
  const double *scan_pose = scan->get_rPos();
  for (int i = 0; i < 3; ++i)
    origin(i+1) = scan_pose[i];

  for(size_t i = 0; i < points.size(); ++i) {
    // PERF: the previous code copied points[i].neighbors into a local
    // vector here on every iteration and never used it.
    // degenerate neighborhood: no plane can be fitted, emit a zero normal
    if (points[i].neighbors.size() < 2) {
      normals.push_back( Point(0,0,0) );
      continue;
    }

    // unit direction from the scanner to the point
    ColumnVector point_vector(3);
    point_vector(1) = points[i].point.x - origin(1);
    point_vector(2) = points[i].point.y - origin(2);
    point_vector(3) = points[i].point.z - origin(3);
    point_vector = point_vector / point_vector.NormFrobenius();

    Matrix e_vectors(3,3); e_vectors = 0.0;
    DiagonalMatrix e_values(3); e_values = 0.0;
    computeEigenDecomposition(points[i], e_values, e_vectors);

    // consider first (smallest) eigenvector as the normal
    ColumnVector v1(3);
    v1(1) = e_vectors(1,1);
    v1(2) = e_vectors(2,1);
    v1(3) = e_vectors(3,1);

    Real angle = (v1.t() * point_vector).AsScalar();
    // orient towards scan pose
    // works better when orientation is not flipped
    if (flipnormals && angle < 0) {
      v1 *= -1.0;
    }
    normals.push_back( Point(v1(1), v1(2), v1(3)) );
  }
}
void computeSRI(int factor, vector<Point>& points, vector<Point>& normals)
{
SRI *sri2 = new SRI(0, factor);
for (size_t i = 0; i < points.size(); i++) {
sri2->addPoint(points[i].x, points[i].y, points[i].z);
}
points.clear();
for (unsigned int i = 0; i < sri2->points.size(); i++) {
double rgbN[3], x, y, z;
PointN* p = sri2->points[i];
p->getCartesian(x, y, z);
sri2->getNormalSRI(p, rgbN);
normals.push_back(Point(rgbN[0], rgbN[1], rgbN[2]));
points.push_back(Point(x, z, y));
}
}
/**
 * Append all cartesian coordinates of a scan to the given point vector.
 */
void scan2points(Scan* scan, vector<Point> &points)
{
    DataXYZ xyz = scan->get("xyz");
    const unsigned int count = xyz.size();
    points.reserve(points.size() + count);
    for (unsigned int idx = 0; idx < count; ++idx)
        points.push_back(Point(xyz[idx][0], xyz[idx][1], xyz[idx][2]));
}
/**
 * Read the scans in the given directory, compute one normal per point with
 * the selected method, and write points colored by their normals as uos_rgb
 * scans into a "normals/" subdirectory.
 */
int main(int argc, char **argv)
{
// commandline arguments
int start, end;
bool scanserver;
int maxDist, minDist;
string dir;
IOType iotype;
normal_method normalMethod;
int knn, kmin, kmax;
double alpha;
int width, height;
bool flipnormals;
double factor;
parse_options(argc, argv, start, end, scanserver, dir, iotype, maxDist,
minDist, normalMethod, knn, kmin, kmax, alpha, width, height,
flipnormals, factor);
Scan::openDirectory(scanserver, dir, iotype, start, end);
if(Scan::allScans.size() == 0) {
cerr << "No scans found. Did you use the correct format?" << endl;
exit(-1);
}
// all output goes into <dir>/normals/
boost::filesystem::path boost_dir(dir + "normals/");
boost::filesystem::create_directory(boost_dir);
for(ScanVector::iterator it = Scan::allScans.begin(); it != Scan::allScans.end(); ++it) {
Scan* scan = *it;
// apply optional filtering
scan->setRangeFilter(maxDist, minDist);
vector<PointNeighbor> points_neighbors;
vector<Point> normals;
vector<Point> points;
scan2points(scan, points);
// 1) collect each point's neighborhood, 2) estimate its normal
switch (normalMethod) {
case KNN_PCA:
computeKNearestNeighbors(points, points_neighbors, knn);
computePCA(scan, points_neighbors, normals, flipnormals);
break;
case AKNN_PCA:
computeKNearestNeighbors(points, points_neighbors, kmin, kmax, alpha);
computePCA(scan, points_neighbors, normals, flipnormals);
break;
case PANO_PCA:
computePanoramaNeighbors(scan, points_neighbors, width, height, knn);
computePCA(scan, points_neighbors, normals, flipnormals);
break;
case PANO_SRI:
// note: computeSRI rewrites `points` to stay aligned with `normals`
computeSRI(factor, points, normals);
break;
default:
cerr << "unknown normal method" << endl;
return 1;
break;
}
// sanity check: one normal per point is required for writing
if (points.size() != normals.size()) {
cerr << "got " << points.size() << " points but " << normals.size() << " normals" << endl;
return 1;
}
writeNormals(scan, dir + "normals/", points, normals);
}
Scan::closeDirectory();
return 0;
}

@ -0,0 +1,95 @@
/*******************************************************
A simple program that demonstrates NewMat10 library.
The program defines a random symmetric matrix
and computes its eigendecomposition.
For further details read the NewMat10 Reference Manual
********************************************************/
#include <stdlib.h>
#include <time.h>
#include <string.h>
// the following two are needed for printing
#include <iostream.h>
#include <iomanip.h>
/**************************************
 * The NewMat10 include files         */
#include <include.h>
#include <newmat.h>
#include <newmatap.h>
#include <newmatio.h>
/***************************************/
/**
 * NewMat10 demo: build a random symmetric matrix, compute its eigen
 * decomposition, verify the eigenvector property, invert it, and solve two
 * linear systems with a shared LU decomposition.
 */
int main(int argc, char **argv) {
  int M = 3, N = 5;
  Matrix X(M,N);              // Define an M x N general matrix

  // Fill X by random numbers between 0 and 9
  // Note that indexing into matrices in NewMat is 1-based!
  srand(time(NULL));
  for (int i = 1; i <= M; ++i) {
    for (int j = 1; j <= N; ++j) {
      X(i,j) = rand() % 10;
    }
  }

  SymmetricMatrix C;
  C << X * X.t();             // fill in C by X * X^t.
  // Works because we *know* that the result is symmetric

  cout << "The symmetrix matrix C" << endl;
  cout << setw(5) << setprecision(0) << C << endl;

  // compute eigendecomposition of C
  Matrix V(3,3);              // for eigenvectors
  DiagonalMatrix D(3);        // for eigenvalues

  // the decomposition
  Jacobi(C, D, V);

  // Print the result
  cout << "The eigenvalues matrix:" << endl;
  cout << setw(10) << setprecision(5) << D << endl;
  cout << "The eigenvectors matrix:" << endl;
  cout << setw(10) << setprecision(5) << V << endl;

  // Check that the first eigenvector indeed has the eigenvector property
  ColumnVector v1(3);
  v1(1) = V(1,1);
  v1(2) = V(2,1);
  v1(3) = V(3,1);

  ColumnVector Cv1 = C * v1;
  ColumnVector lambda1_v1 = D(1) * v1;
  cout << "The max-norm of the difference between C*v1 and lambda1*v1 is " <<
    NormInfinity(Cv1 - lambda1_v1) << endl << endl;

  // Build the inverse and check the result
  Matrix Ci = C.i();
  Matrix I = Ci * C;
  cout << "The inverse of C is" << endl;
  cout << setw(10) << setprecision(5) << Ci << endl;
  cout << "And the inverse times C is identity" << endl;
  cout << setw(10) << setprecision(5) << I << endl;

  // Example for multiple solves (see NewMat documentation)
  ColumnVector r1(3), r2(3);
  // BUGFIX: `i` was used here without a declaration -- the loop variables
  // above are scoped to their own for statements under standard C++.
  for (int i = 1; i <= 3; ++i) {
    r1(i) = rand() % 10;
    r2(i) = rand() % 10;
  }
  LinearEquationSolver CLU = C;   // decomposes C once
  ColumnVector s1 = CLU.i() * r1;
  ColumnVector s2 = CLU.i() * r2;
  cout << "solution for right hand side r1" << endl;
  cout << setw(10) << setprecision(5) << s1 << endl;
  cout << "solution for right hand side r2" << endl;
  cout << setw(10) << setprecision(5) << s2 << endl;

  return 0;
}

@ -0,0 +1,269 @@
/**
* Point Cloud Segmentation using Felzenszwalb-Huttenlocher Algorithm
*
* Copyright (C) Jacobs University Bremen
*
* Released under the GPL version 3.
*
* @author Mihai-Cotizo Sima
*/
#include <segmentation/FHGraph.h>
#include <map>
#include <omp.h>
#include <algorithm>
using namespace std;
/**
 * Builds a Felzenszwalb-Huttenlocher graph over a point cloud.
 *
 * @param ps        input points (held by reference; see dispose())
 * @param weight    edge weight function between two points
 * @param sigma     Gaussian smoothing width; values <= 0.01 disable smoothing
 * @param eps       error bound for the approximate nearest-neighbor search
 * @param neighbors neighbor count for knn search / cap for radius search
 * @param radius    radius search range; a negative value selects knn search
 */
FHGraph::FHGraph(std::vector< Point >& ps, double weight(Point, Point), double sigma, double eps, int neighbors, float radius) :
    points( ps ), V( ps.size() )
{
    this->nr_neighbors = neighbors;
    this->radius = radius;

    // Build the per-vertex adjacency list first, then turn it into the
    // final edge list, optionally smoothing each edge weight with a
    // Gaussian kernel over the weights of neighboring edges.
    compute_neighbors(weight, eps);
    if ( sigma > 0.01 )
        do_gauss(sigma);
    else
        without_gauss();

    // The adjacency list is only needed while constructing the edges.
    adjency_list.clear();
}
/**
 * Builds the adjacency list for every point.
 *
 * Copies the points into an ANN k-d tree and, for each point, collects
 * either its nr_neighbors approximate nearest neighbors (radius < 0) or
 * all points within 'radius' (optionally down-sampled to nr_neighbors).
 * Each neighbor is stored as a half-edge weighted by weight(p, q).
 *
 * @param weight function computing the edge weight between two points
 * @param eps    error bound for the approximate ANN searches
 */
void FHGraph::compute_neighbors(double weight(Point, Point), double eps)
{
    adjency_list.resize(points.size());

    // annAllocPts() already allocates dim coordinates per point, so the
    // coordinates are written straight into that storage. (The previous
    // code additionally did "pa[i] = new ANNcoord[3]", which leaked the
    // allocation from annAllocPts and the per-point arrays alike, since
    // annDeallocPts() below frees only the original block.)
    ANNpointArray pa = annAllocPts(points.size(), 3);
    for (size_t i=0; i<points.size(); ++i)
    {
        pa[i][0] = points[i].x;
        pa[i][1] = points[i].y;
        pa[i][2] = points[i].z;
    }
    ANNkd_tree t(pa, points.size(), 3);

    if ( radius < 0 ) // Using knn search
    {
        // Request one extra neighbor: the query point itself is returned
        // as its own nearest neighbor and skipped below. NOTE: this
        // permanently increments the member nr_neighbors, which
        // do_gauss()/without_gauss() later use for reserve().
        nr_neighbors++;
        ANNidxArray n = new ANNidx[nr_neighbors];
        ANNdistArray d = new ANNdist[nr_neighbors];
        for (size_t i=0; i<points.size(); ++i)
        {
            ANNpoint p = pa[i];
            t.annkSearch(p, nr_neighbors, n, d, eps);
            for (int j=0; j<nr_neighbors; ++j)
            {
                if ( n[j] == (int)i ) continue; // skip self match
                he e;
                e.x = n[j];
                e.w = weight(points[i], points[n[j]]);
                adjency_list[i].push_back(e);
            }
        }
        delete[] n;
        delete[] d;
    }
    else // Using radius search
    {
        float sqradius = radius*radius; // ANN expects a squared radius
        ANNidxArray n;
        ANNdistArray d;
        int nret;
        int total = 0;
        const int MOD = 1000; // progress output interval
        int TMP = MOD;
        for (size_t i=0; i<points.size(); ++i)
        {
            ANNpoint p = pa[i];
            // First query only counts the in-range neighbors, the second
            // one actually fetches them.
            nret = t.annkFRSearch(p, sqradius, 0, NULL, NULL, eps);
            total += nret;
            n = new ANNidx[nret];
            d = new ANNdist[nret];
            t.annkFRSearch(p, sqradius, nret, n, d, eps);
            if ( nr_neighbors > 0 && nr_neighbors < nret )
            {
                // Too many neighbors in range: keep a random subset.
                random_shuffle(n, n+nret);
                nret = nr_neighbors;
            }
            for (int j=0; j<nret; ++j)
            {
                if ( n[j] == (int)i ) continue; // skip self match
                he e;
                e.x = n[j];
                e.w = weight(points[i], points[n[j]]);
                adjency_list[i].push_back(e);
            }
            delete[] n;
            delete[] d;
            if ( TMP==0 )
            {
                TMP = MOD;
                cout << "Point " << i << "/" << V << ", or "<< (i*100.0 / V) << "%\r"; cout.flush();
            }
            TMP --;
        }
        cout << "Average nr of neighbors: " << (float) total / points.size() << endl;
    }
    annDeallocPts(pa);
}
// Unnormalized Gaussian kernel: exp(-((x - miu)/sigma)^2 / 2).
// Returns 1 at x == miu and decays with distance; the missing
// 1/(sigma*sqrt(2*pi)) factor is irrelevant because callers normalize.
static double gauss(double x, double miu, double sigma)
{
    const double z = (x - miu) / sigma;
    return std::exp(-0.5 * z * z);
}
// Rescales v in place so that its elements sum to 1.
// (No special-casing: an all-zero input divides by zero, as before.)
static void normalize(std::vector<double>& v)
{
    double total = 0.0;
    for (size_t idx = 0; idx < v.size(); ++idx)
        total += v[idx];
    for (size_t idx = 0; idx < v.size(); ++idx)
        v[idx] /= total;
}
// Converts an integer to its decimal string representation.
string tostr(int x)
{
    ostringstream out;
    out << x;
    return out.str();
}
/**
 * Fills the edge list from the adjacency list, replacing each raw edge
 * weight with a Gaussian-smoothed average of the weights of all edges
 * incident to the edge's two endpoints.
 *
 * For the half-edge (i, j->x) with weight j->w: every incident weight w
 * contributes the coefficient gauss(w, j->w, sigma); the coefficients
 * are normalized to sum to 1 and the smoothed weight is the resulting
 * weighted average of the incident edge weights.
 *
 * Parallelized over vertices with OpenMP; the shared 'edges' vector is
 * only mutated inside the critical section.
 *
 * @param sigma width of the Gaussian smoothing kernel
 */
void FHGraph::do_gauss(double sigma)
{
    // reserve() is a capacity hint only; growth happens under the
    // critical section below.
    edges.reserve( V * nr_neighbors);
    // Declared here but listed as private() in the pragma, so each
    // thread gets its own copies.
    list<he>::iterator k, j;
    vector<double> gauss_weight, edge_weight;
    edge e;
#pragma omp parallel for private(j, k, e, gauss_weight, edge_weight) schedule(dynamic)
    for (int i=0; i<V; ++i)
    {
        for (j=adjency_list[i].begin();
             j!=adjency_list[i].end();
             j++)
        {
            gauss_weight.clear();
            edge_weight.clear();
            // Collect weights of all edges incident to endpoint i ...
            for (k=adjency_list[i].begin();
                 k!=adjency_list[i].end();
                 ++k)
            {
                gauss_weight.push_back(gauss(k->w, j->w, sigma));
                edge_weight.push_back(k->w);
            }
            // ... and to the other endpoint j->x.
            for (k=adjency_list[j->x].begin();
                 k!=adjency_list[j->x].end();
                 ++k)
            {
                gauss_weight.push_back(gauss(k->w, j->w, sigma));
                edge_weight.push_back(k->w);
            }
            // Normalize the Gaussian coefficients, then compute the
            // smoothed weight as the weighted average of edge weights.
            normalize(gauss_weight);
            e.a = i; e.b = j->x;
            e.w = 0;
            // NOTE: this size_t k intentionally shadows the iterator k.
            for (size_t k=0; k<edge_weight.size(); ++k)
                e.w += gauss_weight[k] * edge_weight[k];
#pragma omp critical
            {
                edges.push_back(e);
            }
        }
    }
}
void FHGraph::without_gauss()
{
edges.reserve( V * nr_neighbors);
list<he>::iterator j;
edge e;
for (int i=0; i<V; ++i)
{
for (j=adjency_list[i].begin();
j!=adjency_list[i].end();
j++)
{
e.a = i; e.b = j->x; e.w = j->w;
edges.push_back(e);
}
}
}
// Returns a newly allocated copy of the edge list as a plain array
// (the format segment_graph() expects). The caller owns the array and
// must release it with delete[].
edge* FHGraph::getGraph()
{
    edge* result = new edge[edges.size()];
    std::copy(edges.begin(), edges.end(), result);
    return result;
}
// Returns a copy of the idx-th input point.
Point FHGraph::operator[](int idx)
{
    return points[idx];
}
int FHGraph::getNumPoints()
{
return V;
}
// Number of edges currently stored in the graph.
int FHGraph::getNumEdges()
{
    return (int) edges.size();
}
// Truly releases a container's memory by swapping it with a
// default-constructed temporary (the classic pre-C++11 shrink-to-zero
// idiom; clear() alone would keep the capacity allocated).
template<typename T>
void vectorFree(T& t) {
    T().swap(t);
}
// Frees all internal storage. NOTE: 'points' is a reference to the
// caller's vector, so this empties the caller's point cloud as well.
void FHGraph::dispose() {
    vectorFree(adjency_list);
    vectorFree(points);
    vectorFree(edges);
}

@ -0,0 +1,47 @@
/**
* Point Cloud Segmentation using Felzenszwalb-Huttenlocher Algorithm
*
* Copyright (C) Jacobs University Bremen
*
* Released under the GPL version 3.
*
* @author Mihai-Cotizo Sima
*/
#include <segmentation/disjoint-set.h>
// Creates a disjoint-set forest in which every element starts out as
// the root of its own singleton set.
universe::universe(int elements) {
    num = elements;
    elts = new uni_elt[elements];
    for (int i = 0; i < elements; ++i) {
        elts[i].p = i;      // each element is its own parent (root)
        elts[i].rank = 0;
        elts[i].size = 1;
    }
}
// Releases the element array allocated in the constructor.
universe::~universe() {
    delete [] elts;
}
// Returns the representative (root) of the set containing x.
// Performs only partial path compression: after walking up to the root,
// just the starting element x is re-linked directly to it — nodes on
// the way are left untouched (the header's "sort of" compression).
int universe::find(int x) {
    int y = x;
    while (y != elts[y].p)
        y = elts[y].p;
    elts[x].p = y;
    return y;
}
// Merges the sets rooted at x and y (both must already be roots, as
// returned by find()) using union by rank, keeping the surviving
// root's size up to date.
void universe::join(int x, int y) {
    if (elts[x].rank > elts[y].rank) {
        // x's tree is deeper: y goes beneath x.
        elts[y].p = x;
        elts[x].size += elts[y].size;
    } else {
        // y becomes the root; equal ranks deepen y's tree by one.
        if (elts[x].rank == elts[y].rank)
            elts[y].rank++;
        elts[x].p = y;
        elts[y].size += elts[x].size;
    }
    num--; // one fewer disjoint set
}

@ -0,0 +1,297 @@
/**
* Point Cloud Segmentation using Felzenszwalb-Huttenlocher Algorithm
*
* Copyright (C) Jacobs University Bremen
*
* Released under the GPL version 3.
*
* @author Billy Okal <b.okal@jacobs-university.de>
* @author Mihai-Cotizo Sima
* @file fhsegmentation.cc
*/
#include <iostream>
#include <string>
#include <fstream>
#include <errno.h>
#include <boost/program_options.hpp>
#include <slam6d/io_types.h>
#include <slam6d/globals.icc>
#include <slam6d/scan.h>
#include <scanserver/clientInterface.h>
#include <segmentation/FHGraph.h>
#ifdef _MSC_VER
#define strcasecmp _stricmp
#define strncasecmp _strnicmp
#else
#include <strings.h>
#endif
namespace po = boost::program_options;
using namespace std;
/// validate IO types
/// Custom boost::program_options validator: maps a format name string
/// (e.g. "uos", "riegl_txt") onto the corresponding IOType value.
/// Throws runtime_error when no value is given or the name is unknown.
void validate(boost::any& v, const std::vector<std::string>& values,
	      IOType*, int) {
  if (values.empty())
    throw std::runtime_error("Invalid model specification");

  const string& arg = values.at(0);
  try {
    v = formatname_to_io_type(arg.c_str());
  } catch (...) { // formatname_to_io_type throws on unknown names
    throw std::runtime_error("Format " + arg + " unknown.");
  }
}
/// Parse commandline options
/// Parses the command line, filling all output parameters.
/// Prints usage and exits on --help or when the mandatory positional
/// input directory is missing; guarantees on return that 'dir' ends
/// with a trailing '/'.
void parse_options(int argc, char **argv, int &start, int &end, bool &scanserver, int &max_dist, int &min_dist, string &dir,
		   IOType &iotype, float &sigma, int &k, int &neighbors, float &eps, float &radius, int &min_size)
{
  /// ----------------------------------
  /// set up program commandline options
  /// ----------------------------------
  po::options_description cmd_options("Usage: fhsegmentation <options> where options are (default values in brackets)");
  cmd_options.add_options()
    ("help,?", "Display this help message")
    ("start,s", po::value<int>(&start)->default_value(0), "Start at scan number <arg>")
    ("end,e", po::value<int>(&end)->default_value(-1), "Stop at scan number <arg>")
    ("scanserver,S", po::value<bool>(&scanserver)->default_value(false), "Use the scanserver as an input method")
    ("format,f", po::value<IOType>(&iotype)->default_value(UOS),
     "using shared library <arg> for input. (chose format from [uos|uosr|uos_map|"
     "uos_rgb|uos_frames|uos_map_frames|old|rts|rts_map|ifp|"
     "riegl_txt|riegl_rgb|riegl_bin|zahn|ply])")
    ("max,M", po::value<int>(&max_dist)->default_value(-1),"neglegt all data points with a distance larger than <arg> 'units")
    ("min,m", po::value<int>(&min_dist)->default_value(-1), "neglegt all data points with a distance smaller than <arg> 'units")
    ("K,k", po::value<int>(&k)->default_value(1), "<arg> value of K value used in the FH segmentation")
    ("neighbors,N", po::value<int>(&neighbors)->default_value(1), "use approximate <arg>-nearest neighbors search or limit the number of points")
    ("sigma,v", po::value<float>(&sigma)->default_value(1.0), "Set the Gaussian variance for smoothing to <arg>")
    ("radius,r", po::value<float>(&radius)->default_value(-1.0), "Set the range of radius search to <arg>")
    ("eps,E", po::value<float>(&eps)->default_value(1.0), "Set error threshold used by the AKNN algorithm to <arg>")
    ("minsize,z", po::value<int>(&min_size)->default_value(0), "Keep segments of size at least <arg>")
    ;

  po::options_description hidden("Hidden options");
  hidden.add_options()
    ("input-dir", po::value<string>(&dir), "input dir");

  po::positional_options_description pd;
  pd.add("input-dir", 1);

  po::options_description all;
  all.add(cmd_options).add(hidden);

  po::variables_map vmap;
  po::store(po::command_line_parser(argc, argv).options(all).positional(pd).run(), vmap);
  po::notify(vmap);

  if (vmap.count("help")) {
    cout << cmd_options << endl;
    exit(-1);
  }

  // The input directory is mandatory; without this check the
  // trailing-slash fix below would index into an empty string
  // (undefined behavior).
  if (!vmap.count("input-dir")) {
    cerr << "Error: no input directory specified" << endl;
    cout << cmd_options << endl;
    exit(-1);
  }

  // read scan path: make sure it ends with a '/'
  if (dir[dir.length()-1] != '/') dir = dir + "/";
}
/// distance measures
// Edge weight based on Euclidean distance between the two points only.
double weight1(Point a, Point b)
{
  return a.distance(b);
}
// Edge weight mixing Euclidean distance and absolute reflectance
// difference in equal parts.
double weight2(Point a, Point b)
{
  double dist = a.distance(b);
  double refl = fabs(a.reflectance-b.reflectance);
  return dist * .5 + refl * .5;
}
/// Writes one .pose file per segment, all carrying the originating
/// scan's pose, numbered consecutively outnum .. outnum+num-1.
/// @param dir       base output directory (files go into dir + "segments/")
/// @param rPos      scan position (x, y, z)
/// @param rPosTheta scan orientation angles (written in degrees via deg())
/// @param num       number of pose files to write (= number of segments)
/// @param outnum    index of the first output file
void writePoseFiles(string dir, const double* rPos, const double* rPosTheta, int num, int outnum)
{
  // BUGFIX: the loop condition used to be "i < num" although i starts
  // at outnum, so too few (or zero) files were written once outnum > 0
  // (multiple scans, or a start index != 0).
  for (int i = outnum; i < outnum + num; i++) {
    string poseFileName = dir + "segments/scan" + to_string(i, 3) + ".pose";
    ofstream posout(poseFileName.c_str());

    posout << rPos[0] << " "
           << rPos[1] << " "
           << rPos[2] << endl
           << deg(rPosTheta[0]) << " "
           << deg(rPosTheta[1]) << " "
           << deg(rPosTheta[2]) << endl;
    posout.close();
  }
}
/// Writes one .3d scan file per segment, numbered consecutively
/// starting at outnum; each file lists the segment's points as
/// "x y z" lines.
/// @param dir    base output directory (files go into dir + "segments/")
/// @param outnum index of the first output scan file
/// @param cloud  one point vector per segment
void writeScanFiles(string dir, int outnum, const vector<vector<Point>* > cloud)
{
  // BUGFIX: the loop also required i < cloud.size(); since i starts at
  // outnum, segments were silently dropped whenever outnum > 0.
  for (size_t j = 0; j < cloud.size(); j++) {
    int i = outnum + (int) j;
    vector<Point>* segment = cloud[j];
    string scanFileName = dir + "segments/scan" + to_string(i,3) + ".3d";
    ofstream scanout(scanFileName.c_str());

    for (size_t n = 0; n < segment->size(); n++) {
      Point p = segment->at(n);
      scanout << p.x << " " << p.y << " " << p.z << endl;
    }
    scanout.close();
  }
}
/// =============================================
/// Main
/// =============================================
/// Reads the requested scans, runs FH graph segmentation on each, and
/// writes every resulting segment out as its own scan/pose file pair.
int main(int argc, char** argv)
{
  int start, end;
  bool scanserver;
  int max_dist, min_dist;
  string dir;
  IOType iotype;
  float sigma;
  int k, neighbors;
  float eps;
  float radius;
  int min_size;

  parse_options(argc, argv, start, end, scanserver, max_dist, min_dist,
                dir, iotype, sigma, k, neighbors, eps, radius, min_size);

  /// ----------------------------------
  /// Prepare and read scans
  /// ----------------------------------
  if (scanserver) {
    try {
      ClientInterface::create();
    } catch(std::runtime_error& e) {
      cerr << "ClientInterface could not be created: " << e.what() << endl;
      cerr << "Start the scanserver first." << endl;
      exit(-1);
    }
  }

  /// Make directory for saving the scan segments
  string segdir = dir + "segments";

#ifdef _MSC_VER
  int success = mkdir(segdir.c_str());
#else
  int success = mkdir(segdir.c_str(), S_IRWXU|S_IRWXG|S_IRWXO);
#endif
  if(success == 0) {
    cout << "Writing segments to " << segdir << endl;
  } else if(errno == EEXIST) {
    cout << "WARN: Directory " << segdir << " exists already. Contents will be overwriten" << endl;
  } else {
    cerr << "Creating directory " << segdir << " failed" << endl;
    exit(1);
  }

  /// Read the scans
  Scan::openDirectory(scanserver, dir, iotype, start, end);
  if(Scan::allScans.size() == 0) {
    cerr << "No scans found. Did you use the correct format?" << endl;
    exit(-1);
  }

  /// --------------------------------------------
  /// Initialize and perform segmentation
  /// --------------------------------------------
  std::vector<Scan*>::iterator it = Scan::allScans.begin();
  int outscan = start; // running index for the output scan files

  for( ; it != Scan::allScans.end(); ++it) {
    Scan* scan = *it;
    const double* rPos = scan->get_rPos();
    const double* rPosTheta = scan->get_rPosTheta();

    /// read scan into points
    DataXYZ xyz(scan->get("xyz"));
    vector<Point> points;
    points.reserve(xyz.size());

    for(unsigned int j = 0; j < xyz.size(); j++) {
      Point p(xyz[j][0], xyz[j][1], xyz[j][2]);
      points.push_back(p);
    }

    /// create the graph and get the segments
    cout << "creating graph" << endl;
    FHGraph sgraph(points, weight2, sigma, eps, neighbors, radius);

    cout << "segmenting graph" << endl;
    edge* sedges = sgraph.getGraph();
    universe* segmented = segment_graph(sgraph.getNumPoints(),
                                        sgraph.getNumEdges(),
                                        sedges, k);

    /// post processing: merge any segment smaller than min_size into an
    /// adjacent segment
    cout << "post processing" << endl;
    for (int i = 0; i < sgraph.getNumEdges(); ++i)
    {
      int a = sedges[i].a;
      int b = sedges[i].b;

      int aa = segmented->find(a);
      int bb = segmented->find(b);

      if ( (aa!=bb) &&
           (segmented->size(aa) < min_size ||
            segmented->size(bb) < min_size) )
        segmented->join(aa, bb);
    }
    delete[] sedges;

    int nr = segmented->num_sets();
    cout << "Obtained " << nr << " segment(s)" << endl;

    /// write point clouds with segments
    vector< vector<Point>* > clouds;
    clouds.reserve(nr);
    for (int i=0; i<nr; ++i)
      clouds.push_back( new vector<Point> );
    // maps a disjoint-set representative to its output cloud index
    map<int, int> components2cloud;
    int kk = 0;

    for (int i = 0; i < sgraph.getNumPoints(); ++i)
    {
      int component = segmented->find(i);
      if ( components2cloud.find(component)==components2cloud.end() )
      {
        components2cloud[component] = kk++;
        clouds[components2cloud[component]]->reserve(segmented->size(component));
      }
      clouds[components2cloud[component]]->push_back(sgraph[i]);
    }

    // pose file (repeated for the number of segments)
    writePoseFiles(dir, rPos, rPosTheta, clouds.size(), outscan);
    // scan files for all segments
    writeScanFiles(dir, outscan, clouds);

    outscan += clouds.size();

    /// clean up — the original leaked both the universe and the
    /// per-segment point clouds on every scan iteration
    for (size_t i = 0; i < clouds.size(); ++i)
      delete clouds[i];
    delete segmented;
    sgraph.dispose();
  }

  // shutdown everything
  if (scanserver)
    ClientInterface::destroy();
  else
    Scan::closeDirectory();

  cout << "Normal program end" << endl;

  return 0;
}

@ -0,0 +1,50 @@
/**
* Point Cloud Segmentation using Felzenszwalb-Huttenlocher Algorithm
*
* Copyright (C) Jacobs University Bremen
*
* Released under the GPL version 3.
*
* @author Mihai-Cotizo Sima
*/
#include <segmentation/segment-graph.h>
// Orders edges by ascending weight; used by std::sort in segment_graph()
// so edges are processed in non-decreasing weight order.
bool operator<(const edge &a, const edge &b) {
    return a.w < b.w;
}
/**
 * Felzenszwalb-Huttenlocher graph segmentation.
 *
 * Sorts the edges by non-decreasing weight and greedily merges the two
 * components an edge connects whenever the edge's weight does not
 * exceed either component's adaptive threshold (THRESHOLD from
 * segment-graph.h, driven by the constant c).
 *
 * @param num_vertices number of graph vertices
 * @param num_edges    number of entries in edges
 * @param edges        edge array; reordered (sorted) in place
 * @param c            constant controlling segmentation granularity
 * @return newly allocated disjoint-set forest; caller takes ownership
 */
universe *segment_graph(int num_vertices, int num_edges, edge *edges,
                        float c) {
  // sort edges by weight
  std::sort(edges, edges + num_edges);

  // make a disjoint-set forest
  universe *u = new universe(num_vertices);

  // init thresholds, one per (initially singleton) component
  float *threshold = new float[num_vertices];
  for (int i = 0; i < num_vertices; i++)
    threshold[i] = THRESHOLD(1,c);

  // for each edge, in non-decreasing weight order...
  for (int i = 0; i < num_edges; i++) {
    edge *pedge = &edges[i];

    // components connected by this edge
    int a = u->find(pedge->a);
    int b = u->find(pedge->b);
    if (a != b) {
      if ((pedge->w <= threshold[a]) &&
          (pedge->w <= threshold[b])) {
        u->join(a, b);
        a = u->find(a);
        // raise the merged component's threshold
        threshold[a] = pedge->w + THRESHOLD(u->size(a), c);
      }
    }
  }

  // BUGFIX: threshold was allocated with new[], so it must be released
  // with delete[]; plain delete is undefined behavior here.
  delete[] threshold;
  return u;
}
Loading…
Cancel
Save