update svn to r758

Razvan Mihalyi 2012-11-07 11:27:39 +01:00
parent e469fd8c86
commit 8b4640b056
97 changed files with 21023 additions and 887 deletions


@@ -0,0 +1,88 @@
#pragma once
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cxcore.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include <libconfig.h>
#include "cvpmd.h"
#include "slam6d/icp6Dminimizer.h"
struct PMDFiles {
FILE *i; // intens
FILE *a; // ampl
FILE *p; // 3d pts
FILE *h; // pmd image headers
};
struct TrackingSettings {
int winSz;
double quality;
int minFeatures;
int maxFeatures;
int minDist;
IplImage *pyr;
IplImage *pyrPrev;
IplImage *iCamPrev;
CvPoint2D32f *pts[2];
char *ptsStatus;
int trackingFlags;
};
struct PMDCam {
PMD *_pmd;
CvCapture *_capture;
PMDFiles _f;
CvMat *intrinsicPMD;
CvMat *intrinsicCam;
CvMat *distortionPMD;
CvMat *distortionCam;
CvMat *rotation;
CvMat *translation;
// Undistortion maps
IplImage *_mapXCam;
IplImage *_mapYCam;
IplImage *_mapXPMD;
IplImage *_mapYPMD;
// Data
IplImage *_iCamColorUBuffer;
IplImage *iCamColor;
IplImage *iCam;
IplImage *_iCamColorU;
IplImage *iPMDI;
IplImage *_iPMDIU;
IplImage *iPMDA;
IplImage *_iPMDAU;
CvPoint3D32f **pts;
unsigned int timestamp;
unsigned int timestampUsec;
// Pose Estimation
icp6Dminimizer *icp;
int minPts4Pose;
double maxError;
int minConsensusPts;
int savePoses;
// Settings
ImageHeaderInformation *header;
int synchronous;
int hybrid;
int _offlineMode;
int historyLen;
TrackingSettings _track;
double sigmaDepth;
double sigmaColor;
double dpThreshold;
};
PMDCam *initPMDCam(const char *confPath);
int grabData(PMDCam *pmdc);


@@ -0,0 +1,491 @@
cmake_minimum_required (VERSION 2.8.2)
SET(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/3rdparty/CMakeModules" ${CMAKE_MODULE_PATH})
project (3DTK)
#include_directories(OPENGL_INCLUDE_DIR)
IF(WIN32)
set(Boost_USE_STATIC_LIBS TRUE)
ELSE(WIN32)
set(Boost_USE_STATIC_LIBS FALSE)
ENDIF(WIN32)
SET(Boost_ADDITIONAL_VERSIONS "1.42" "1.42.0" "1.44" "1.44.0" "1.45.0" "1.45" "1.46" "1.46.1" "1.47.0" "1.47" "1.48" "1.49")
IF(WIN32)
# for some unknown reason, no single variant works on all Windows platforms
find_package( Boost COMPONENTS serialization graph regex filesystem system thread date_time REQUIRED)
ELSE(WIN32)
find_package( Boost COMPONENTS serialization graph regex filesystem system thread date_time REQUIRED)
ENDIF(WIN32)
if(Boost_FOUND)
link_directories(${BOOST_LIBRARY_DIRS})
include_directories(${Boost_INCLUDE_DIRS})
add_definitions(${Boost_LIB_DIAGNOSTIC_DEFINITIONS})
endif()
#################################################
# Declare Options and modify build accordingly ##
#################################################
FUNCTION(ENFORCE_OPTION_DEP_3DTK option VALUE)
SET (${option} "${VALUE}" CACHE BOOL "${${option}_DESCRIPTION}" FORCE) # this option set to VALUE as advised
#now make sure other dependencies are also true
FOREACH(d ${${option}_DEPENDENCIES}) # look through all my dependencies
STRING(REGEX REPLACE " +" ";" CMAKE_DEPENDENT_OPTION_DEP "${d}")
# check for a not in front
STRING(STRIP "${CMAKE_DEPENDENT_OPTION_DEP}" CMAKE_DEPENDENT_OPTION_DEP)
STRING(SUBSTRING "${CMAKE_DEPENDENT_OPTION_DEP}" 0 3 CMAKE_DEPENDENT_OPTION_DEP_3)
STRING(TOUPPER "${CMAKE_DEPENDENT_OPTION_DEP_3}" CMAKE_DEPENDENT_OPTION_DEP_3)
STRING(COMPARE EQUAL "${CMAKE_DEPENDENT_OPTION_DEP_3}" "NOT" CMAKE_DEPENDENT_OPTION_DEP_NOT)
#STRING(REPLACE "NOT " "" CMAKE_DEPENDENT_OPTION_DEP "${d}")
IF(CMAKE_DEPENDENT_OPTION_DEP_NOT) # we found a NOT
STRING(REPLACE "NOT;" "" CMAKE_DEPENDENT_OPTION_DEP "${CMAKE_DEPENDENT_OPTION_DEP}")
IF(${CMAKE_DEPENDENT_OPTION_DEP}) # not met, make sure it is
ENFORCE_OPTION_DEP_3DTK(${CMAKE_DEPENDENT_OPTION_DEP} OFF)
ELSE(${CMAKE_DEPENDENT_OPTION_DEP}) # dependency is met
ENDIF(${CMAKE_DEPENDENT_OPTION_DEP})
ELSE(CMAKE_DEPENDENT_OPTION_DEP_NOT)
IF(${CMAKE_DEPENDENT_OPTION_DEP}) # dependency is met
ELSE(${CMAKE_DEPENDENT_OPTION_DEP}) # not met, make sure it is
ENFORCE_OPTION_DEP_3DTK(${CMAKE_DEPENDENT_OPTION_DEP} ON)
ENDIF(${CMAKE_DEPENDENT_OPTION_DEP})
ENDIF(CMAKE_DEPENDENT_OPTION_DEP_NOT)
ENDFOREACH(d)
ENDFUNCTION(ENFORCE_OPTION_DEP_3DTK)
MACRO(OPT_DEP option doc default depends)
OPTION(${option} "${doc}" "${default}")
SET(${option} "${${option}}" CACHE BOOL "${doc}" FORCE)
SET(${option}_DEPENDENCIES "${depends}" CACHE INTERNAL "" FORCE)
SET(${option}_DESCRIPTION "${doc}" CACHE INTERNAL "" FORCE)
IF (${option})
#MESSAGE(STATUS "Yes ${option} is true")
# MESSAGE("FOREACH d in ${depends}")
FOREACH(d ${depends})
STRING(REGEX REPLACE " +" ";" CMAKE_DEPENDENT_OPTION_DEP "${d}")
# check for a not in front
STRING(STRIP "${CMAKE_DEPENDENT_OPTION_DEP}" CMAKE_DEPENDENT_OPTION_DEP)
STRING(SUBSTRING "${CMAKE_DEPENDENT_OPTION_DEP}" 0 3 CMAKE_DEPENDENT_OPTION_DEP_3)
STRING(TOUPPER "${CMAKE_DEPENDENT_OPTION_DEP_3}" CMAKE_DEPENDENT_OPTION_DEP_3)
STRING(COMPARE EQUAL "${CMAKE_DEPENDENT_OPTION_DEP_3}" "NOT" CMAKE_DEPENDENT_OPTION_DEP_NOT)
IF(CMAKE_DEPENDENT_OPTION_DEP_NOT) # we found a NOT
STRING(REPLACE "NOT;" "" CMAKE_DEPENDENT_OPTION_DEP "${CMAKE_DEPENDENT_OPTION_DEP}")
IF(${CMAKE_DEPENDENT_OPTION_DEP}) # not met, make sure it is
ENFORCE_OPTION_DEP_3DTK(${CMAKE_DEPENDENT_OPTION_DEP} OFF)
ELSE(${CMAKE_DEPENDENT_OPTION_DEP}) # dependency is met
ENDIF(${CMAKE_DEPENDENT_OPTION_DEP})
ELSE(CMAKE_DEPENDENT_OPTION_DEP_NOT)
IF(${CMAKE_DEPENDENT_OPTION_DEP}) # dependency is met
ELSE(${CMAKE_DEPENDENT_OPTION_DEP}) # not met, make sure it is
ENFORCE_OPTION_DEP_3DTK(${CMAKE_DEPENDENT_OPTION_DEP} ON)
ENDIF(${CMAKE_DEPENDENT_OPTION_DEP})
ENDIF(CMAKE_DEPENDENT_OPTION_DEP_NOT)
ENDFOREACH(d)
ENDIF(${option})
ENDMACRO(OPT_DEP)
## FreeGLUT
OPT_DEP(WITH_FREEGLUT "Whether freeglut is available. This enables iterative drawing in show. ON/OFF" ON "")
IF(WITH_FREEGLUT)
MESSAGE(STATUS "With freeglut")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DWITH_FREEGLUT")
ELSE(WITH_FREEGLUT)
MESSAGE(STATUS "Without freeglut")
ENDIF(WITH_FREEGLUT)
## Show
OPT_DEP(WITH_SHOW "Whether to build Show. This is the Visualization program of slam6d. ON/OFF" ON "" )
IF(WITH_SHOW)
FIND_PACKAGE(OpenGL REQUIRED)
FIND_PACKAGE(GLUT REQUIRED)
MESSAGE(STATUS "With show")
ELSE(WITH_SHOW)
# SET (WITH_OCTREE_DISPLAY "ON" CACHE INTERNAL "" FORCE)
MESSAGE(STATUS "Without show")
ENDIF(WITH_SHOW)
## WXShow
OPT_DEP(WITH_WXSHOW "Whether to build WXShow. This is the wxwidgets variant of Show. ON/OFF" OFF "")
IF(WITH_WXSHOW)
FIND_PACKAGE(OpenGL REQUIRED)
FIND_PACKAGE(GLUT REQUIRED)
find_package(wxWidgets COMPONENTS core base gl REQUIRED)
# set wxWidgets_wxrc_EXECUTABLE to be ignored in the configuration
SET (wxWidgets_wxrc_EXECUTABLE " " CACHE INTERNAL "" FORCE)
# wxWidgets include (this will do all the magic to configure everything)
include( ${wxWidgets_USE_FILE})
MESSAGE(STATUS "With wxshow")
ELSE(WITH_WXSHOW)
MESSAGE(STATUS "Without wxshow")
ENDIF(WITH_WXSHOW)
## Shapes
OPT_DEP(WITH_SHAPE_DETECTION "Whether to build shapes and planes executable for detecting planes. ON/OFF" OFF "")
IF(WITH_SHAPE_DETECTION)
MESSAGE(STATUS "With shape detection")
ELSE(WITH_SHAPE_DETECTION)
MESSAGE(STATUS "Without shape detection")
ENDIF(WITH_SHAPE_DETECTION)
## Interior reconstruction
OPT_DEP(WITH_MODEL "Whether to build model executable for modelling interior environments. ON/OFF" OFF "")
if(WITH_MODEL)
message(STATUS "With interior reconstruction")
else(WITH_MODEL)
message(STATUS "Without interior reconstruction")
endif(WITH_MODEL)
## Thermo
OPT_DEP(WITH_THERMO "Whether to build executables for mutual calibration of laser scanner and camera. ON/OFF" OFF "WITH_SHAPE_DETECTION")
IF(WITH_THERMO)
#for OpenCV 2.1
FIND_PACKAGE(OpenCV REQUIRED)
include("3rdparty/CMakeModules/OpenCV.cmake")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ADDITIONAL_OPENCV_FLAGS}")
add_subdirectory(3rdparty/cvblob)
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/cvblob)
link_directories(${CMAKE_SOURCE_DIR}/3rdparty/cvblob)
MESSAGE(STATUS "With thermo")
ELSE(WITH_THERMO)
MESSAGE(STATUS "Without thermo")
ENDIF(WITH_THERMO)
## Octree
OPT_DEP(WITH_COMPACT_OCTREE "Whether to use the compact octree display ON/OFF" OFF "")
IF(WITH_COMPACT_OCTREE)
MESSAGE(STATUS "Using compact octrees")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_COMPACT_TREE")
ELSE(WITH_COMPACT_OCTREE)
MESSAGE(STATUS "Not using compact octrees: Warning, uses more memory")
ENDIF(WITH_COMPACT_OCTREE)
## Glee?
OPT_DEP(WITH_GLEE "Whether to use OpenGL extensions, requires glee. ON/OFF" OFF "")
IF(WITH_GLEE)
MESSAGE(STATUS "Using opengl extensions")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DWITH_GLEE")
ELSE(WITH_GLEE)
MESSAGE(STATUS "Not using opengl extensions")
ENDIF(WITH_GLEE)
## Gridder
OPT_DEP(WITH_GRIDDER "Whether to build the 2DGridder binary ON/OFF" OFF "")
IF(WITH_GRIDDER)
MESSAGE(STATUS "With 2DGridder")
ELSE(WITH_GRIDDER)
MESSAGE(STATUS "Without 2DGridder")
ENDIF(WITH_GRIDDER)
## Dynamic VELOSLAM
OPT_DEP(WITH_VELOSLAM "Whether to build the Velodyne data processing (veloslam/veloshow) ON/OFF" OFF "WITH_SHOW")
IF(WITH_VELOSLAM)
MESSAGE(STATUS "With VELOSLAM")
ELSE(WITH_VELOSLAM)
MESSAGE(STATUS "Without VELOSLAM")
ENDIF(WITH_VELOSLAM)
## Home-made Laserscanner
OPT_DEP(WITH_DAVID_3D_SCANNER "Whether to build the David scanner app for homemade laser scanners binary ON/OFF" OFF "")
IF(WITH_DAVID_3D_SCANNER)
MESSAGE(STATUS "With David scanner")
ELSE(WITH_DAVID_3D_SCANNER)
MESSAGE(STATUS "Without David scanner")
ENDIF(WITH_DAVID_3D_SCANNER)
## Tools
OPT_DEP(WITH_TOOLS "Whether to build additional tools like convergence frame_to_graph etc. ON/OFF" OFF "WITH_FBR")
IF(WITH_TOOLS)
MESSAGE(STATUS "With Tools")
find_package (Boost COMPONENTS program_options REQUIRED)
ELSE(WITH_TOOLS)
MESSAGE(STATUS "Without Tools")
ENDIF(WITH_TOOLS)
## Segmentation
OPT_DEP(WITH_SEGMENTATION "Whether to build scan segmentation program ON/OFF" OFF "WITH_FBR")
IF(WITH_SEGMENTATION)
MESSAGE(STATUS "With segmentation")
find_package (Boost COMPONENTS program_options REQUIRED)
ELSE(WITH_SEGMENTATION)
MESSAGE(STATUS "Without segmentation")
ENDIF(WITH_SEGMENTATION)
## Normals
OPT_DEP(WITH_NORMALS "Whether to build program for normal computation ON/OFF" OFF "WITH_FBR")
IF(WITH_NORMALS)
MESSAGE(STATUS "With normals")
ELSE(WITH_NORMALS)
MESSAGE(STATUS "Without normals")
ENDIF(WITH_NORMALS)
## RivLib
OPT_DEP(WITH_RIVLIB "Whether the RIEGL rivlib is present ON/OFF" OFF "")
IF(WITH_RIVLIB)
MESSAGE(STATUS "Compiling a scan IO for RXP files")
include_directories(${CMAKE_SOURCE_DIR}/3rdparty)
link_directories(${CMAKE_SOURCE_DIR}/3rdparty)
SET(RIEGL_DIR ${CMAKE_SOURCE_DIR}/3rdparty/riegl/)
IF(WIN32)
SET(RIVLIB ${RIEGL_DIR}libscanlib-mt.lib ${RIEGL_DIR}libctrllib-mt.lib ${RIEGL_DIR}libboost_system-mt-1_43_0-vns.lib)
ELSE(WIN32)
SET(RIVLIB ${RIEGL_DIR}libscanlib-mt-s.a ${RIEGL_DIR}libctrllib-mt-s.a ${RIEGL_DIR}libboost_system-mt-s-1_43_0-vns.a pthread)
ENDIF(WIN32)
FIND_PACKAGE(LibXml2 )
ELSE(WITH_RIVLIB)
MESSAGE(STATUS "Do NOT compile a scan IO for RXP")
ENDIF(WITH_RIVLIB)
## CUDA support, TODO depend on CUDA_FIND
OPT_DEP(WITH_CUDA "Compile with CUDA support" OFF "")
IF(WITH_CUDA)
MESSAGE(STATUS "Compiling WITH CUDA support")
FIND_PACKAGE(CUDA)
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DWITH_CUDA")
ELSE(WITH_CUDA)
MESSAGE(STATUS "Compiling WITHOUT CUDA support")
ENDIF(WITH_CUDA)
## PMD
OPT_DEP(WITH_PMD "Whether to build the PMD tools like grabVideoAnd3D calibrate etc. ON/OFF" OFF "")
IF(WITH_PMD)
FIND_PACKAGE(OpenGL REQUIRED)
MESSAGE(STATUS "With PMD tools")
ELSE(WITH_PMD)
MESSAGE(STATUS "Without PMD tools")
ENDIF(WITH_PMD)
## FBR
OPT_DEP(WITH_FBR "Whether to compile feature based registration. ON/OFF" OFF "")
IF(WITH_FBR)
FIND_PACKAGE(OpenCV REQUIRED)
include("3rdparty/CMakeModules/OpenCV.cmake")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ADDITIONAL_OPENCV_FLAGS}")
MESSAGE(STATUS "With FBR ")
ELSE(WITH_FBR)
MESSAGE(STATUS "Without FBR")
ENDIF(WITH_FBR)
# OPEN
FIND_PACKAGE(OpenMP)
IF(OPENMP_FOUND)
OPT_DEP(WITH_OPENMP "Whether to use parallel processing capabilities of OPENMP. ON/OFF" ON "")
ENDIF(OPENMP_FOUND)
IF(OPENMP_FOUND AND WITH_OPENMP)
MESSAGE(STATUS "With OpenMP ")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMAX_OPENMP_NUM_THREADS=${NUMBER_OF_CPUS} -DOPENMP_NUM_THREADS=${NUMBER_OF_CPUS} ${OpenMP_CXX_FLAGS} -DOPENMP")
ELSE(OPENMP_FOUND AND WITH_OPENMP)
MESSAGE(STATUS "Without OpenMP")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMAX_OPENMP_NUM_THREADS=1 -DOPENMP_NUM_THREADS=1")
ENDIF(OPENMP_FOUND AND WITH_OPENMP)
## TORO
OPT_DEP(WITH_TORO "Whether to use TORO. ON/OFF" OFF "")
IF(WITH_TORO)
IF(WIN32)
SET(Subversion_SVN_EXECUTABLE "svn.exe")
ENDIF(WIN32)
cmake_minimum_required (VERSION 2.8)
include(ExternalProject)
ExternalProject_Add(toro3d
SVN_REPOSITORY https://www.openslam.org/data/svn/toro/trunk
SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/toro"
CONFIGURE_COMMAND ""
BUILD_COMMAND make
BUILD_IN_SOURCE 1
INSTALL_COMMAND cp ${CMAKE_SOURCE_DIR}/3rdparty/toro/toro3d ${CMAKE_SOURCE_DIR}/bin/
)
MESSAGE(STATUS "With TORO ")
ELSE(WITH_TORO)
MESSAGE(STATUS "Without TORO")
ENDIF(WITH_TORO)
## HOGMAN
OPT_DEP(WITH_HOGMAN "Whether to use HOGMAN. ON/OFF" OFF "")
IF(WITH_HOGMAN)
# dependent on libqt4-dev
find_package( Qt4 REQUIRED)
# CMake of earlier versions do not have external project capabilities
cmake_minimum_required (VERSION 2.8)
include(ExternalProject)
ExternalProject_Add(hogman
SVN_REPOSITORY https://svn.openslam.org/data/svn/hog-man/trunk
SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/hogman"
CONFIGURE_COMMAND <SOURCE_DIR>/configure --prefix=<INSTALL_DIR>
BUILD_COMMAND LD_LIBRARY_PATH=${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib make
BUILD_IN_SOURCE 1
INSTALL_COMMAND cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/bin/hogman3d ${CMAKE_SOURCE_DIR}/bin/ &&
cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib/libhogman_csparse.so ${CMAKE_SOURCE_DIR}/lib/ &&
cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib/libhogman_graph_optimizer_hogman.so ${CMAKE_SOURCE_DIR}/lib/ &&
cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib/libhogman_graph_optimizer.so ${CMAKE_SOURCE_DIR}/lib/ &&
cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib/libhogman_graph.so ${CMAKE_SOURCE_DIR}/lib/ &&
cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib/libhogman_graph_viewer.so ${CMAKE_SOURCE_DIR}/lib/ &&
cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib/libhogman_math.so ${CMAKE_SOURCE_DIR}/lib/ &&
cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib/libhogman_qglviewer.so ${CMAKE_SOURCE_DIR}/lib/ &&
cp ${CMAKE_SOURCE_DIR}/3rdparty/hogman/lib/libhogman_stuff.so ${CMAKE_SOURCE_DIR}/lib/
)
MESSAGE(STATUS "With HOGMAN: Currently hogman needs to be compiled manually, please make sure hogman3d is somewhere in your PATH")
ELSE(WITH_HOGMAN)
MESSAGE(STATUS "Without HOGMAN")
ENDIF(WITH_HOGMAN)
OPT_DEP(EXPORT_SHARED_LIBS "Whether to build additional shared libraries for use in other projects. ON/OFF" OFF "WITH_SHOW;WITH_FBR")
IF(EXPORT_SHARED_LIBS)
## Compile a single shared library containing all of 3DTK
add_library(slam SHARED src/slam6d/icp6D.cc)
target_link_libraries(slam scan_s ANN_s sparse_s newmat_s show_s fbr_s)
MESSAGE(STATUS "exporting additional libraries")
ELSE(EXPORT_SHARED_LIBS)
MESSAGE(STATUS "not exporting libraries")
ENDIF(EXPORT_SHARED_LIBS)
OPT_DEP(WITH_METRICS "Whether to use metrics in slam6d. ON/OFF" OFF "")
IF(WITH_METRICS)
MESSAGE(STATUS "With metrics in slam6d.")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DWITH_METRICS")
ELSE(WITH_METRICS)
MESSAGE(STATUS "Without metrics in slam6d.")
ENDIF(WITH_METRICS)
#################################################
# OPERATING SYSTEM SPECIFIC BEHAVIOUR ##
#################################################
## Special treatment for system specifics
IF(APPLE)
add_definitions(-Dfopen64=fopen)
ENDIF(APPLE)
## Multiple Cores
IF(APPLE)
SET(PROCESSOR_COUNT 2)
ELSE(APPLE)
INCLUDE(CountProcessors)
SET(NUMBER_OF_CPUS "${PROCESSOR_COUNT}" CACHE STRING "The number of processors to use (default: ${PROCESSOR_COUNT})" )
ENDIF(APPLE)
IF(WIN32)
SET(ADDITIONAL_CFLAGS "-O2" CACHE STRING "Additional flags given to the compiler ( -O2)" )
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/windows/)
link_directories(${CMAKE_SOURCE_DIR}/3rdparty/windows)
link_directories(${CMAKE_SOURCE_DIR}/3rdparty/windows/x64)
add_library(XGetopt STATIC ${CMAKE_SOURCE_DIR}/3rdparty/windows/XGetopt.cpp)
SET(CMAKE_STATIC_LIBRARY_SUFFIX "32.lib")
ELSE(WIN32)
SET(ADDITIONAL_CFLAGS "-O3 -std=c++0x -msse3 -Wall -finline-functions -Wno-unused-but-set-variable -Wno-write-strings -Wno-char-subscripts -Wno-unused-result" CACHE STRING "Additional flags given to the compiler (-O3 -Wall -finline-functions -Wno-write-strings)" )
# Add include path for OpenGL without GL/-prefix
# to avoid the include incompatibility between MACOS
# and linux
FIND_PATH(OPENGL_INC gl.h /usr/include/GL)
include_directories(${OPENGL_INC})
ENDIF(WIN32)
# Add OpenGL includes for MACOS if needed
# The OSX OpenGL frameworks natively supports freeglut extensions
IF(APPLE)
include_directories(/System/Library/Frameworks/GLUT.framework/Headers)
include_directories(/System/Library/Frameworks/OpenGL.framework/Headers)
ENDIF(APPLE)
# hack to "circumvent" Debug and Release folders that are created under visual studio
# this is why the INSTALL target has to be used in visual studio
IF(MSVC)
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/bin/Release/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
IF( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/x64/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ELSE( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ENDIF(CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/bin/Debug/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
IF( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/x64/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ELSE( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ENDIF(CMAKE_SIZEOF_VOID_P EQUAL 8 )
ENDIF(MSVC)
#################################################
# GENERAL PROJECT SETTINGS ##
#################################################
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ADDITIONAL_CFLAGS}")
# Hide CMake variables
SET (CMAKE_INSTALL_PREFIX "/usr/local" CACHE INTERNAL "" FORCE)
SET (CMAKE_BUILD_TYPE "" CACHE INTERNAL "" FORCE)
# Set output directories for libraries and executables
SET( CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_SOURCE_DIR}/lib )
SET( CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_SOURCE_DIR}/obj )
SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_SOURCE_DIR}/bin )
# Set include and link dirs ...
include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/)
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/glui)
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/wxthings/include/)
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/ann_1.1.1_modified/include)
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/ann_1.1.1_modified/src)
link_directories(${CMAKE_SOURCE_DIR}/obj)
link_directories(${CMAKE_SOURCE_DIR}/lib)
add_subdirectory(3rdparty)
add_subdirectory(src/slam6d)
add_subdirectory(src/scanio)
add_subdirectory(src/scanserver)
add_subdirectory(src/segmentation)
add_subdirectory(src/normals)
add_subdirectory(src/veloslam)
add_subdirectory(src/show)
add_subdirectory(src/grid)
add_subdirectory(src/pmd)
add_subdirectory(src/shapes)
add_subdirectory(src/thermo)
add_subdirectory(src/slam6d/fbr)
add_subdirectory(src/scanner)
add_subdirectory(src/model)
MESSAGE (STATUS "Build environment is set up!")


@@ -0,0 +1,112 @@
/*
* grabFramesPMD implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include <stdio.h>
#include <stdlib.h>
#include "pmdsdk2.h"
#include "cvpmd.h"
/* TODO:
* flags:
* subpixel
* camera id
* flip x and y
* pmd mode?
*/
void usage(char *progName) {
printf("%s <board-size-x> <board-size-y> <camera-id>\n", progName);
printf("i.e.: %s 6 4 0\n", progName);
printf("press space to detect chessboard and (again) to proceed.\n");
}
int main(int argc, char **argv) {
if(argc < 4) {
usage(argv[0]);
exit(1);
}
PMD *pmd = initPMD("../o3d.L32.pcp", "192.168.0.69");
IplImage *img = cvCreateImage(pmdGetSize(pmd), 8, 1);
pmdUpdate(pmd->hnd);
pmdQueryImage(pmd, img);
IplImage *imgColor = cvCreateImage(cvGetSize(img), 8, 3);
int patx = atoi(argv[1]);
int paty = atoi(argv[2]);
CvSize patternSize = cvSize(patx, paty);
int cornersTotal = patternSize.width * patternSize.height;
CvPoint2D32f *corners = (CvPoint2D32f*) malloc(cornersTotal * sizeof(CvPoint2D32f));
cvNamedWindow("Camera", 0);
int imageCnt = 0;
bool grabFrame = false;
char *filename = (char*) malloc(16 * sizeof(char)); // "imageNN.jpg" is 11 characters plus the terminating NUL
while(1) {
pmdUpdate(pmd->hnd);
pmdQueryImage(pmd, img);
cvFlip(img, 0, 1); // flip_mode = 1 mirrors the image around the y-axis (horizontal flip)
if(grabFrame) {
/* ----- Chessboard detection -----
-------------------------------- */
int cornersCount; // should be the same for wcam and pmd and equal totalCorners
int found = cvFindChessboardCorners(img, patternSize, corners,
&cornersCount, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
cvFindCornerSubPix(img, corners, cornersCount, cvSize(4,4), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
if(found && (cornersCount == cornersTotal)) {
cvCvtColor(img, imgColor, CV_GRAY2BGR);
cvDrawChessboardCorners(imgColor, patternSize, corners, cornersCount, found);
cvShowImage("Camera", imgColor);
printf("grab?\n");
int k = cvWaitKey(0);
if(k == (int)' ') {
imageCnt++;
sprintf(filename, "image%02i.jpg", imageCnt); //TODO:try png
printf("saved %s.\n", filename);
cvSaveImage(filename, img);
grabFrame = false;
continue;
}
}
}
cvShowImage("Camera", img);
if((int)' ' == cvWaitKey(5)) grabFrame = true;
}
return 0;
}

File diff suppressed because it is too large


@@ -0,0 +1,35 @@
#pragma once
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cxcore.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
struct Frame {
CvMat *trn;
CvMat *rot;
IplImage *img;
CvPoint **status;
CvPoint3D32f **pts;
CvSize sz;
double alignError;
};
Frame *allocFrame3DData(CvSize pmdSz);
void fillFrame(Frame *f, IplImage *img, CvSize pmdSz, CvPoint3D32f **pts, CvPoint **status
, CvMat *rot, CvMat *trn, double alignError);
void releaseFrame(Frame **f);
struct History {
Frame *frame;
History *prev;
};
History *createHistory();
History *addFrame(History *h, Frame *f);
void releaseTail(History *h);
void checkHistoryLen(History *h, int maxLen);


@@ -0,0 +1,648 @@
/*
* David Scanner implementation
*
* Copyright (C) Vladislav Perelman
*
* Released under the GPL version 3.
*
*/
/*
* david_scanner.cc
* Program takes as an input path to the config file which needs to
* have all the necessary information for the program.
* Config file has to have (each on a new line, 9 lines in total):
*
* Path to the directory where frames from the video are stored
* The first frame that has to be used
* The last frame that has to be used
* The empty frame without the laser
* Path to the file with intrinsics of the camera
* Path to the rotation of the left board
* Path to the rotation of the right board
* Path to the translation of the left board
* Path to the translation of the right board
*
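* A hypothetical example config (the paths, frame numbers and file names
* below are made up for illustration; only the order and the 9-line layout
* described above matter):
*
*   /data/david/frames
*   10
*   250
*   2
*   /data/david/calib/intrinsics.xml
*   /data/david/calib/rotation_left.xml
*   /data/david/calib/rotation_right.xml
*   /data/david/calib/translation_left.xml
*   /data/david/calib/translation_right.xml
*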
* Program computes the 3D point cloud of the object and stores it in the
* file scan000.3d, each point in the cloud is represented by the line
* in the file:
* x y z r g b
*
*
* Created on: Oct 4, 2010
* Author: Vladislav Perelman v.perelman@jacobs-university.de
*/
#include <iostream>
#include <string>
#include <fstream>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <cxcore.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include <math.h>
#include <vector>
#define PI 3.14159265
using namespace std;
int main(int argc, char** argv){
if (argc!=2){
cout<<"USAGE: david_scanner config_file\nConfig file should contain path_to_frames first_valid_frame last_valid_frame empty_frame path_to_intrinsics "
"path_to_rotation_left path_to_rotation_right path_to_translation_left and path_to_translation_right each on a new line!"<<endl;
return -1;
}
//******Reading Input********
ifstream indata;
indata.open(argv[1]);
if (!indata){
cout<<"Config file could not be opened"<<endl;
return -1;
}
string line;
int numlines=0;
while( getline(indata, line) ) numlines++;
if (numlines != 9) {
cout<<"Invalid number of lines in a config file!\nConfig file should contain path_to_frames first_valid_frame last_valid_frame empty_frame path_to_intrinsics "
"path_to_rotation_left path_to_rotation_right path_to_translation_left and path_to_translation_right each on a new line!";
return -1;
}
indata.clear();
indata.seekg(0);
char path[200];
indata.getline(path,200);
char first_c[10];
char last_c[10];
char empty_c[10];
indata.getline(first_c,10);
indata.getline(last_c,10);
indata.getline(empty_c,10);
int first = atoi(first_c);
int last = atoi(last_c);
int empty = atoi(empty_c);
char intrinsics_path[200];
char rot_left[200];
char rot_right[200];
char tran_left[200];
char tran_right[200];
indata.getline(intrinsics_path,200);
indata.getline(rot_left,200);
indata.getline(rot_right,200);
indata.getline(tran_left,200);
indata.getline(tran_right,200);
//*********done************
//loading an empty frame
IplImage* image_empty;
IplImage* image;
char empty_name[100];
sprintf(empty_name,"%s/%08d.ppm",path,empty);
if ((image_empty=cvLoadImage(empty_name,1))==NULL){
cout<<"Cannot load empty frame...check input name"<<endl;
return -1;
}
//*******LOADING CAMERA PARAMETERS + CREATING MATRICES FOR FUTURE USE*********
CvMat *intrinsic = cvCreateMat(3,3,CV_32F);
if ((intrinsic = (CvMat*)cvLoad( intrinsics_path ))==NULL){
cout<<"Cannot load intrinsic parameters...check input path and file name"<<endl;
return -1;
}
//loading R1
CvMat* rotation_left = cvCreateMat(3,1,CV_32F);
if ((rotation_left = (CvMat*)cvLoad( rot_left ))==NULL){
cout<<"Cannot load rotation of the left board...check input"<<endl;
return -1;
}
//loading T1
CvMat* translation_left = cvCreateMat(3,1,CV_32F);
if ((translation_left= (CvMat*)cvLoad( tran_left ))==NULL){
cout<<"Cannot load translation of the left board...check input"<<endl;
return -1;
}
CvMat* rotation_matrix_left = cvCreateMat( 3, 3, CV_32F );
cvRodrigues2(rotation_left, rotation_matrix_left);
//loading R2
CvMat* rotation_right = cvCreateMat(3,1,CV_32F);
if ((rotation_right = (CvMat*)cvLoad( rot_right ))==NULL){
cout<<"Cannot load rotation of the right board...check input"<<endl;
return -1;
}
//loading T2
CvMat* translation_right = cvCreateMat(3,1,CV_32F);
if((translation_right=(CvMat*)cvLoad( tran_right ))==NULL){
cout<<"Cannot load translation of the right board...check input"<<endl;
return -1;
}
CvMat* rotation_matrix_right = cvCreateMat( 3, 3, CV_32F );
cvRodrigues2(rotation_right, rotation_matrix_right);
//creating [R1|T1]
CvMat* r1t1 = cvCreateMat( 3, 4, CV_32F );
for (int i = 0; i < 3; i++){
CV_MAT_ELEM( *r1t1, float, i, 0) = CV_MAT_ELEM( *rotation_matrix_left, float, i, 0);
CV_MAT_ELEM( *r1t1, float, i, 1) = CV_MAT_ELEM( *rotation_matrix_left, float, i, 1);
CV_MAT_ELEM( *r1t1, float, i, 2) = CV_MAT_ELEM( *rotation_matrix_left, float, i, 2);
CV_MAT_ELEM( *r1t1, float, i, 3) = CV_MAT_ELEM( *translation_left, float, i, 0);
}
//creating [R2|T2]
CvMat* r2t2 = cvCreateMat( 3, 4, CV_32F );
for (int i = 0; i < 3; i++){
CV_MAT_ELEM( *r2t2, float, i, 0) = CV_MAT_ELEM( *rotation_matrix_right, float, i, 0);
CV_MAT_ELEM( *r2t2, float, i, 1) = CV_MAT_ELEM( *rotation_matrix_right, float, i, 1);
CV_MAT_ELEM( *r2t2, float, i, 2) = CV_MAT_ELEM( *rotation_matrix_right, float, i, 2);
CV_MAT_ELEM( *r2t2, float, i, 3) = CV_MAT_ELEM( *translation_right, float, i, 0);
}
//creating R1.i()
CvMat* r1inv = cvCreateMat( 3, 3, CV_32F );
cvInvert(rotation_matrix_left, r1inv);
//creating A.i()
CvMat* intrinsicinv = cvCreateMat( 3, 3, CV_32F );
cvInvert(intrinsic, intrinsicinv);
//creating R1.i()*A.i()
CvMat* R1iAi = cvCreateMat( 3, 3, CV_32F );
cvMatMul(r1inv, intrinsicinv, R1iAi);
//creating R2.i()
CvMat* r2inv = cvCreateMat( 3, 3, CV_32F );
cvInvert(rotation_matrix_right, r2inv, CV_LU);
//creating R2.i()*A.i()
CvMat* R2iAi = cvCreateMat( 3, 3, CV_32F );
cvMatMul(r2inv, intrinsicinv, R2iAi);
//creating R1.i()*T1
CvMat* a1 = cvCreateMat(3, 1, CV_32F);
cvMatMul(r1inv, translation_left, a1);
//creating R2.i()*T2
CvMat* a2 = cvCreateMat(3, 1, CV_32F);
cvMatMul(r2inv, translation_right, a2);
//*****************DONE********************
//open file for writing
ofstream scanfile;
char scanname[20];
sprintf(scanname,"scan000.3d");
scanfile.open(scanname);
//for loop going through each frame in the provided folder between first_valid_frame and last_valid_frame
for (int m=first; m<last; m++){
char name[100];
sprintf(name, "%s/%08d.ppm", path,m);
cout<<name<<endl;
if ((image =cvLoadImage(name))==NULL){
cout<<"cannot load image: "<<name<<endl;
continue;
}
//do difference between current frame and the empty frame with no laser
IplImage* diff = cvCloneImage(image);
cvAbsDiff(image_empty, image, diff);
//focus on the red pixels, make others black
unsigned char* pixels = (unsigned char*)diff->imageData;
for (int row = 0; row < diff->height; row++){
for (int col = 0; col < diff->width; col++){
int R;
R = pixels[ row * diff->widthStep + col * 3 + 2 ];
if (R>30) {
pixels[ row * diff->widthStep + col * 3 + 0 ] = 0;
pixels[ row * diff->widthStep + col * 3 + 1 ] = 0;
pixels[ row * diff->widthStep + col * 3 + 2 ] = 255;
} else {
pixels[ row * diff->widthStep + col * 3 + 0 ] = 0;
pixels[ row * diff->widthStep + col * 3 + 1 ] = 0;
pixels[ row * diff->widthStep + col * 3 + 2 ] = 0;
}
}
}
//remove pixels that don't have at least 2 red neighbors
for (int row = 1; row < diff->height-1; row++){
for (int col = 1; col < diff->width-1; col++){
int R = pixels[ row * diff->widthStep + col * 3 + 2 ];
if (R == 255){
int r1 = pixels[ (row-1)*diff->widthStep + col * 3 + 2];
int r2 = pixels[ (row-1)*diff->widthStep + (col-1) * 3 + 2];
int r3 = pixels[ (row-1)*diff->widthStep + (col+1) * 3 + 2];
int r4 = pixels[ (row+1)*diff->widthStep + col * 3 + 2];
int r5 = pixels[ (row+1)*diff->widthStep + (col-1) * 3 + 2];
int r6 = pixels[ (row+1)*diff->widthStep + (col+1) * 3 + 2];
int r7 = pixels[ (row)*diff->widthStep + (col-1) * 3 + 2];
int r8 = pixels[ (row)*diff->widthStep + (col+1) * 3 + 2];
if (r1+r2+r3+r4+r5+r6+r7+r8<=255) pixels[ row * diff->widthStep + col * 3 + 2 ]=0;
}
}
}
//*****finding 2 lines on the image*****
bool good = false;
int threshold = 50; //original threshold for Hough transform, incremented if too many groups of lines found
IplImage* color_dst;
IplImage* tmpImage;
int minX1, minX2, maxX1, maxX2;
CvSeq* lines = 0;
CvPoint* line1;
CvPoint* line2;
int count_groups;
//incrementing thresholds until only 2 groups of lines can be found
while(!good){
good = true;
count_groups = 0; //counter for number of line groups. Line group is defined by the slope
double epsilon = 1.5; //error margin for the slope
color_dst = cvCloneImage(diff); // cvCloneImage allocates a matching image; a separate cvCreateImage here would only leak
tmpImage = cvCreateImage(cvGetSize(diff), IPL_DEPTH_8U, 1);
cvCvtColor(diff, tmpImage, CV_RGB2GRAY);
IplImage* dst = cvCreateImage( cvGetSize(diff), 8, 1 );
cvCanny(tmpImage, dst, 20, 60, 3 );
CvMemStorage* storage = cvCreateMemStorage(0);
//find all lines using Hough transform
lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180,threshold, 150, 100 );
double first_group, second_group;
for(int i = 0; i < lines->total; i++ ){
//get the slope of the line, check if it belongs to an already existing group
CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
double angle = atan((double)(line[1].x-line[0].x)/(double)(line[1].y-line[0].y))*180/PI;
//starting first group
if (count_groups==0){
first_group = angle;
line1 = line;
minX1 = line[0].x;
maxX1 = line[1].x;
count_groups++;
} else {
if (angle-first_group<epsilon && angle-first_group>(epsilon*-1)){
//line belongs to the first group of line..that's good
if (line[0].x<minX1)minX1=line[0].x;
if (line[1].x>maxX1)maxX1=line[1].x;
} else {
//check if belongs to the second group
if ( count_groups == 2 ){
if (angle-second_group<epsilon && angle - second_group>(epsilon*-1)){
if (line[0].x<minX2)minX2=line[0].x;
if (line[1].x>maxX2)maxX2=line[1].x;
}else{
//if not then try again with a higher threshold
good = false;
threshold+=20;
cout<<"Increased threshold: "<<threshold<<" ";
cvReleaseImage(&color_dst);
cvReleaseImage(&tmpImage);
cvReleaseImage(&dst);
break; //get out of here and increase the threshold since too many lines were found
}
} else { //starting second group
second_group = angle;
minX2 = line[0].x;
maxX2 = line[1].x;
line2 = line;
count_groups++;
}
}
}
}
//freeing some memory along the way
cvReleaseMemStorage(&storage);
cvReleaseImage(&dst);
}
//at this point we have found at most 2 groups of lines, we need to take only 1 line from each group
//basically finding the left-most and right-most point of each group and draw a line between those points, removing all the other lines.
//starting and ending points of 2 lines
CvPoint point1;
CvPoint point2;
CvPoint point3;
CvPoint point4;
if (count_groups==2){
int x1 = line1[0].x;
int x2 = line1[1].x;
int y1 = line1[0].y;
int y2 = line1[1].y;
double c1 = (double)(x1 - minX1)/(double)(x2 - minX1);
double c2 = (double)(maxX1 - x1)/(double)(maxX1 - x2);
int ymax, ymin;
ymin = (c1*y2 - y1)/(c1-1);
ymax = (c2*y2 - y1)/(c2-1);
if (maxX1 == x2) ymax = y2;
if (minX1 == x1) ymin = y1;
//getting start and end of the first line
point1 = cvPoint(minX1, ymin);
point2 = cvPoint(maxX1, ymax);
//points around all the lines in a group so that a black rectangle can be drawn above them
CvPoint points[4];
points[0]=cvPoint(minX1, max(0,ymin-10));
points[1]=cvPoint(minX1, min(color_dst->height,ymin+10));
points[2]=cvPoint(maxX1, min(color_dst->height,ymax+10));
points[3]=cvPoint(maxX1, max(0,ymax-10));
CvPoint* pts[1];
pts[0]=points;
int npts[1];
npts[0]=4;
cvPolyLine(color_dst, pts, npts,1,1, CV_RGB(0,0,0), 20, 8 );//removing the group
x1 = line2[0].x;
x2 = line2[1].x;
y1 = line2[0].y;
y2 = line2[1].y;
c1 = (double)(x1 - minX2)/(double)(x2 - minX2);
c2 = (double)(maxX2 - x1)/(double)(maxX2 - x2);
ymin = (c1*y2 - y1)/(c1-1);
ymax = (c2*y2 - y1)/(c2-1);
if (maxX2 == x2) ymax = y2;
if (minX2 == x1) ymin = y1;
//getting start and end of the second line
point3 = cvPoint(minX2, ymin);
point4 = cvPoint(maxX2, ymax);
points[0]=cvPoint(minX2, max(0,ymin-10));
points[1]=cvPoint(minX2, min(color_dst->height,ymin+10));
points[2]=cvPoint(maxX2, min(color_dst->height,ymax+10));
points[3]=cvPoint(maxX2, max(0,ymax-10));
pts[0]=points;
cvPolyLine(color_dst, pts, npts,1,1, CV_RGB(0,0,0), 20, 8 );//removing the group
cvLine(color_dst, point3, point4,CV_RGB(0,255,0),3, 8 ); //draw the second line!
cvLine(color_dst, point1, point2,CV_RGB(0,255,0),3, 8 ); //draw the first line!
//removing everything to the left of the left line and to the right of the right line
if (point4.x > point2.x){
if (color_dst->width > point4.x){
cvRectangle(color_dst,cvPoint(point4.x,0),cvPoint(color_dst->width,color_dst->height),CV_RGB(0,0,0),CV_FILLED);
}
if (point1.x > 0){
cvRectangle(color_dst,cvPoint(point1.x,0),cvPoint(0,color_dst->height),CV_RGB(0,0,0),CV_FILLED);
}
}
if (point4.x < point2.x){
if (color_dst->width > point2.x){
cvRectangle(color_dst,cvPoint(point2.x,0),cvPoint(color_dst->width,color_dst->height),CV_RGB(0,0,0),CV_FILLED);
}
if (point3.x > 0){
cvRectangle(color_dst,cvPoint(point3.x,0),cvPoint(0,color_dst->height),CV_RGB(0,0,0),CV_FILLED);
}
}
//at this point we have two lines which we drew in green...which means all the red pixels that remain on the image
//are supposed to be lying on the object. Make them blue (for no particular reason..just looked nicer :) )
unsigned char* pixels = (unsigned char*)color_dst->imageData;
for (int row = 1; row < color_dst->height-1; row++){
for (int col = 1; col < color_dst->width-1; col++){
int R = pixels[ row * color_dst->widthStep + col * 3 + 2 ];
if (R == 255){
pixels[ row * color_dst->widthStep + col * 3 + 0 ]=255;
pixels[ row * color_dst->widthStep + col * 3 + 1 ]=0;
pixels[ row * color_dst->widthStep + col * 3 + 2 ]=0;
}
}
}
} else continue;
//take points on planes
CvPoint left1, left2, right1;
if (point1.x < point3.x){
left1 = point1;
left2 = point2;
right1 = point3;
} else {
left1 = point3;
left2 = point4;
right1 = point1;
}
//find 3d coordinate of the 2 points on the line on the left plane
//(x,y,z).t() = s*R.i()*A.i()*(u,v,1).t() - R.i()*T
CvMat* imagepoint1 = cvCreateMat( 3, 1, CV_32F );
CV_MAT_ELEM(*imagepoint1, float, 0, 0) = left1.x;
CV_MAT_ELEM(*imagepoint1, float, 1, 0) = left1.y;
CV_MAT_ELEM(*imagepoint1, float, 2, 0) = 1;
CvMat* b1 = cvCreateMat(3, 1, CV_32F);
cvMatMul(R1iAi, imagepoint1, b1);
//calculate scalar s based on the fact that point we take is on the wall => z coordinate is 0
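//With p = s*R1.i()*A.i()*(u,v,1).t() - R1.i()*T1, and writing a1 = R1.i()*T1 and b1 = R1.i()*A.i()*(u,v,1).t(),
//forcing the z coordinate of p to 0 gives 0 = s*b1.z - a1.z, hence s = a1.z / b1.z as computed below.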
float s1 = CV_MAT_ELEM(*a1, float, 2, 0)/CV_MAT_ELEM(*b1, float, 2, 0);
CvMat* identity = cvCreateMat(3,3,CV_32F);
cvSetIdentity(identity);
for (int i = 0; i < 3; i++){
CV_MAT_ELEM(*identity, float, i, i)=s1;
}
CvMat* temp = cvCreateMat(3,1,CV_32F);
cvMatMul(identity,b1, temp);
CvMat* dpoint1 = cvCreateMat(3,1,CV_32F);
cvSub(temp, a1, dpoint1); //first 3d point on the left plane
//same thing for the second point
CvMat* imagepoint2 = cvCreateMat( 3, 1, CV_32F );
CV_MAT_ELEM(*imagepoint2, float, 0, 0) = left2.x;
CV_MAT_ELEM(*imagepoint2, float, 1, 0) = left2.y;
CV_MAT_ELEM(*imagepoint2, float, 2, 0) = 1;
CvMat* b2 = cvCreateMat(3, 1, CV_32F);
cvMatMul(R1iAi, imagepoint2, b2);
float s2 = CV_MAT_ELEM(*a1, float, 2, 0)/CV_MAT_ELEM(*b2, float, 2, 0);
cvSetIdentity(identity, cvRealScalar(s2));
cvMatMul(identity,b2, b2);
CvMat* dpoint2 = cvCreateMat(3,1,CV_32F);
cvSub(b2, a1, dpoint2); //second 3d point on the left plane
//same for the point on the right plane
CvMat* imagepoint3 = cvCreateMat( 3, 1, CV_32F );
CV_MAT_ELEM(*imagepoint3, float, 0, 0) = right1.x;
CV_MAT_ELEM(*imagepoint3, float, 1, 0) = right1.y;
CV_MAT_ELEM(*imagepoint3, float, 2, 0) = 1;
CvMat* b3 = cvCreateMat(3, 1, CV_32F);
cvMatMul(R2iAi, imagepoint3, b3);
float s3 = CV_MAT_ELEM(*a2, float, 2, 0)/CV_MAT_ELEM(*b3, float, 2, 0);
cvSetIdentity(identity, cvRealScalar(s3));
cvMatMul(identity,b3, b3);
CvMat* dpoint3 = cvCreateMat(3,1,CV_32F);
cvSub(b3, a2, dpoint3); //point on the right plane
//convert point from the right plane into the coord. system of the left plane
//p1 = R1.i()*[R2|T2]*p2 - R1.i()*T1
CvMat* dpoint3left = cvCreateMat(3,1,CV_32F);
CvMat* pw = cvCreateMat(4,1,CV_32F);
for (int i = 0; i<3; i++){
CV_MAT_ELEM(*pw, float, i, 0) = CV_MAT_ELEM(*dpoint3, float, i, 0);
}
CV_MAT_ELEM(*pw, float, 3, 0) = 1.0;
CvMat* r2t2pw = cvCreateMat(3,1,CV_32F);
cvMatMul(r2t2, pw, r2t2pw);
CvMat* r1invr2t2pw = cvCreateMat(3,1,CV_32F);
cvMatMul(r1inv, r2t2pw, r1invr2t2pw);
cvSub(r1invr2t2pw, a1, dpoint3left);
//now that we have 3 non-collinear points in the same coordinate system we can find the equation of the plane
/*
A = y1 (z2 - z3) + y2 (z3 - z1) + y3 (z1 - z2)
B = z1 (x2 - x3) + z2 (x3 - x1) + z3 (x1 - x2)
C = x1 (y2 - y3) + x2 (y3 - y1) + x3 (y1 - y2)
- D = x1 (y2 z3 - y3 z2) + x2 (y3 z1 - y1 z3) + x3 (y1 z2 - y2 z1)
*/
float x1 = CV_MAT_ELEM(*dpoint1, float,0,0);
float y1 = CV_MAT_ELEM(*dpoint1, float,1,0);
float z1 = CV_MAT_ELEM(*dpoint1, float,2,0);
float x2 = CV_MAT_ELEM(*dpoint2, float,0,0);
float y2 = CV_MAT_ELEM(*dpoint2, float,1,0);
float z2 = CV_MAT_ELEM(*dpoint2, float,2,0);
float x3 = CV_MAT_ELEM(*dpoint3left, float,0,0);
float y3 = CV_MAT_ELEM(*dpoint3left, float,1,0);
float z3 = CV_MAT_ELEM(*dpoint3left, float,2,0);
float planeA = (y1 * (z2 - z3)) + (y2 * (z3 - z1)) + (y3 * (z1 - z2));
float planeB = (z1 * (x2 - x3)) + (z2 * (x3 - x1)) + (z3 * (x1 - x2));
float planeC = (x1 * (y2 - y3)) + (x2 * (y3 - y1)) + (x3 * (y1 - y2));
float planeD = -((x1 * (y2 * z3 - y3 * z2)) + (x2 * (y3 * z1 - y1 * z3)) + (x3 * (y1 * z2 - y2 * z1)));
//calculate normal to the laser plane
CvMat* planeNormal = cvCreateMat(3, 1, CV_32F);
CV_MAT_ELEM(*planeNormal, float,0,0) = planeA;
CV_MAT_ELEM(*planeNormal, float,1,0) = planeB;
CV_MAT_ELEM(*planeNormal, float,2,0) = planeC;
pixels = (unsigned char*)color_dst->imageData;
unsigned char* color_pixels = (unsigned char*)image_empty->imageData;
//go through all the pixels on the object and calculate the 3d coordinate
for (int row = 1; row < color_dst->height-1; row++){
for (int col = 1; col < color_dst->width-1; col++){
int B = pixels[ row * color_dst->widthStep + col * 3];
if (B == 255){
//get RGB of the pixel on the original image
int realB = color_pixels[ row * color_dst->widthStep + col * 3];
int realG = color_pixels[ row * color_dst->widthStep + col * 3 + 1];
int realR = color_pixels[ row * color_dst->widthStep + col * 3 + 2];
//Used http://www.cs.princeton.edu/courses/archive/fall00/cs426/lectures/raycast/sld017.htm for reference
//on how to find intersection of ray and a plane
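//Ray through pixel (col,row): p(t) = t*v - a1, with v = R1.i()*A.i()*(col,row,1).t();
//substituting into the plane equation N.p + D = 0 gives t = (N.a1 - D) / (N.v),
//which is what p0dotN, vdotN and t compute below.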
float p0dotN = cvDotProduct(a1,planeNormal);
CvMat* vtmp = cvCreateMat(3,1,CV_32F);
CV_MAT_ELEM(*vtmp, float,0,0) = col;
CV_MAT_ELEM(*vtmp, float,1,0) = row;
CV_MAT_ELEM(*vtmp, float,2,0) = 1;
CvMat* v = cvCreateMat(3,1,CV_32F);
cvMatMul(R1iAi, vtmp, v);
float vdotN = cvDotProduct(v,planeNormal);
float t = (p0dotN - planeD)/vdotN;
cvSetIdentity(identity, cvRealScalar(t));
cvMatMul(identity,v,v);
CvMat* final = cvCreateMat(3,1,CV_32F);
cvSub(v,a1,final); //final point is still in the coordinate system of the left plane.
CvMat* final_rotated = cvCreateMat(3,1,CV_32F); //translate it into the coordinate system of the camera
cvMatMul(rotation_matrix_left,final,final_rotated);
cvAdd(final_rotated,translation_left, final_rotated);
//add point to the file (the minus next to the y coordinate compensates for the left-handed coordinate system of slam6d,
//otherwise the dwarf is shown upside-down)
scanfile<<CV_MAT_ELEM(*final_rotated,float,0,0)<<" "<<-CV_MAT_ELEM(*final_rotated,float,1,0)<<" "<<CV_MAT_ELEM(*final_rotated,float,2,0)<<
" "<< realR<<" "<<realG<<" "<<realB<<"\n";
cvReleaseMat(&vtmp);
cvReleaseMat(&v);
cvReleaseMat(&final);
cvReleaseMat(&final_rotated);
}
}
}
//save the image of the lines and points of the object
char name2[100];
sprintf(name2, "%s/%08d_diff.ppm", path,m);
cvSaveImage(name2, color_dst);
//free memory
cvReleaseImage(&image);
cvReleaseImage(&diff);
cvReleaseImage(&color_dst);
cvReleaseImage(&tmpImage);
cvReleaseMat(&imagepoint1);
cvReleaseMat(&imagepoint2);
cvReleaseMat(&imagepoint3);
cvReleaseMat(&b1);
cvReleaseMat(&b2);
cvReleaseMat(&b3);
cvReleaseMat(&temp);
cvReleaseMat(&dpoint1);
cvReleaseMat(&dpoint2);
cvReleaseMat(&dpoint3);
cvReleaseMat(&dpoint3left);
cvReleaseMat(&identity);
cvReleaseMat(&pw);
cvReleaseMat(&r2t2pw);
cvReleaseMat(&r1invr2t2pw);
cvReleaseMat(&planeNormal);
}
//free more memory
cvReleaseImage(&image_empty);
cvReleaseMat(&intrinsic);
cvReleaseMat(&intrinsicinv);
cvReleaseMat(&rotation_left);
cvReleaseMat(&rotation_matrix_left);
cvReleaseMat(&rotation_right);
cvReleaseMat(&rotation_matrix_right);
cvReleaseMat(&translation_left);
cvReleaseMat(&translation_right);
cvReleaseMat(&r1inv);
cvReleaseMat(&r2inv);
cvReleaseMat(&R1iAi);
cvReleaseMat(&R2iAi);
cvReleaseMat(&r1t1);
cvReleaseMat(&r2t2);
cvReleaseMat(&a1);
cvReleaseMat(&a2);
//close file
scanfile.close();
return 0;
}


@@ -0,0 +1,37 @@
IF(WITH_FBR)
FIND_PACKAGE(OpenCV REQUIRED)
SET(FBR_IO_SRC scan_cv.cc)
add_library(fbr_cv_io STATIC ${FBR_IO_SRC})
SET(FBR_PANORAMA_SRC panorama.cc)
#add_library(fbr_panorama STATIC ${FBR_PANORAMA_SRC})
add_library(fbr_panorama STATIC ${FBR_PANORAMA_SRC} fbr_global.cc)
SET(FBR_FEATURE_SRC feature.cc)
add_library(fbr_feature STATIC ${FBR_FEATURE_SRC})
SET(FBR_FEATURE_MATCHER_SRC feature_matcher.cc)
add_library(fbr_feature_matcher STATIC ${FBR_FEATURE_MATCHER_SRC})
SET(FBR_REGISTRATION_SRC registration.cc)
add_library(fbr_registration STATIC ${FBR_REGISTRATION_SRC})
SET(FBR_SRC scan_cv.cc panorama.cc feature.cc feature_matcher.cc registration.cc fbr_global.cc)
add_library(fbr STATIC ${FBR_SRC})
SET(FBR_LIBS scan ANN ${OpenCV_LIBS})
add_executable(featurebasedregistration feature_based_registration.cc fbr_global.cc)
#target_link_libraries(featurebasedregistration fbr_cv_io fbr_panorama fbr_feature fbr_feature_matcher fbr_registration ${FBR_LIBS})
target_link_libraries(featurebasedregistration fbr ${FBR_LIBS})
### EXPORT SHARED LIBS
IF(EXPORT_SHARED_LIBS)
add_library(fbr_s SHARED ${FBR_SRC})
target_link_libraries(fbr_s scan_s ANN_s ${OpenCV_LIBS})
ENDIF(EXPORT_SHARED_LIBS)
ENDIF(WITH_FBR)


@@ -0,0 +1,15 @@
find_package(OpenCV REQUIRED)
if(EXISTS "${OpenCV_DIR}/OpenCVConfig.cmake")
include("${OpenCV_DIR}/OpenCVConfig.cmake")
set(ADDITIONAL_OPENCV_FLAGS
"-DCV_MINOR_VERSION=${OpenCV_VERSION_MINOR} -DCV_MAJOR_VERSION=${OpenCV_VERSION_MAJOR}"
CACHE STRING "OpenCV Version Defines"
)
## Include the standard CMake script
ELSE(EXISTS "${OpenCV_DIR}/OpenCVConfig.cmake")
set(ADDITIONAL_OPENCV_FLAGS
""
CACHE STRING "OpenCV Version Defines (BLUB)"
)
endif(EXISTS "${OpenCV_DIR}/OpenCVConfig.cmake")


@@ -0,0 +1,82 @@
/**
* @file panorama.h
* @brief create panorama images from 3D scans.
* This class is a panorama image container with different projections.
* It creates panoramic images with specified resolutions.
* @author HamidReza Houshiar. Jacobs University Bremen gGmbH, Germany.
* @date Date: 2012/05/23 2:00
*/
#ifndef PANORAMA_H_
#define PANORAMA_H_
#include "fbr_global.h"
using namespace std;
namespace fbr{
/**
* @class panorama : create panorama images with different projection methods from input scan files (Mat from scan_cv class) in opencv Mat format
* @param iReflectance panorama image from reflectance data
* @param iRange panorama image from range data
* @param iMap panorama map of 3D cartesian coordinates of the input scan (same points as iRange and iReflectance)
* @param extendedIMap 3D vector as panorama map with all the points
* @param iWidth width of the panorama image (cols in opencv)
* @param iHeight height of panorama image (rows in opencv)
* @param pMethod projection method for panorama creation
* @param nImage number of images per scan, especially for Rectilinear, Pannini and Stereographic projections
* @param pParam special d parameter of Pannini projection (Master Thesis for more info) or special R parameter of Stereographic projection (Master Thesis for more info)
*/
class panorama{
cv::Mat iReflectance;
cv::Mat iMap;
cv::Mat iRange;
vector<vector<vector<cv::Vec3f> > > extendedIMap;
unsigned int iWidth;
unsigned int iHeight;
projection_method pMethod;
unsigned int nImages;
double pParam;
panorama_map_method mapMethod;
void init(unsigned int width, unsigned int height, projection_method method, unsigned int numberOfImages, double param, panorama_map_method mapMethod);
void map(int x, int y, cv::MatIterator_<cv::Vec4f> it, double range);
public:
/**
* constructor of class panorama
* @param width the width of the panorama image
* @param height the height of the panorama image
* @param method the projection method
* @param images number of subsets to create the panorama image
* @param param special parameter for pannini or stereographic projections
* @param mapMethod mapping method for panorama image and 3D points
*/
panorama (unsigned int width, unsigned int height, projection_method method);
panorama (unsigned int width, unsigned int height, projection_method method, unsigned int numberOfImages);
panorama (unsigned int width, unsigned int height, projection_method method, unsigned int numberOfImages, double param);
panorama (unsigned int width, unsigned int height, projection_method method, unsigned int numberOfImages, double param, panorama_map_method mapMethod);
/**
* @brief creates the panorama reflectance image and map.
*/
void createPanorama(cv::Mat scan);
/**
* @brief recovers the point cloud from the panorama image and range information
* @param image - input range image to be converted to point cloud
* @param file - destination of .3d file containing the point cloud
*/
void recoverPointCloud(const cv::Mat& range_image, cv::Mat& reflectance_image, vector<cv::Vec4f> &reduced_points);
unsigned int getImageWidth();
unsigned int getImageHeight();
projection_method getProjectionMethod();
unsigned int getNumberOfImages();
double getProjectionParam();
cv::Mat getReflectanceImage();
cv::Mat getMap();
cv::Mat getRangeImage();
vector<vector<vector<cv::Vec3f> > > getExtendedMap();
panorama_map_method getMapMethod();
void getDescription();
};
}
#endif /* PANORAMA_H_ */
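A minimal usage sketch of the API documented in this header (not part of the committed sources): the include path, the fbr::EQUIRECTANGULAR value from fbr_global.h and the 4-channel float layout of the input Mat (x, y, z, reflectance) are assumptions, not guaranteed by this commit.
#include <opencv2/opencv.hpp>
#include "slam6d/fbr/panorama.h"
// Build a 3600x1000 equirectangular panorama from a scan matrix as produced by scan_cv.
static void panoramaSketch(const cv::Mat &scanMat /* assumed CV_32FC(4): x, y, z, reflectance */)
{
  fbr::panorama pano(3600, 1000, fbr::EQUIRECTANGULAR);
  pano.createPanorama(scanMat);                      // fills the reflectance, range and map images
  cv::Mat reflectance = pano.getReflectanceImage();  // 2D reflectance panorama
  cv::Mat range = pano.getRangeImage();              // matching range panorama
  (void)reflectance; (void)range;                    // inspect or save as needed
}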


@@ -0,0 +1,346 @@
// Copyright (C) 2007 by Cristóbal Carnero Liñán
// grendel.ccl@gmail.com
//
// This file is part of cvBlob.
//
// cvBlob is free software: you can redistribute it and/or modify
// it under the terms of the Lesser GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// cvBlob is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Lesser GNU General Public License for more details.
//
// You should have received a copy of the Lesser GNU General Public License
// along with cvBlob. If not, see <http://www.gnu.org/licenses/>.
//
#include <cmath>
#include <iostream>
using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#include <opencv/highgui.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"
namespace cvb
{
CvLabel cvGreaterBlob(const CvBlobs &blobs)
{
CvLabel label=0;
unsigned int maxArea=0;
for (CvBlobs::const_iterator it=blobs.begin();it!=blobs.end();++it)
{
CvBlob *blob=(*it).second;
//if ((!blob->_parent)&&(blob->area>maxArea))
if (blob->area>maxArea)
{
label=blob->label;
maxArea=blob->area;
}
}
return label;
}
void cvFilterByArea(CvBlobs &blobs, unsigned int minArea, unsigned int maxArea)
{
CvBlobs::iterator it=blobs.begin();
while(it!=blobs.end())
{
CvBlob *blob=(*it).second;
if ((blob->area<minArea)||(blob->area>maxArea))
{
cvReleaseBlob(blob);
CvBlobs::iterator tmp=it;
++it;
blobs.erase(tmp);
}
else
++it;
}
}
void cvFilterByLabel(CvBlobs &blobs, CvLabel label)
{
CvBlobs::iterator it=blobs.begin();
while(it!=blobs.end())
{
CvBlob *blob=(*it).second;
if (blob->label!=label)
{
delete blob;
CvBlobs::iterator tmp=it;
++it;
blobs.erase(tmp);
}
else
++it;
}
}
/*void cvCentralMoments(CvBlob *blob, const IplImage *img)
{
CV_FUNCNAME("cvCentralMoments");
__CV_BEGIN__;
if (!blob->centralMoments)
{
CV_ASSERT(img&&(img->depth==IPL_DEPTH_LABEL)&&(img->nChannels==1));
//cvCentroid(blob); // Here?
blob->u11=blob->u20=blob->u02=0.;
// Only in the bounding box
int stepIn = img->widthStep / (img->depth / 8);
int img_width = img->width;
int img_height = img->height;
int img_offset = 0;
if(0 != img->roi)
{
img_width = img->roi->width;
img_height = img->roi->height;
img_offset = img->roi->xOffset + (img->roi->yOffset * stepIn);
}
CvLabel *imgData=(CvLabel *)img->imageData + (blob->miny * stepIn) + img_offset;
for (unsigned int r=blob->miny;
r<blob->maxy;
r++,imgData+=stepIn)
for (unsigned int c=blob->minx;c<blob->maxx;c++)
if (imgData[c]==blob->label)
{
double tx=(c-blob->centroid.x);
double ty=(r-blob->centroid.y);
blob->u11+=tx*ty;
blob->u20+=tx*tx;
blob->u02+=ty*ty;
}
blob->centralMoments = true;
}
__CV_END__;
}*/
void cvRenderBlob(const IplImage *imgLabel, CvBlob *blob, IplImage *imgSource, IplImage *imgDest, unsigned short mode, CvScalar const &color, double alpha)
{
CV_FUNCNAME("cvRenderBlob");
__CV_BEGIN__;
CV_ASSERT(imgLabel&&(imgLabel->depth==IPL_DEPTH_LABEL)&&(imgLabel->nChannels==1));
CV_ASSERT(imgDest&&(imgDest->depth==IPL_DEPTH_8U)&&(imgDest->nChannels==3));
if (mode&CV_BLOB_RENDER_COLOR)
{
int stepLbl = imgLabel->widthStep/(imgLabel->depth/8);
int stepSrc = imgSource->widthStep/(imgSource->depth/8);
int stepDst = imgDest->widthStep/(imgDest->depth/8);
int imgLabel_width = imgLabel->width;
int imgLabel_height = imgLabel->height;
int imgLabel_offset = 0;
int imgSource_width = imgSource->width;
int imgSource_height = imgSource->height;
int imgSource_offset = 0;
int imgDest_width = imgDest->width;
int imgDest_height = imgDest->height;
int imgDest_offset = 0;
if(imgLabel->roi)
{
imgLabel_width = imgLabel->roi->width;
imgLabel_height = imgLabel->roi->height;
imgLabel_offset = (imgLabel->nChannels * imgLabel->roi->xOffset) + (imgLabel->roi->yOffset * stepLbl);
}
if(imgSource->roi)
{
imgSource_width = imgSource->roi->width;
imgSource_height = imgSource->roi->height;
imgSource_offset = (imgSource->nChannels * imgSource->roi->xOffset) + (imgSource->roi->yOffset * stepSrc);
}
if(imgDest->roi)
{
imgDest_width = imgDest->roi->width;
imgDest_height = imgDest->roi->height;
imgDest_offset = (imgDest->nChannels * imgDest->roi->xOffset) + (imgDest->roi->yOffset * stepDst);
}
CvLabel *labels = (CvLabel *)imgLabel->imageData + imgLabel_offset + (blob->miny * stepLbl);
unsigned char *source = (unsigned char *)imgSource->imageData + imgSource_offset + (blob->miny * stepSrc);
unsigned char *imgData = (unsigned char *)imgDest->imageData + imgDest_offset + (blob->miny * stepDst);
for (unsigned int r=blob->miny; r<blob->maxy; r++, labels+=stepLbl, source+=stepSrc, imgData+=stepDst)
for (unsigned int c=blob->minx; c<blob->maxx; c++)
{
if (labels[c]==blob->label)
{
imgData[imgDest->nChannels*c+0] = (unsigned char)((1.-alpha)*source[imgSource->nChannels*c+0]+alpha*color.val[0]);
imgData[imgDest->nChannels*c+1] = (unsigned char)((1.-alpha)*source[imgSource->nChannels*c+1]+alpha*color.val[1]);
imgData[imgDest->nChannels*c+2] = (unsigned char)((1.-alpha)*source[imgSource->nChannels*c+2]+alpha*color.val[2]);
}
}
}
if (mode)
{
if (mode&CV_BLOB_RENDER_TO_LOG)
{
std::clog << "Blob " << blob->label << std::endl;
std::clog << " - Bounding box: (" << blob->minx << ", " << blob->miny << ") - (" << blob->maxx << ", " << blob->maxy << ")" << std::endl;
std::clog << " - Bounding box area: " << (1 + blob->maxx - blob->minx) * (1 + blob->maxy - blob->miny) << std::endl;
std::clog << " - Area: " << blob->area << std::endl;
std::clog << " - Centroid: (" << blob->centroid.x << ", " << blob->centroid.y << ")" << std::endl;
std::clog << std::endl;
}
if (mode&CV_BLOB_RENDER_TO_STD)
{
std::cout << "Blob " << blob->label << std::endl;
std::cout << " - Bounding box: (" << blob->minx << ", " << blob->miny << ") - (" << blob->maxx << ", " << blob->maxy << ")" << std::endl;
std::cout << " - Bounding box area: " << (1 + blob->maxx - blob->minx) * (1 + blob->maxy - blob->miny) << std::endl;
std::cout << " - Area: " << blob->area << std::endl;
std::cout << " - Centroid: (" << blob->centroid.x << ", " << blob->centroid.y << ")" << std::endl;
std::cout << std::endl;
}
if (mode&CV_BLOB_RENDER_BOUNDING_BOX)
cvRectangle(imgDest, cvPoint(blob->minx, blob->miny), cvPoint(blob->maxx-1, blob->maxy-1), CV_RGB(255., 0., 0.));
if (mode&CV_BLOB_RENDER_ANGLE)
{
double angle = cvAngle(blob);
double x1,y1,x2,y2;
double lengthLine = MAX(blob->maxx-blob->minx, blob->maxy-blob->miny)/2.;
x1=blob->centroid.x-lengthLine*cos(angle);
y1=blob->centroid.y-lengthLine*sin(angle);
x2=blob->centroid.x+lengthLine*cos(angle);
y2=blob->centroid.y+lengthLine*sin(angle);
cvLine(imgDest,cvPoint(int(x1),int(y1)),cvPoint(int(x2),int(y2)),CV_RGB(0.,255.,0.));
}
if (mode&CV_BLOB_RENDER_CENTROID)
{
cvLine(imgDest,cvPoint(int(blob->centroid.x)-3,int(blob->centroid.y)),cvPoint(int(blob->centroid.x)+3,int(blob->centroid.y)),CV_RGB(0.,0.,255.));
cvLine(imgDest,cvPoint(int(blob->centroid.x),int(blob->centroid.y)-3),cvPoint(int(blob->centroid.x),int(blob->centroid.y)+3),CV_RGB(0.,0.,255.));
}
}
__CV_END__;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Based on http://en.wikipedia.org/wiki/HSL_and_HSV
/// \def _HSV2RGB_(H, S, V, R, G, B)
/// \brief Color translation between HSV and RGB.
#define _HSV2RGB_(H, S, V, R, G, B) \
{ \
double _h = H/60.; \
int _hf = (int)floor(_h); \
int _hi = ((int)_h)%6; \
double _f = _h - _hf; \
\
double _p = V * (1. - S); \
double _q = V * (1. - _f * S); \
double _t = V * (1. - (1. - _f) * S); \
\
switch (_hi) \
{ \
case 0: \
R = 255.*V; G = 255.*_t; B = 255.*_p; \
break; \
case 1: \
R = 255.*_q; G = 255.*V; B = 255.*_p; \
break; \
case 2: \
R = 255.*_p; G = 255.*V; B = 255.*_t; \
break; \
case 3: \
R = 255.*_p; G = 255.*_q; B = 255.*V; \
break; \
case 4: \
R = 255.*_t; G = 255.*_p; B = 255.*V; \
break; \
case 5: \
R = 255.*V; G = 255.*_p; B = 255.*_q; \
break; \
} \
}
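// Example: _HSV2RGB_(120., 1., 1., r, g, b) yields pure green (R=0, G=255, B=0).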
///////////////////////////////////////////////////////////////////////////////////////////////////
typedef std::map<CvLabel, CvScalar> Palete;
void cvRenderBlobs(const IplImage *imgLabel, CvBlobs &blobs, IplImage *imgSource, IplImage *imgDest, unsigned short mode, double alpha)
{
CV_FUNCNAME("cvRenderBlobs");
__CV_BEGIN__;
{
CV_ASSERT(imgLabel&&(imgLabel->depth==IPL_DEPTH_LABEL)&&(imgLabel->nChannels==1));
CV_ASSERT(imgDest&&(imgDest->depth==IPL_DEPTH_8U)&&(imgDest->nChannels==3));
Palete pal;
if (mode&CV_BLOB_RENDER_COLOR)
{
unsigned int colorCount = 0;
for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
{
CvLabel label = (*it).second->label;
double r, g, b;
_HSV2RGB_((double)((colorCount*77)%360), .5, 1., r, g, b);
colorCount++;
pal[label] = CV_RGB(r, g, b);
}
}
for (CvBlobs::iterator it=blobs.begin(); it!=blobs.end(); ++it)
cvRenderBlob(imgLabel, (*it).second, imgSource, imgDest, mode, pal[(*it).second->label], alpha);
}
__CV_END__;
}
// Returns radians
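// The orientation is derived from the blob's second-order central moments u20, u02 and u11.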
double cvAngle(CvBlob *blob)
{
CV_FUNCNAME("cvAngle");
__CV_BEGIN__;
return .5*atan2(2.*blob->u11,(blob->u20-blob->u02));
__CV_END__;
}
void cvSaveImageBlob(const char *filename, IplImage *img, CvBlob const *blob)
{
CvRect roi = cvGetImageROI(img);
cvSetImageROItoBlob(img, blob);
cvSaveImage(filename, img);
cvSetImageROI(img, roi);
}
}
ostream& operator<< (ostream& output, const cvb::CvBlob& b)
{
output << b.label << ": " << b.area << ", (" << b.centroid.x << ", " << b.centroid.y << "), [(" << b.minx << ", " << b.miny << ") - (" << b.maxx << ", " << b.maxy << ")]";
return output;
}

File diff suppressed because it is too large

View file

@ -0,0 +1,440 @@
/*
* basicScan implementation
*
* Copyright (C) Thomas Escher, Kai Lingemann
*
* Released under the GPL version 3.
*
*/
#include "slam6d/basicScan.h"
#include "scanio/scan_io.h"
#include "slam6d/kd.h"
#include "slam6d/Boctree.h"
#include "slam6d/ann_kd.h"
#ifdef WITH_METRICS
#include "slam6d/metrics.h"
#endif //WITH_METRICS
#include <list>
#include <utility>
#include <fstream>
using std::ifstream;
using std::ofstream;
using std::flush;
using std::string;
using std::map;
using std::pair;
using std::vector;
#include <boost/filesystem/operations.hpp>
using namespace boost::filesystem;
void BasicScan::openDirectory(const std::string& path, IOType type, int start, int end)
{
#ifdef WITH_METRICS
Timer t = ClientMetric::read_scan_time.start();
#endif //WITH_METRICS
// create an instance of ScanIO
ScanIO* sio = ScanIO::getScanIO(type);
// query available scans in the directory from the ScanIO
std::list<std::string> identifiers(sio->readDirectory(path.c_str(), start, end));
Scan::allScans.reserve(identifiers.size());
// for each identifier, create a scan
for(std::list<std::string>::iterator it = identifiers.begin(); it != identifiers.end(); ++it) {
Scan::allScans.push_back(new BasicScan(path, *it, type));
}
#ifdef WITH_METRICS
ClientMetric::read_scan_time.end(t);
#endif //WITH_METRICS
}
void BasicScan::closeDirectory()
{
// clean up the scan vector
for(ScanVector::iterator it = Scan::allScans.begin(); it != Scan::allScans.end(); ++it)
delete *it;
Scan::allScans.clear();
}
BasicScan::BasicScan(double *_rPos, double *_rPosTheta, vector<double*> points) {
init();
for(int i = 0; i < 3; i++) {
rPos[i] = _rPos[i];
rPosTheta[i] = _rPosTheta[i];
}
// write original pose matrix
EulerToMatrix4(rPos, rPosTheta, transMatOrg);
// initialize transform matrices from the original one, could just copy transMatOrg to transMat instead
transformMatrix(transMatOrg);
// reset the delta align matrix to represent only the transformations after local-to-global (transMatOrg) one
M4identity(dalignxf);
PointFilter filter;
if(m_filter_range_set)
filter.setRange(m_filter_max, m_filter_min);
if(m_filter_height_set)
filter.setHeight(m_filter_top, m_filter_bottom);
if(m_range_mutation_set)
filter.setRangeMutator(m_range_mutation);
double* data = reinterpret_cast<double*>(create("xyz", sizeof(double) * 3 * points.size()).get_raw_pointer());
int tmp = 0;
for(unsigned int i = 0; i < points.size(); ++i) {
for(unsigned int j = 0; j < 3; j++) {
data[tmp++] = points[i][j];
}
}
}
BasicScan::BasicScan(const std::string& path, const std::string& identifier, IOType type) :
m_path(path), m_identifier(identifier), m_type(type)
{
init();
// request pose from file
double euler[6];
ScanIO* sio = ScanIO::getScanIO(m_type);
sio->readPose(m_path.c_str(), m_identifier.c_str(), euler);
rPos[0] = euler[0];
rPos[1] = euler[1];
rPos[2] = euler[2];
rPosTheta[0] = euler[3];
rPosTheta[1] = euler[4];
rPosTheta[2] = euler[5];
// write original pose matrix
EulerToMatrix4(euler, &euler[3], transMatOrg);
// initialize transform matrices from the original one, could just copy transMatOrg to transMat instead
transformMatrix(transMatOrg);
// reset the delta align matrix to represent only the transformations after local-to-global (transMatOrg) one
M4identity(dalignxf);
}
BasicScan::~BasicScan()
{
for (map<string, pair<unsigned char*, unsigned int>>::iterator it = m_data.begin(); it != m_data.end(); it++) {
delete[] it->second.first;
}
}
void BasicScan::init()
{
m_filter_max = 0.0;
m_filter_min = 0.0;
m_filter_top = 0.0;
m_filter_bottom = 0.0;
m_range_mutation = 0.0;
m_filter_range_set = false;
m_filter_height_set = false;
m_range_mutation_set = false;
}
void BasicScan::setRangeFilter(double max, double min)
{
m_filter_max = max;
m_filter_min = min;
m_filter_range_set = true;
}
void BasicScan::setHeightFilter(double top, double bottom)
{
m_filter_top = top;
m_filter_bottom = bottom;
m_filter_height_set = true;
}
void BasicScan::setRangeMutation(double range)
{
m_range_mutation_set = true;
m_range_mutation = range;
}
void BasicScan::get(unsigned int types)
{
ScanIO* sio = ScanIO::getScanIO(m_type);
vector<double> xyz;
vector<unsigned char> rgb;
vector<float> reflectance;
vector<float> temperature;
vector<float> amplitude;
vector<int> type;
vector<float> deviation;
PointFilter filter;
if(m_filter_range_set)
filter.setRange(m_filter_max, m_filter_min);
if(m_filter_height_set)
filter.setHeight(m_filter_top, m_filter_bottom);
if(m_range_mutation_set)
filter.setRangeMutator(m_range_mutation);
sio->readScan(m_path.c_str(),
m_identifier.c_str(),
filter,
&xyz,
&rgb,
&reflectance,
&temperature,
&amplitude,
&type,
&deviation);
// for each requested and filled data vector, allocate and write contents to their new data fields
if(types & DATA_XYZ && !xyz.empty()) {
double* data = reinterpret_cast<double*>(create("xyz", sizeof(double) * xyz.size()).get_raw_pointer());
for(unsigned int i = 0; i < xyz.size(); ++i) data[i] = xyz[i];
}
if(types & DATA_RGB && !rgb.empty()) {
unsigned char* data = reinterpret_cast<unsigned char*>(create("rgb", sizeof(unsigned char) * rgb.size()).get_raw_pointer());
for(unsigned int i = 0; i < rgb.size(); ++i) data[i] = rgb[i];
}
if(types & DATA_REFLECTANCE && !reflectance.empty()) {
float* data = reinterpret_cast<float*>(create("reflectance", sizeof(float) * reflectance.size()).get_raw_pointer());
for(unsigned int i = 0; i < reflectance.size(); ++i) data[i] = reflectance[i];
}
if(types & DATA_TEMPERATURE && !temperature.empty()) {
float* data = reinterpret_cast<float*>(create("temperature", sizeof(float) * temperature.size()).get_raw_pointer());
for(unsigned int i = 0; i < temperature.size(); ++i) data[i] = temperature[i];
}
if(types & DATA_AMPLITUDE && !amplitude.empty()) {
int* data = reinterpret_cast<int*>(create("amplitude", sizeof(int) * amplitude.size()).get_raw_pointer());
for(unsigned int i = 0; i < amplitude.size(); ++i) data[i] = amplitude[i];
}
if(types & DATA_TYPE && !type.empty()) {
float* data = reinterpret_cast<float*>(create("type", sizeof(double) * type.size()).get_raw_pointer());
for(unsigned int i = 0; i < type.size(); ++i) data[i] = type[i];
}
if(types & DATA_DEVIATION && !deviation.empty()) {
float* data = reinterpret_cast<float*>(create("deviation", sizeof(float) * deviation.size()).get_raw_pointer());
for(unsigned int i = 0; i < deviation.size(); ++i) data[i] = deviation[i];
}
}
DataPointer BasicScan::get(const std::string& identifier)
{
// try to get data
map<string, pair<unsigned char*, unsigned int>>::iterator it = m_data.find(identifier);
// create data fields
if(it == m_data.end()) {
// load from file
if(identifier == "xyz") get(DATA_XYZ); else
if(identifier == "rgb") get(DATA_RGB); else
if(identifier == "reflectance") get(DATA_REFLECTANCE); else
if(identifier == "temperature") get(DATA_TEMPERATURE); else
if(identifier == "amplitude") get(DATA_AMPLITUDE); else
if(identifier == "type") get(DATA_TYPE); else
if(identifier == "deviation") get(DATA_DEVIATION); else
// reduce on demand
if(identifier == "xyz reduced") calcReducedOnDemand(); else
if(identifier == "xyz reduced original") calcReducedOnDemand(); else
// show requests reduced points; map it onto the same entry as "xyz reduced" so manipulations affect both
if(identifier == "xyz reduced show") {
calcReducedOnDemand();
m_data["xyz reduced show"] = m_data["xyz reduced"];
} else
if(identifier == "octtree") {
createOcttree();
}
it = m_data.find(identifier);
}
// if nothing can be loaded, return an empty pointer
if(it == m_data.end())
return DataPointer(0, 0);
else
return DataPointer(it->second.first, it->second.second);
}
DataPointer BasicScan::create(const std::string& identifier, unsigned int size)
{
map<string, pair<unsigned char*, unsigned int>>::iterator it = m_data.find(identifier);
if(it != m_data.end()) {
// try to reuse, otherwise reallocate
if(it->second.second != size) {
delete[] it->second.first;
it->second.first = new unsigned char[size];
it->second.second = size;
}
} else {
// create a new block of data
it = m_data.insert(
std::make_pair(
identifier,
std::make_pair(
new unsigned char[size],
size
)
)
).first;
}
return DataPointer(it->second.first, it->second.second);
}
void BasicScan::clear(const std::string& identifier)
{
map<string, pair<unsigned char*, unsigned int>>::iterator it = m_data.find(identifier);
if(it != m_data.end()) {
delete[] it->second.first;
m_data.erase(it);
}
}
void BasicScan::createSearchTreePrivate()
{
DataXYZ xyz_orig(get("xyz reduced original"));
PointerArray<double> ar(xyz_orig);
switch(searchtree_nnstype)
{
case simpleKD:
kd = new KDtree(ar.get(), xyz_orig.size());
break;
case ANNTree:
kd = new ANNtree(ar, xyz_orig.size());
break;
case BOCTree:
kd = new BOctTree<double>(ar.get(), xyz_orig.size(), 10.0, PointType(), true);
break;
case -1:
throw runtime_error("Cannot create a SearchTree without setting a type.");
default:
throw runtime_error("SearchTree type not implemented");
}
// TODO: make the switch cases above work with CUDA
if (searchtree_cuda_enabled) createANNTree();
}
void BasicScan::calcReducedOnDemandPrivate()
{
// create reduced points and transform to initial position, save a copy of this for SearchTree
calcReducedPoints();
transformReduced(transMatOrg);
copyReducedToOriginal();
}
void BasicScan::createANNTree()
{
// TODO: metrics
#ifdef WITH_CUDA
if(!ann_kd_tree) {
DataXYZ xyz_orig(get("xyz reduced original"));
ann_kd_tree = new ANNkd_tree(PointArray<double>(xyz_orig).get(), xyz_orig.size(), 3, 1, ANN_KD_STD);
cout << "Cuda tree was generated with " << xyz_orig.size() << " points" << endl;
} else {
cout << "Cuda tree exists. No need for another creation" << endl;
}
#endif
}
void BasicScan::createOcttree()
{
string scanFileName = m_path + "scan" + m_identifier + ".oct";
BOctTree<float>* btree = 0;
// try to load from file, if successful return
if(octtree_loadOct && exists(scanFileName)) {
btree = new BOctTree<float>(scanFileName);
m_data.insert(
std::make_pair(
"octtree",
std::make_pair(
reinterpret_cast<unsigned char*>(btree),
0 // or memorySize()?
)
)
);
return;
}
// create octtree from scan
if(octtree_reduction_voxelSize > 0) { // with reduction, only xyz points
DataXYZ xyz_r(get("xyz reduced show"));
btree = new BOctTree<float>(PointerArray<double>(xyz_r).get(), xyz_r.size(), octtree_voxelSize, octtree_pointtype, true);
} else { // without reduction, xyz + attribute points
float** pts = octtree_pointtype.createPointArray<float>(this);
unsigned int nrpts = size<DataXYZ>("xyz");
btree = new BOctTree<float>(pts, nrpts, octtree_voxelSize, octtree_pointtype, true);
for(unsigned int i = 0; i < nrpts; ++i) delete[] pts[i];
delete[] pts;
}
// save created octtree
if(octtree_saveOct) {
cout << "Saving octree " << scanFileName << endl;
btree->serialize(scanFileName);
}
m_data.insert(
std::make_pair(
"octtree",
std::make_pair(
reinterpret_cast<unsigned char*>(btree),
0 // or memorySize()?
)
)
);
}
unsigned int BasicScan::readFrames()
{
string filename = m_path + "scan" + m_identifier + ".frames";
ifstream file(filename.c_str());
file.exceptions(ifstream::eofbit|ifstream::failbit|ifstream::badbit);
try {
double transformation[16];
unsigned int type;
do {
file >> transformation >> type;
m_frames.push_back(Frame(transformation, type));
} while(file.good());
} catch(...) {}
return m_frames.size();
}
void BasicScan::saveFrames()
{
string filename = m_path + "scan" + m_identifier + ".frames";
ofstream file(filename.c_str());
for(vector<Frame>::iterator it = m_frames.begin(); it != m_frames.end(); ++it) {
file << it->transformation << it->type << '\n';
}
file << flush;
file.close();
}
unsigned int BasicScan::getFrameCount()
{
return m_frames.size();
}
void BasicScan::getFrame(unsigned int i, const double*& pose_matrix, AlgoType& type)
{
const Frame& frame(m_frames.at(i));
pose_matrix = frame.transformation;
type = static_cast<AlgoType>(frame.type);
}
void BasicScan::addFrame(AlgoType type)
{
m_frames.push_back(Frame(transMat, type));
}

File diff suppressed because it is too large

View file

@ -0,0 +1,128 @@
/**
* @file fbr_global.h
* @brief Globally used headers, functions, structures
* @author HamidReza Houshiar. Jacobs University Bremen gGmbH, Germany.
* @date 2012/05/9 14:00
*/
#ifndef FBR_GLOBAL_H_
#define FBR_GLOBAL_H_
#include <iostream>
#include <vector>
#include <fstream>
#if (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
//for opencv 2.4
#if (CV_MAJOR_VERSION >= 2) && (CV_MINOR_VERSION >= 4)
#include <opencv2/nonfree/nonfree.hpp>
#endif
#include <math.h>
#include <string>
#include "slam6d/io_types.h"
#include "slam6d/globals.icc"
using namespace std;
namespace fbr{
//Vertical angle of view of scanner
#define MAX_ANGLE 60.0
#define MIN_ANGLE -40.0
/**
* @enum projection_method
*/
enum projection_method{
EQUIRECTANGULAR,
CYLINDRICAL,
MERCATOR,
RECTILINEAR,
PANNINI,
STEREOGRAPHIC,
ZAXIS,
CONIC
};
/**
* @enum panorama_map_method
*/
enum panorama_map_method{
FARTHEST,
EXTENDED,
};
/**
* @enum feature_method
*/
enum feature_detector_method{
SIFT_DET,
SURF_DET,
ORB_DET,
FAST_DET,
STAR_DET,
};
enum feature_descriptor_method{
SIFT_DES,
SURF_DES,
ORB_DES,
};
/**
* @enum matching_method
*/
enum matcher_method{
BRUTEFORCE,
FLANN,
KNN,
RADIUS,
RATIO,
};
/**
* @enum registration_method
*/
enum registration_method{
ALL,
RANSAC,
DISABLE,
};
/**
* @enum feature_filtration_method
*/
enum feature_filtration_method{
OCCLUSION,
STANDARD_DEVIATION,
DISABLE_FILTER,
};
/**
* @enum matching_filtration_method
*/
enum matching_filtration_method{
FUNDEMENTAL_MATRIX,
DISABLE_MATCHING_FILTER,
};
//RANSAC iteration
#define RANSACITR 20000
//Inlier influence
#define iInfluence 0.5
string scanFormatToString(IOType format);
IOType stringToScanFormat(string format);
string projectionMethodToString(projection_method method);
projection_method stringToProjectionMethod(string method);
string panoramaMapMethodToString(panorama_map_method method);
panorama_map_method stringToPanoramaMapMethod(string method);
string featureDetectorMethodToString(feature_detector_method method);
feature_detector_method stringToFeatureDetectorMethod(string method);
string featureDescriptorMethodToString(feature_descriptor_method method);
feature_descriptor_method stringToFeatureDescriptorMethod(string method);
string matcherMethodToString(matcher_method method);
matcher_method stringToMatcherMethod(string method);
string registrationMethodToString(registration_method method);
registration_method stringToRegistrationMethod(string method);
string featureFiltrationMethodToString(feature_filtration_method method);
feature_filtration_method stringToFeatureFiltrationMethod(string method);
string matchingFiltrationMethodToString(matching_filtration_method method);
matching_filtration_method stringToMatchingFiltrationMethod(string method);
}
#endif /* FBR_GLOBAL_H_ */

View file

@ -0,0 +1,52 @@
#MAKEFLAGS += -j
ifeq ($(MAKE),)
MAKE=make
endif
all: .configured
cd .build && $(MAKE) --no-print-directory
config: .build
cd .build && ccmake ..
touch .configured
.configured: .build
cd .build && cmake .. && cmake ..
touch .configured
.build:
mkdir -p .build
clean: .build
-cd .build && $(MAKE) clean --no-print-directory
-rm -rf .build
rm -f .configured
DOC = doc/
docu: docu_html docu_latex docu_hl
echo
echo
echo + Reference documentation generated: $(DOC)html/index.html
echo + Reference documentation generated: $(DOC)refman.pdf
echo + Highlevel documentation generated: $(DOC)documentation_HL.pdf
echo
docu_html:
doxygen doc/doxygen.cfg
cd $(DOC) ; zip -q html.zip html/*
echo
echo
docu_latex:
$(MAKE) -C $(DOC)latex
cd $(DOC)latex ; dvips refman
cd $(DOC)latex ; ps2pdf14 refman.ps refman.pdf
cp $(DOC)latex/refman.pdf $(DOC)
docu_hl: $(DOC)high_level_doc/documentation.tex
cd $(DOC)high_level_doc ; latex documentation.tex
cd $(DOC)high_level_doc ; bibtex documentation
cd $(DOC)high_level_doc ; latex documentation.tex
cd $(DOC)high_level_doc ; dvips documentation
cd $(DOC)high_level_doc ; ps2pdf14 documentation.ps ../documentation_HL.pdf

View file

@ -0,0 +1,418 @@
#ifndef SCAN_H
#define SCAN_H
#include "io_types.h"
#include "data_types.h"
#include "point_type.h"
#include "ptpair.h"
#include <string>
#include <vector>
#include <boost/thread/mutex.hpp>
#include <boost/thread/locks.hpp>
//! SearchTree types
enum nns_type {
simpleKD, ANNTree, BOCTree
};
class Scan;
typedef std::vector<Scan*> ScanVector;
class SearchTree;
class ANNkd_tree;
/** HOWTO scan
First: Load scans (if you want to use the scanmanager, use ManagedScan)
BasicScan/ManagedScan::openDirectory(path, type, start, end);
Pass it to functions (by reference to link it to the same instance) or store it in a global variable
After loading you might want to set parameters
for(ScanVector::iterator it = Scan::allScans.begin(); it != Scan::allScans.end(); ++it) {
Scan* scan = *it;
scan->setRangeFilter(maxDist, minDist);
scan->setHeightFilter(top, bottom); // thermo
scan->setReductionParameter(voxelSize, nrpts[, pointtype]);
scan->setSearchTreeParameter(nns_method, use_cuda);
}
Access the contained data, will be loaded and calculated on demand
DataXYZ xyz = scan->get("xyz");
DataXYZ reduced = scan->get("xyz reduced");
DataRGB rgb = scan->get("rgb");
xyz[i][0..2]
reflectance[i]
unsigned int size = scan->size("xyz reduced");
In order to use the prefetching of all requested data fields in the scanserver, mark them for use. This is relevant for efficiency; otherwise the files would be loaded again each time another data field is requested.
scan->get(DATA_XYZ | DATA_RGB | ...);
Under some circumstances a data field is not available (e.g. no color in uos-type scans)
DataRGB rgb = scan->get("rgb");
if(rgb.valid()) { ok, do something }
If backward compatibility with pointer arrays is needed, the PointerArray class can adapt
BOctTree(PointerArray(scan->get("xyz")).get(), scan->size("xyz"), ...);
If data isn't needed anymore, flag it for removal
scan->clear("xyz");
scan->clear(DATA_XYZ | DATA_RGB | ...);
Creating data fields with the correct byte size
scan->create("xyz somethingelse", sizeof(double)*3*N);
Reading frames in show:
unsigned int size = scan->readFrames();
const double* pose;
AlgoType type;
scan->getFrame(i, pose, type);
Last, if program ends, clean up
Scan::closeDirectory();
**/
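/* A minimal usage sketch following the HOWTO above, assuming a directory "dat/"
   of UOS-format scans and no scanserver; it only uses the interface documented
   in this header and is meant as an illustration, not as a fixed recipe.

   void exampleProcessAllScans()
   {
     Scan::openDirectory(false, "dat/", UOS, 0, -1);  // load all scans
     for(ScanVector::iterator it = Scan::allScans.begin();
         it != Scan::allScans.end(); ++it) {
       Scan* scan = *it;
       scan->setRangeFilter(-1, -1);                  // no distance filtering
       scan->setReductionParameter(10.0, 0);          // voxel size 10, center of each voxel
       scan->setSearchTreeParameter(simpleKD, false); // k-d tree, no CUDA
       DataXYZ xyz = scan->get("xyz");                // loaded on demand
       unsigned int n = scan->size<DataXYZ>("xyz");
       for(unsigned int i = 0; i < n; ++i) {
         // use xyz[i][0], xyz[i][1], xyz[i][2]
       }
       scan->clear("xyz");                            // flag the data for removal
     }
     Scan::closeDirectory();
   }
*/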
/**
* This class bundles common features of different scan implementations by
* abstraction. It handles the algorithmic parts and leaves IO and other
* features to the deriving classes.
*/
class Scan {
//friend class SearchTree; // TODO: is this necessary?
public:
enum AlgoType {
INVALID, ICP, ICPINACTIVE, LUM, ELCH, LOOPTORO, LOOPHOGMAN, GRAPHTORO,
GRAPHHOGMAN
};
// delete copy-ctor and assignment, scans shouldn't be copied by basic class
Scan(const Scan& other) = delete;
Scan& operator=(const Scan& other) = delete;
virtual ~Scan();
//! Holder of all scans - also used in transform for adding frames for each scan at the same time
static std::vector<Scan*> allScans;
/**
* Attempt to read a directory under \a path and return its read scans.
* No scans are loaded at this point, only checked if all exist.
*
* @param scanserver whether to use managed scans in the scanserver or not
* @param path to the directory containing the scans
* @param type determining which ScanIO to use
* @param start first scan to use
* @param end last scan to use, -1 means from start to last available
*/
static void openDirectory(bool scanserver, const std::string& path, IOType type,
int start, int end = -1);
/**
* "Close" a directory by deleting all its scans and emptying the
* Scan::allScans vector.
*/
static void closeDirectory();
/* Input filtering and parameter functions */
//! Input filtering for all points based on their euclidean length
virtual void setRangeFilter(double max, double min) = 0;
//! Input filtering for all points based on their height
virtual void setHeightFilter(double top, double bottom) = 0;
//! Input mutation to set range of all points to a constant value;
virtual void setRangeMutation(double range) { }
//! Set reduction parameters, but don't reduce yet
virtual void setReductionParameter(double voxelSize, int nrpts = 0,
PointType pointtype = PointType());
//! Set SearchTree type, but don't create it yet
void setSearchTreeParameter(int nns_method, bool cuda_enabled);
/**
* Set octtree parameters for show
* @param loadOct will load the serialized octtree from disk regardless
* @param saveOct serialize octtree if not loaded by loadOct after creation
*/
virtual void setOcttreeParameter(double reduction_voxelSize,
double octtree_voxelSize, PointType pointtype,
bool loadOct, bool saveOct);
/* Basic getter functions */
inline const double* get_rPos() const;
inline const double* get_rPosTheta() const;
inline const double* get_rPosQuat() const;
//! Pose matrix after initial and match transformations (org+dalign)
inline const double* get_transMat() const;
//! Original pose matrix after initial transform
inline const double* get_transMatOrg() const;
//! Accumulated delta transformation matrix
inline const double* getDAlign() const;
inline SearchTree* getSearchTree();
inline ANNkd_tree* getANNTree() const;
virtual const char* getIdentifier() const = 0;
//! Determine the maximum number of reduced points in \a scans
static unsigned int getMaxCountReduced(ScanVector& scans);
/* Functions for altering data fields, implementation specific */
/**
* Get the data field \a identifier, calculate it on demand if necessary.
*
* If "xyz reduced" or "xyz reduced original" is requested, the reduction is
* started with "xyz" as input.
*/
virtual DataPointer get(const std::string& identifier) = 0;
/**
* Load the requested IODataTypes, joined by |, from the scan file.
*
* This feature is necessary to load multiple data fields at once, not all
* one by one with each get("...") access.
*/
virtual void get(unsigned int types) = 0;
/**
* Creates a data field \a identifier with \a size bytes.
*/
virtual DataPointer create(const std::string& identifier, unsigned int size) = 0;
/**
* Clear the data field \a identifier, removing its allocated memory if
* possible or marking it for prioritized removal.
*/
virtual void clear(const std::string& identifier) = 0;
//! Extension to clear for more than one identifier, e.g. clear(DATA_XYZ | DATA_RGB);
void clear(unsigned int types);
/**
* Get the size of \a identifier as if it were requested and size() called
* upon its type specialized DataPointer class.
* e.g size<DataXYZ>("xyz reduced")
*/
template<typename T>
unsigned int size(const std::string& identifier) {
return (T(get(identifier))).size();
}
/* Frame handling functions */
/**
* Open the .frames-file and read its contents. If not read, the frame list
* will be empty.
* @return count of frames if file has been read, zero otherwise
*/
virtual unsigned int readFrames() = 0;
/**
* Write the accumulated frames into a .frames-file.
*/
virtual void saveFrames() = 0;
//! Count of frames
virtual unsigned int getFrameCount() = 0;
//! Get contents of a frame, pass matrix pointer and type by reference
virtual void getFrame(unsigned int i, const double*& pose_matrix, AlgoType& type) = 0;
protected:
/**
* Called from transform, this will add its current transMat pose with
* the given type as a frame into the list of frames
*/
virtual void addFrame(AlgoType type) = 0;
public:
/* Direct creation of reduced points and search tree */
//! Apply reduction and initial transMatOrg transformation
void toGlobal();
//! Copy reduced points to original and create search tree on it
void createSearchTree();
/* Common transformation and matching functions */
void mergeCoordinatesWithRoboterPosition(Scan* prevScan);
void transformAll(const double alignxf[16]);
void transformAll(const double alignQuat[4], const double alignt[3]);
void transform(const double alignxf[16],
const AlgoType type, int islum = 0);
void transform(const double alignQuat[4],
const double alignt[3], const AlgoType type, int islum = 0);
void transformToMatrix(double alignxf[16],
const AlgoType type, int islum = 0);
void transformToEuler(double rP[3], double rPT[3],
const AlgoType type, int islum = 0);
void transformToQuat(double rP[3], double rPQ[4],
const AlgoType type, int islum = 0);
// Scan matching functions
static void getPtPairs(std::vector<PtPair> *pairs,
Scan* Source, Scan* Target,
int thread_num,
int rnd, double max_dist_match2, double &sum,
double *centroid_m, double *centroid_d);
static void getNoPairsSimple(std::vector<double*> &diff,
Scan* Source, Scan* Target,
int thread_num,
double max_dist_match2);
static void getPtPairsSimple(std::vector<PtPair> *pairs,
Scan* Source, Scan* Target,
int thread_num,
int rnd, double max_dist_match2,
double *centroid_m, double *centroid_d);
static void getPtPairsParallel(std::vector<PtPair> *pairs,
Scan* Source, Scan* Target,
int thread_num, int step,
int rnd, double max_dist_match2,
double *sum,
double centroid_m[OPENMP_NUM_THREADS][3],
double centroid_d[OPENMP_NUM_THREADS][3]);
protected:
/**
* The pose of the scan
* Note: rPos/rPosTheta and transMat _should_
* always represent the same pose!!!
*/
double rPos[3], //!< 3D position
rPosTheta[3], //!< 3D rotation in Euler representation
rQuat[4], //!< 3D rotation in Quaternion representation
transMat[16], //!< (4x4) transformation matrix
transMatOrg[16]; //!< The original pose of the scan, e.g., from odometry
/**
* The dalignxf transformation represents the delta transformation virtually applied
* to the tree and is used to compute the actual corresponding points.
*/
double dalignxf[16];
//! Run ICP on GPU instead of CPU
bool cuda_enabled;
//! Defines the method used for nearest neighbor search and which tree to use
int nns_method;
//! SearchTree for point pair matching, works on the search points
SearchTree* kd;
//! This KD tree is created only for the CUDA usages
ANNkd_tree* ann_kd_tree;
//! Voxelsize of the octtree used for reduction
double reduction_voxelSize;
//! Which point to take out of the reduction octtree, 0 for center
int reduction_nrpts;
//! Pointtype used for the reduction octtree
PointType reduction_pointtype;
//! Type of the searchtree to be created
int searchtree_nnstype;
//! Use CUDA for searching
bool searchtree_cuda_enabled;
//! Flag whether "xyz reduced" has been initialized for this Scan yet
bool m_has_reduced;
//! Reduction value used for octtree input
double octtree_reduction_voxelSize;
//! Voxelsize used in the octtree itself
double octtree_voxelSize;
//! Pointtype for the Octtree
PointType octtree_pointtype;
//! Flags to load or save the octtrees from/to storage
bool octtree_loadOct, octtree_saveOct;
/**
* Basic initializing constructor calling the initialization function.
* Can only be called from deriving classes.
*/
Scan();
/**
* This function handles the reduction of points. It builds a lock for
* multithread-safety and calls calcReducedOnDemandPrivate.
*
* The intention is to reduce points, transform them to the initial pose and
* then copy them to original for the SearchTree.
*/
void calcReducedOnDemand();
//! Create specific SearchTree variants matching the capability of the Scan
virtual void createSearchTreePrivate() = 0;
//! Create reduced points in a multithread-safe environment matching the capability of the Scan
virtual void calcReducedOnDemandPrivate() = 0;
//! Internal function of transform which alters the reduced points
void transformReduced(const double alignxf[16]);
//! Internal function of transform which handles the matrices
void transformMatrix(const double alignxf[16]);
//@FIXME
public:
//! Creating reduced points
void calcReducedPoints();
protected:
//! Copies reduced points to original points without any transformation.
void copyReducedToOriginal();
//! Inverse functionality of copyReducedToOriginal.
void copyOriginalToReduced();
private:
//! flag for openDirectory and closeDirectory to distinguish the scans
static bool scanserver;
public:
//! Mutex for safely reducing points and creating the search tree just once in a multithreaded environment
// it can not be compiled on win32 using boost 1.48, therefore we remove it temporarily
boost::mutex m_mutex_reduction, m_mutex_create_tree;
};
#include "scan.icc"
#endif //SCAN_H

View file

@ -0,0 +1,753 @@
/**
*
* Copyright (C) Jacobs University Bremen
*
* @author Vaibhav Kumar Mehta
* @file normals.cc
*/
#include <iostream>
#include <string>
#include <fstream>
#include <errno.h>
#include <boost/program_options.hpp>
#include <slam6d/io_types.h>
#include <slam6d/globals.icc>
#include <slam6d/scan.h>
#include "slam6d/fbr/panorama.h"
#include <scanserver/clientInterface.h>
#include <ANN/ANN.h>
#include "newmat/newmat.h"
#include "newmat/newmatap.h"
using namespace NEWMAT;
#ifdef _MSC_VER
#define strcasecmp _stricmp
#define strncasecmp _strnicmp
#else
#include <strings.h>
#endif
namespace po = boost::program_options;
using namespace std;
enum normal_method {AKNN, ADAPTIVE_AKNN, PANORAMA, PANORAMA_FAST};
/*
* validates normal calculation method specification
*/
void validate(boost::any& v, const std::vector<std::string>& values,
normal_method*, int) {
if (values.size() == 0)
throw std::runtime_error("Invalid model specification");
string arg = values.at(0);
if(strcasecmp(arg.c_str(), "AKNN") == 0) v = AKNN;
else if(strcasecmp(arg.c_str(), "ADAPTIVE_AKNN") == 0) v = ADAPTIVE_AKNN;
else if(strcasecmp(arg.c_str(), "PANORAMA") == 0) v = PANORAMA;
else if(strcasecmp(arg.c_str(), "PANORAMA_FAST") == 0) v = PANORAMA_FAST;
else throw std::runtime_error(std::string("normal calculation method ") + arg + std::string(" is unknown"));
}
/// validate IO types
void validate(boost::any& v, const std::vector<std::string>& values,
IOType*, int) {
if (values.size() == 0)
throw std::runtime_error("Invalid model specification");
string arg = values.at(0);
try {
v = formatname_to_io_type(arg.c_str());
} catch (...) { // runtime_error
throw std::runtime_error("Format " + arg + " unknown.");
}
}
/// Parse commandline options
void parse_options(int argc, char **argv, int &start, int &end, bool &scanserver, int &max_dist, int &min_dist, string &dir,
IOType &iotype, int &k1, int &k2, normal_method &ntype,int &width,int &height)
{
/// ----------------------------------
/// set up program commandline options
/// ----------------------------------
po::options_description cmd_options("Usage: calculateNormals <options> where options are (default values in brackets)");
cmd_options.add_options()
("help,?", "Display this help message")
("start,s", po::value<int>(&start)->default_value(0), "Start at scan number <arg>")
("end,e", po::value<int>(&end)->default_value(-1), "Stop at scan number <arg>")
("scanserver,S", po::value<bool>(&scanserver)->default_value(false), "Use the scanserver as an input method")
("format,f", po::value<IOType>(&iotype)->default_value(UOS),
"using shared library <arg> for input. (chose format from [uos|uosr|uos_map|"
"uos_rgb|uos_frames|uos_map_frames|old|rts|rts_map|ifp|"
"riegl_txt|riegl_rgb|riegl_bin|zahn|ply])")
("max,M", po::value<int>(&max_dist)->default_value(-1),"neglegt all data points with a distance larger than <arg> 'units")
("min,m", po::value<int>(&min_dist)->default_value(-1),"neglegt all data points with a distance smaller than <arg> 'units")
("normal,g", po::value<normal_method>(&ntype)->default_value(AKNN), "normal calculation method "
"(AKNN, ADAPTIVE_AKNN, PANORAMA, PANORAMA_FAST)")
("K1,k", po::value<int>(&k1)->default_value(20), "<arg> value of K value used in the nearest neighbor search of ANN or" "kmin for k-adaptation")
("K2,K", po::value<int>(&k2)->default_value(20), "<arg> value of Kmax for k-adaptation")
("width,w", po::value<int>(&width)->default_value(1280),"width of panorama image")
("height,h", po::value<int>(&height)->default_value(960),"height of panorama image")
;
po::options_description hidden("Hidden options");
hidden.add_options()
("input-dir", po::value<string>(&dir), "input dir");
po::positional_options_description pd;
pd.add("input-dir", 1);
po::options_description all;
all.add(cmd_options).add(hidden);
po::variables_map vmap;
po::store(po::command_line_parser(argc, argv).options(all).positional(pd).run(), vmap);
po::notify(vmap);
if (vmap.count("help")) {
cout << cmd_options << endl << endl;
cout << "SAMPLE COMMAND FOR CALCULATING NORMALS" << endl;
cout << " bin/normals -s 0 -e 0 -f UOS -g AKNN -k 20 dat/" <<endl;
cout << endl << endl;
cout << "SAMPLE COMMAND FOR VIEWING CALCULATING NORMALS IN RGB SPACE" << endl;
cout << " bin/show -c -f UOS_RGB dat/normals/" << endl;
exit(-1);
}
// make sure the scan directory path ends with a slash
if (dir[dir.length()-1] != '/') dir = dir + "/";
}
///////////////////////////////////////////////////////
/////////////NORMALS USING AKNN METHOD ////////////////
///////////////////////////////////////////////////////
void calculateNormalsAKNN(vector<Point> &normals,vector<Point> &points, int k, const double _rPos[3] )
{
cout<<"Total number of points: "<<points.size()<<endl;
int nr_neighbors = k;
ColumnVector rPos(3);
for (int i = 0; i < 3; ++i)
rPos(i+1) = _rPos[i];
ANNpointArray pa = annAllocPts(points.size(), 3);
for (size_t i=0; i<points.size(); ++i)
{
pa[i][0] = points[i].x;
pa[i][1] = points[i].y;
pa[i][2] = points[i].z;
}
ANNkd_tree t(pa, points.size(), 3);
ANNidxArray nidx = new ANNidx[nr_neighbors];
ANNdistArray d = new ANNdist[nr_neighbors];
for (size_t i=0; i<points.size(); ++i)
{
ANNpoint p = pa[i];
//ANN search for k nearest neighbors
//indexes of the neighbors along with the query point
//stored in the array n
t.annkSearch(p, nr_neighbors, nidx, d, 0.0);
Point mean(0.0,0.0,0.0);
Matrix X(nr_neighbors,3);
SymmetricMatrix A(3);
Matrix U(3,3);
DiagonalMatrix D(3);
//calculate mean for all the points
for (int j=0; j<nr_neighbors; ++j)
{
mean.x += points[nidx[j]].x;
mean.y += points[nidx[j]].y;
mean.z += points[nidx[j]].z;
}
mean.x /= nr_neighbors;
mean.y /= nr_neighbors;
mean.z /= nr_neighbors;
//calculate covariance = A for all the points
for (int i = 0; i < nr_neighbors; ++i) {
X(i+1, 1) = points[nidx[i]].x - mean.x;
X(i+1, 2) = points[nidx[i]].y - mean.y;
X(i+1, 3) = points[nidx[i]].z - mean.z;
}
A << 1.0/nr_neighbors * X.t() * X;
EigenValues(A, D, U);
//normal = eigenvector corresponding to lowest
//eigen value that is the 1st column of matrix U
ColumnVector n(3);
n(1) = U(1,1);
n(2) = U(2,1);
n(3) = U(3,1);
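// orient the normal consistently with respect to the ray from the scan position rPos to the point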
ColumnVector point_vector(3);
point_vector(1) = p[0] - rPos(1);
point_vector(2) = p[1] - rPos(2);
point_vector(3) = p[2] - rPos(3);
point_vector = point_vector / point_vector.NormFrobenius();
Real angle = (n.t() * point_vector).AsScalar();
if (angle < 0) {
n *= -1.0;
}
n = n / n.NormFrobenius();
normals.push_back(Point(n(1), n(2), n(3)));
}
delete[] nidx;
delete[] d;
annDeallocPts(pa);
}
////////////////////////////////////////////////////////////////
/////////////NORMALS USING ADAPTIVE AKNN METHOD ////////////////
////////////////////////////////////////////////////////////////
void calculateNormalsAdaptiveAKNN(vector<Point> &normals,vector<Point> &points,
int kmin, int kmax, const double _rPos[3])
{
ColumnVector rPos(3);
for (int i = 0; i < 3; ++i)
rPos(i+1) = _rPos[i];
cout<<"Total number of points: "<<points.size()<<endl;
int nr_neighbors;
ANNpointArray pa = annAllocPts(points.size(), 3);
for (size_t i=0; i<points.size(); ++i)
{
pa[i][0] = points[i].x;
pa[i][1] = points[i].y;
pa[i][2] = points[i].z;
}
ANNkd_tree t(pa, points.size(), 3);
Point mean(0.0,0.0,0.0);
double temp_n[3],norm_n = 0.0;
double e1,e2,e3;
for (size_t i=0; i<points.size(); ++i)
{
Matrix U(3,3);
ANNpoint p = pa[i];
for(int kidx = kmin; kidx < kmax; kidx++)
{
nr_neighbors=kidx+1;
ANNidxArray nidx = new ANNidx[nr_neighbors];
ANNdistArray d = new ANNdist[nr_neighbors];
//ANN search for k nearest neighbors
//indexes of the neighbors along with the query point
//stored in the array n
t.annkSearch(p, nr_neighbors, nidx, d, 0.0);
mean.x=0,mean.y=0,mean.z=0;
//calculate mean for all the points
for (int j=0; j<nr_neighbors; ++j)
{
mean.x += points[nidx[j]].x;
mean.y += points[nidx[j]].y;
mean.z += points[nidx[j]].z;
}
mean.x /= nr_neighbors;
mean.y /= nr_neighbors;
mean.z /= nr_neighbors;
Matrix X(nr_neighbors,3);
SymmetricMatrix A(3);
DiagonalMatrix D(3);
//calculate covariance = A for all the points
for (int j = 0; j < nr_neighbors; ++j) {
X(j+1, 1) = points[nidx[j]].x - mean.x;
X(j+1, 2) = points[nidx[j]].y - mean.y;
X(j+1, 3) = points[nidx[j]].z - mean.z;
}
A << 1.0/nr_neighbors * X.t() * X;
EigenValues(A, D, U);
e1 = D(1);
e2 = D(2);
e3 = D(3);
delete[] nidx;
delete[] d;
//We take the particular k if the second maximum eigen value
//is at least 25 percent of the maximum eigen value
if ((e1 > 0.25 * e2) && (fabs(1.0 - (double)e2/(double)e3) < 0.25))
break;
}
//normal = eigenvector corresponding to lowest
//eigen value that is the 1st column of matrix U
ColumnVector n(3);
n(1) = U(1,1);
n(2) = U(2,1);
n(3) = U(3,1);
ColumnVector point_vector(3);
point_vector(1) = p[0] - rPos(1);
point_vector(2) = p[1] - rPos(2);
point_vector(3) = p[2] - rPos(3);
point_vector = point_vector / point_vector.NormFrobenius();
Real angle = (n.t() * point_vector).AsScalar();
if (angle < 0) {
n *= -1.0;
}
n = n / n.NormFrobenius();
normals.push_back(Point(n(1), n(2), n(3)));
}
annDeallocPts(pa);
}
///////////////////////////////////////////////////////
/////////////NORMALS USING IMAGE NEIGHBORS ////////////
///////////////////////////////////////////////////////
void calculateNormalsPANORAMA(vector<Point> &normals,
vector<Point> &points,
vector< vector< vector< cv::Vec3f > > > extendedMap,
const double _rPos[3])
{
ColumnVector rPos(3);
for (int i = 0; i < 3; ++i)
rPos(i+1) = _rPos[i];
cout<<"Total number of points: "<<points.size()<<endl;
points.clear();
int nr_neighbors = 0;
cout << "height of Image: "<<extendedMap.size()<<endl;
cout << "width of Image: "<<extendedMap[0].size()<<endl;
// use the neighboring image pixels as the nearest neighbors and then apply the same PCA method as in AKNN
//temporary dynamic array for all the neighbors of a given point
vector<cv::Vec3f> neighbors;
for (size_t i=0; i< extendedMap.size(); i++)
{
for (size_t j=0; j<extendedMap[i].size(); j++)
{
if (extendedMap[i][j].size() == 0) continue;
neighbors.clear();
Point mean(0.0,0.0,0.0);
double temp_n[3],norm_n = 0.0;
// Offset for neighbor computation
int offset[2][5] = {{-1,0,1,0,0},{0,-1,0,1,0}};
// Traverse the cell itself and its 4-neighborhood in the extended map
for (int n = 0; n < 5; ++n) {
int x = i + offset[0][n];
int y = j + offset[1][n];
// Copy the neighboring buckets into the vector
if (x >= 0 && x < (int)extendedMap.size() &&
y >= 0 && y < (int)extendedMap[x].size()) {
for (unsigned int k = 0; k < extendedMap[x][y].size(); k++) {
neighbors.push_back(extendedMap[x][y][k]);
}
}
}
nr_neighbors = neighbors.size();
cv::Vec3f p = extendedMap[i][j][0];
//if no neighbor point is found in the 4-neighboring pixels then normal is set to zero
if (nr_neighbors < 3)
{
points.push_back(Point(p[0], p[1], p[2]));
normals.push_back(Point(0.0,0.0,0.0));
continue;
}
//matrices for the PCA of the neighborhood
Matrix X(nr_neighbors,3);
SymmetricMatrix A(3);
Matrix U(3,3);
DiagonalMatrix D(3);
//calculate mean for all the points
for(int k = 0; k < nr_neighbors; k++)
{
cv::Vec3f pp = neighbors[k];
mean.x += pp[0];
mean.y += pp[1];
mean.z += pp[2];
}
mean.x /= nr_neighbors;
mean.y /= nr_neighbors;
mean.z /= nr_neighbors;
//calculate covariance = A for all the points
for (int i = 0; i < nr_neighbors; ++i) {
cv::Vec3f pp = neighbors[i];
X(i+1, 1) = pp[0] - mean.x;
X(i+1, 2) = pp[1] - mean.y;
X(i+1, 3) = pp[2] - mean.z;
}
A << 1.0/nr_neighbors * X.t() * X;
EigenValues(A, D, U);
//normal = eigenvector corresponding to lowest
//eigen value that is the 1st column of matrix U
ColumnVector n(3);
n(1) = U(1,1);
n(2) = U(2,1);
n(3) = U(3,1);
ColumnVector point_vector(3);
point_vector(1) = p[0] - rPos(1);
point_vector(2) = p[1] - rPos(2);
point_vector(3) = p[2] - rPos(3);
point_vector = point_vector / point_vector.NormFrobenius();
Real angle = (n.t() * point_vector).AsScalar();
if (angle < 0) {
n *= -1.0;
}
n = n / n.NormFrobenius();
for (unsigned int k = 0; k < extendedMap[i][j].size(); k++) {
cv::Vec3f p = extendedMap[i][j][k];
points.push_back(Point(p[0], p[1], p[2]));
normals.push_back(Point(n(1), n(2), n(3)));
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////
///////////FAST NORMALS USING PANORAMA EQUIRECTANGULAR RANGE IMAGE //////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////
/*
void calculateNormalsFAST(vector<Point> &normals,vector<Point> &points,cv::Mat &img,vector<vector<vector<cv::Vec3f>>> extendedMap)
{
cout<<"Total number of points: "<<points.size()<<endl;
points.clear();
int nr_points = 0;
//int nr_neighbors = 0,nr_neighbors_center = 0;
cout << "height of Image: "<<extendedMap.size()<<endl;
cout << "width of Image: "<<extendedMap[0].size()<<endl;
for (size_t i=0; i< extendedMap.size(); ++i)
{
for (size_t j=0; j<extendedMap[0].size(); j++)
{
double theta,phi,rho;
double x,y,z;
double dRdTheta,dRdPhi;
double n[3],m;
nr_points = extendedMap[i][j].size();
if (nr_points == 0 ) continue;
for (int k = 0; k< nr_points; k++)
{
cv::Vec3f p = extendedMap[i][j][k];
x = p[0];
y = p[1];
z = p[2];
rho = sqrt(x*x + y*y + z*z);
theta = atan(y/x);
phi = atan(z/x);
//Sobel Filter for the derivative
dRdTheta = dRdPhi = 0.0;
if (i == 0 || i == extendedMap.size()-1 || j == 0 || j == extendedMap[0].size()-1)
{
points.push_back(Point(x, y, z));
normals.push_back(Point(0.0,0.0,0.0));
continue;
}
dRdPhi += 10*img.at<uchar>(i-1,j);
dRdPhi += 3 *img.at<uchar>(i-1,j-1);
dRdPhi += 3 *img.at<uchar>(i-1,j+1);
dRdPhi -= 10*img.at<uchar>(i+1,j);
dRdPhi -= 3 *img.at<uchar>(i+1,j-1);
dRdPhi -= 3 *img.at<uchar>(i+1,j+1);
dRdTheta += 10*img.at<uchar>(i,j-1);
dRdTheta += 3 *img.at<uchar>(i-1,j-1);
dRdTheta += 3 *img.at<uchar>(i+1,j-1);
dRdTheta -= 10*img.at<uchar>(i,j+1);
dRdTheta -= 3 *img.at<uchar>(i-1,j+1);
dRdTheta -= 3 *img.at<uchar>(i+1,j+1);
n[0] = cos(theta) * sin(phi) - sin(theta) * dRdTheta / rho / sin(phi) +
cos(theta) * cos(phi) * dRdPhi / rho;
n[1] = sin(theta) * sin(phi) + cos(theta) * dRdTheta / rho / sin(phi) +
sin(theta) * cos(phi) * dRdPhi / rho;
n[2] = cos(phi) - sin(phi) * dRdPhi / rho;
//n[2] = -n[2];
m = sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);
n[0] /= m; n[1] /= m; n[2] /= m;
points.push_back(Point(x, y, z));
normals.push_back(Point(n[0],n[1],n[2]));
}
}
}
}
*/
/*
* retrieve a cv::Mat with x,y,z,r from a scan object
* functionality borrowed from scan_cv::convertScanToMat but this function
* does not allow a scanserver to be used, prints to stdout and can only
* handle a single scan
*/
cv::Mat scan2mat(Scan *source)
{
DataXYZ xyz = source->get("xyz");
DataReflectance xyz_reflectance = source->get("reflectance");
unsigned int nPoints = xyz.size();
cv::Mat scan(nPoints,1,CV_32FC(4));
scan = cv::Scalar::all(0);
cv::MatIterator_<cv::Vec4f> it;
it = scan.begin<cv::Vec4f>();
for(unsigned int i = 0; i < nPoints; i++){
float x, y, z, reflectance;
x = xyz[i][0];
y = xyz[i][1];
z = xyz[i][2];
if(xyz_reflectance.size() != 0)
{
reflectance = xyz_reflectance[i];
//normalize the reflectance
reflectance += 32;
reflectance /= 64;
reflectance -= 0.2;
reflectance /= 0.3;
if (reflectance < 0) reflectance = 0;
if (reflectance > 1) reflectance = 1;
}
(*it)[0] = x;
(*it)[1] = y;
(*it)[2] = z;
if(xyz_reflectance.size() != 0)
(*it)[3] = reflectance;
else
(*it)[3] = 0;
++it;
}
return scan;
}
/*
* convert a matrix of float values (range image) to a matrix of unsigned
* eight bit characters using different techniques
*/
cv::Mat float2uchar(cv::Mat &source, bool logarithm, float cutoff)
{
cv::Mat result(source.size(), CV_8U, cv::Scalar::all(0));
float max = 0;
// find maximum value
if (cutoff == 0.0) {
// without cutoff, just iterate through all values to find the largest
for (cv::MatIterator_<float> it = source.begin<float>();
it != source.end<float>(); ++it) {
float val = *it;
if (val > max) {
max = val;
}
}
} else {
// when a cutoff is specified, sort all the points by value and then
// specify the max so that <cutoff> values are larger than it
vector<float> sorted(source.cols*source.rows);
int i = 0;
for (cv::MatIterator_<float> it = source.begin<float>();
it != source.end<float>(); ++it, ++i) {
sorted[i] = *it;
}
std::sort(sorted.begin(), sorted.end());
max = sorted[(int)(source.cols*source.rows*(1.0-cutoff))];
cout << "A cutoff of " << cutoff << " resulted in a max value of " << max << endl;
}
cv::MatIterator_<float> src = source.begin<float>();
cv::MatIterator_<uchar> dst = result.begin<uchar>();
cv::MatIterator_<float> end = source.end<float>();
if (logarithm) {
// stretch values from 0 to max logarithmically over 0 to 255
// using the logarithm allows representing smaller values with more
// precision and larger values with less
max = log(max+1);
for (; src != end; ++src, ++dst) {
float val = (log(*src+1)*255.0)/max;
if (val > 255)
*dst = 255;
else
*dst = (uchar)val;
}
} else {
// stretch values from 0 to max linearly over 0 to 255
for (; src != end; ++src, ++dst) {
float val = (*src*255.0)/max;
if (val > 255)
*dst = 255;
else
*dst = (uchar)val;
}
}
return result;
}
/// Write a pose file with the specified name
void writePoseFiles(string dir, const double* rPos, const double* rPosTheta,int scanNumber)
{
string poseFileName = dir + "/scan" + to_string(scanNumber, 3) + ".pose";
ofstream posout(poseFileName.c_str());
posout << rPos[0] << " "
<< rPos[1] << " "
<< rPos[2] << endl
<< deg(rPosTheta[0]) << " "
<< deg(rPosTheta[1]) << " "
<< deg(rPosTheta[2]) << endl;
posout.clear();
posout.close();
}
/// write scan files for all segments
void writeScanFiles(string dir, vector<Point> &points, vector<Point> &normals, int scanNumber)
{
string ofilename = dir + "/scan" + to_string(scanNumber, 3) + ".3d";
ofstream normptsout(ofilename.c_str());
for (size_t i=0; i<points.size(); ++i)
{
int r,g,b;
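// encode the normal as a color: x and y are mapped from [-1,1] to [0,255], |z| from [0,1] to [0,255]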
r = (int)(normals[i].x * (127.5) + 127.5);
g = (int)(normals[i].y * (127.5) + 127.5);
b = (int)(fabs(normals[i].z) * (255.0));
normptsout <<points[i].x<<" "<<points[i].y<<" "<<points[i].z<<" "<<r<<" "<<g<<" "<<b<<" "<<endl;
}
normptsout.clear();
normptsout.close();
}
/// =============================================
/// Main
/// =============================================
int main(int argc, char** argv)
{
int start, end;
bool scanserver;
int max_dist, min_dist;
string dir;
IOType iotype;
int k1, k2;
normal_method ntype;
int width, height;
parse_options(argc, argv, start, end, scanserver, max_dist, min_dist,
dir, iotype, k1, k2, ntype, width, height);
/// ----------------------------------
/// Prepare and read scans
/// ----------------------------------
if (scanserver) {
try {
ClientInterface::create();
} catch(std::runtime_error& e) {
cerr << "ClientInterface could not be created: " << e.what() << endl;
cerr << "Start the scanserver first." << endl;
exit(-1);
}
}
/// Make directory for saving the scan segments
string normdir = dir + "normals";
#ifdef _MSC_VER
int success = mkdir(normdir.c_str());
#else
int success = mkdir(normdir.c_str(), S_IRWXU|S_IRWXG|S_IRWXO);
#endif
if(success == 0) {
cout << "Writing segments to " << normdir << endl;
} else if(errno == EEXIST) {
cout << "WARN: Directory " << normdir << " exists already. Contents will be overwriten" << endl;
} else {
cerr << "Creating directory " << normdir << " failed" << endl;
exit(1);
}
/// Read the scans
Scan::openDirectory(scanserver, dir, iotype, start, end);
if(Scan::allScans.size() == 0) {
cerr << "No scans found. Did you use the correct format?" << endl;
exit(-1);
}
cv::Mat img;
/// --------------------------------------------
/// Initialize and perform segmentation
/// --------------------------------------------
std::vector<Scan*>::iterator it = Scan::allScans.begin();
int scanNumber = 0;
for( ; it != Scan::allScans.end(); ++it) {
Scan* scan = *it;
// apply optional filtering
scan->setRangeFilter(max_dist, min_dist);
const double* rPos = scan->get_rPos();
const double* rPosTheta = scan->get_rPosTheta();
/// read scan into points
DataXYZ xyz(scan->get("xyz"));
vector<Point> points;
points.reserve(xyz.size());
vector<Point> normals;
normals.reserve(xyz.size());
for(unsigned int j = 0; j < xyz.size(); j++) {
points.push_back(Point(xyz[j][0], xyz[j][1], xyz[j][2]));
}
if(ntype == AKNN)
calculateNormalsAKNN(normals,points, k1, rPos);
else if(ntype == ADAPTIVE_AKNN)
calculateNormalsAdaptiveAKNN(normals,points, k1, k2, rPos);
else
{
// create panorama
fbr::panorama fPanorama(width, height, fbr::EQUIRECTANGULAR, 1, 0, fbr::EXTENDED);
fPanorama.createPanorama(scan2mat(scan));
// the range image has to be converted from float to uchar
img = fPanorama.getRangeImage();
img = float2uchar(img, 0, 0.0);
if(ntype == PANORAMA)
calculateNormalsPANORAMA(normals,points,fPanorama.getExtendedMap(), rPos);
else if(ntype == PANORAMA_FAST)
cout << "PANORAMA_FAST is not working yet" << endl;
// calculateNormalsFAST(normals,points,img,fPanorama.getExtendedMap());
}
// pose file (repeated for the number of segments)
writePoseFiles(normdir, rPos, rPosTheta, scanNumber);
// scan files for all segments
writeScanFiles(normdir, points,normals,scanNumber);
scanNumber++;
}
// shutdown everything
if (scanserver)
ClientInterface::destroy();
else
Scan::closeDirectory();
cout << "Normal program end" << endl;
return 0;
}

View file

@ -0,0 +1,101 @@
/*
* grabFramesCam implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#include <stdio.h>
#include <stdlib.h>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
//TODO: flip image flag
void usage(char *progName) {
printf("%s <board-size-x> <board-size-y> <camera-id>\n", progName);
printf("i.e.: %s 6 4 0\n", progName);
printf("press space to detect chessboard and (again) to proceed.\n");
}
int main(int argc, char **argv)
{
/* TODO:
* flags:
* subpixel
* camera id
* flip x and y
* pmd mode?
*/
if(argc < 4) {
usage(argv[0]);
exit(1);
}
CvCapture *capture = cvCaptureFromCAM(atoi(argv[3]));
IplImage *imgColor = cvQueryFrame(capture);
IplImage *img = cvCreateImage(cvGetSize(imgColor), 8, 1);
int patx = atoi(argv[1]);
int paty = atoi(argv[2]);
CvSize patternSize = cvSize(patx, paty);
int cornersTotal = patternSize.width * patternSize.height;
CvPoint2D32f *corners = (CvPoint2D32f*) malloc(cornersTotal * sizeof(CvPoint2D32f));
cvNamedWindow("Camera", 0);
int imageCnt = 0;
bool grabFrame = false;
while(1) {
imgColor = cvQueryFrame(capture);
cvFlip(imgColor, 0, -1); // flips image around the x and y axes
cvCvtColor(imgColor, img, CV_BGR2GRAY);
if(grabFrame) {
/* ----- Chessboard detection -----
-------------------------------- */
int cornersCount; // should be the same for wcam and pmd and equal cornersTotal
int found = cvFindChessboardCorners(img, patternSize, corners,
&cornersCount, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
cvFindCornerSubPix(img, corners, cornersCount, cvSize(11,11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
if(found && (cornersCount == cornersTotal)) {
cvDrawChessboardCorners(imgColor, patternSize, corners, cornersCount, found);
cvShowImage("Camera", imgColor);
printf("grab?\n");
int k = cvWaitKey(0);
if(k == (int)' ') {
imageCnt++;
char *filename = (char*) malloc(16 * sizeof(char)); // room for "imageXX.jpg" plus terminating NUL
sprintf(filename, "image%02i.jpg", imageCnt);
cvSaveImage(filename, img);
printf("saved %s.\n", filename);
free(filename);
grabFrame = false;
continue;
}
}
}
cvShowImage("Camera", imgColor);
if((int)' ' == cvWaitKey(5)) grabFrame = true;
}
return 0;
}

View file

@ -0,0 +1,130 @@
-------------------------------------------------------------------
To compile the project simply call "make". This will configure slam6d
using the default settings. If you wish to configure the project using
custom settings do: "make config". This command requires ccmake to be
installed on your system. Alternatively you may change into the build
directory ".build" and configure the project with your preferred cmake
configurator, e.g.:
cd .build && cmake -i ../
For Microsoft Windows, use the cmake-gui application provided by cmake
to configure and generate project files for the appropriate version of
Microsoft Visual Studio C++ of your system. Use the INSTALL target to
build the entire project. Executables (and .dll's) will then reside
in the "windows" folder. For running the binaries you need to install
the proper redistributable package.
Some Boost libraries (graph, regex, serialization, filesystem,
interprocess) are needed to compile the slam6D program.
If you are using Debian just do:
aptitude install libboost-graph-dev libboost-regex-dev libboost-serialization-dev freeglut3-dev libxmu-dev libxi-dev
or, if you are still using Debian stable (lenny):
aptitude install libboost-graph1.35-dev libboost-regex1.35-dev libboost-serialization1.35-dev freeglut3-dev libxmu-dev libxi-dev
for Ubuntu this would be:
sudo apt-get install libboost-all-dev libcv-dev freeglut3-dev libxmu-dev libxi-dev
SuSE users please use yast2 for installing the missing packages
Additionally you need some external tools (exemplarily for Ubuntu):
sudo apt-get install imagemagick ffmpeg libx264-120
-------------------------------------------------------------------
For a detailed explanation of the program, its usage, etc., please
refer to the high level documentation doc/documentation_HL.pdf
(esp. sections 4-6, for starters).
IMPORTANT:
Take care to register scans first (bin/slam6D) before trying to
display them (bin/show), and think about using the point reduction
(see section 6) for a much faster scan matching result. Extremely
large scans might need to be reduced (using bin/scan_red) before
registration. This will write reduced scans in the uos format into a
directory "reduced" in the data directory.
Three example scans are included in the dat directory, several
larger data sets can be downloaded from the data repository,
http://kos.informatik.uni-osnabrueck.de/3Dscans/
(Alternatively, click on the "Data Repository" link on this project's
web site on Sourceforge, http://slam6d.sourceforge.net/)
EXAMPLES:
(using the data set in the slam6d repository)
bin/slam6D -m 500 -R 5 -d 25.0 --metascan dat
bin/show dat
(using the data set in the slam6d repository)
bin/slam6D --max=500 -r 10.2 -i 20 --metascan dat
bin/show dat
(using hannover1.tgz from http://kos.informatik.uni-osnabrueck.de/3Dscans/)
bin/slam6D -s 1 -e 65 -r 10 -i 100 -d 75 -D 250 --epsICP=0.00001
-I 50 --cldist=750 -L 0 -G 1 /home/nuechter/dat/dat_hannover1
bin/show -s 1 -e 65 /home/nuechter/dat/dat_hannover1
(using hannover2.tgz from http://kos.informatik.uni-osnabrueck.de/3Dscans/)
bin/slam6D -q -r 10 -f rts -s 23 -d 75 -L 4 --cldist=1500 -G 1 -D -1
--DlastSLAM 250 --graphDist 200 -I 50 hannover2
bin/show -f rts -s 23 hannover2
(using kvarntorp_mine.tgz (dat_mine1) from http://kos.informatik.uni-osnabrueck.de/3Dscans/)
bin/slam6D -s 1 -e 76 -r 10 -m 3000 -d 50 -i 1000 --epsICP=0.000001
-I 50 -D 75 --clpairs=5000 -f old /home/nuechter/dat/dat_mine1/
bin/show -s 1 -e 76 -m 3000 -f old /home/nuechter/dat/dat_mine1/
(using bremen_city.zip from http://kos.informatik.uni-osnabrueck.de/3Dscans/)
bin/scan_red -s 0 -e 12 -r OCTREE -v 10 --octree 0 -f RIEGL_TXT /home/nuechter/dat/bremen_city
bin/slam6D -a 2 -q /home/nuechter/dat/bremen_city/reduced -f uos -d 150
-s 0 -e 12 --anim=1 -n /home/nuechter/dat/bremen_city/bremen.net
-G 1 -D 100 -i 0 -I 50 -p --epsSLAM=0.0
bin/show -s 0 -e 12 /home/nuechter/dat/bremen_city/reduced
(using UniKoblenz_CampusTour3_OsnabrueckFormat.tar.gz from
http://kos.informatik.uni-osnabrueck.de/3Dscans/)
bin/slam6D -s 1 -e 320 -r 20 -i 300 --epsICP=0.000001 -d 45 -D 45
-f uos --algo=2 -l 10 -L 4 -I 100 -G 1
/home/nuechter/dat/UniKoblenz_CampusTour3_OsnabrueckFormat/
bin/show -s 1 -e 320 -f uos /home/nuechter/dat/UniKoblenz_CampusTour3_OsnabrueckFormat/
-------------------------------------------------------------------
For detecting planes compile with the WITH_PLANE option.
Adapt the settings in bin/hough.cfg for your data set.
EXAMPLE: (using the data set in the slam6d repository, no modification
of bin/hough.cfg necessary)
bin/planes -s 0 dat
bin/show -s 0 -e 0 dat -l dat/planes/planes.list
(using bremen_city.zip from http://kos.informatik.uni-osnabrueck.de/3Dscans/)
adapt these settings in bin/hough.cfg:
RhoNum 500
RhoMax 5000
MaxPointPlaneDist 50.0
MinPlaneSize 50
PointDist 100.0
/bin/planes -f riegl_txt -s 0 /home/nuechter/dat/bremen_city/ -r 50 -O 1 -m 5000
/bin/show -s 0 -e 0 /home/nuechter/dat/bremen_city/ -f riegl_txt -l dat/planes/planes.list -r 10 -O 1 -m 5000
-------------------------------------------------------------------
The IO relevant parameters -f(ormat), -s(tart), -e(nd) can be omitted
in slam6D and show if a 'format' file exists in the directory, which
contains key=value lines (spaces are trimmed automatically) for
format, start, end with the same values as in the commandline. These
format-file parameters are overridden by commandline parameters, so the
format file provides the right IO type and the full index range while the
user can still override the index range as he sees fit.
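For example, a 'format' file for the reduced bremen_city scans might
contain:
format = uos
start = 0
end = 12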
-------------------------------------------------------------------
A reference manual can be found in doc/refman.pdf or
doc/html/index.html (type in 'make docu' to compile the doxygen
documentation for the HTML files).

View file

@ -0,0 +1,154 @@
/*
* calibrate implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#include <stdio.h>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
void usage(char *progName) {
printf("%s <board-size-x> <board-size-y> <square-size> <images-list>\n", progName);
printf("\twhere board-size-x and y are the counts of *inner* corners of the board\n");
printf("e.g.: %s 6 4 0.04 image*\n", progName);
printf("Use more than ten images.\nPress space bar to proceed.\n");
}
void calibrate( CvMat *intrinsic, CvMat *distortion, CvSize imgSz, CvSize boardSz
, double boardSide, CvPoint2D32f **corners, int boardsCnt) {
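/* Note on the model (descriptive comment): the object points describe a flat
chessboard lying in the z=0 plane with boardSide units (e.g. metres) between
neighbouring inner corners; cvCalibrateCamera2 then estimates the camera
matrix and the four distortion coefficients from the 2D-3D correspondences
of all boards. */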
int totalPoints = boardSz.width * boardSz.height;
// object points (model)
CvMat *objPts = cvCreateMat(totalPoints * boardsCnt, 3, CV_32FC1);
// found points
CvMat *imgPts = cvCreateMat(totalPoints * boardsCnt, 2, CV_32FC1);
// points count
CvMat *ptsCnt = cvCreateMat(boardsCnt, 1, CV_32SC1);
// copy corners to matrix and fill model matrix
for(int i = 0; i < boardsCnt; i++) {
for(int j = 0; j < totalPoints; j++) {
int s = i * totalPoints;
CV_MAT_ELEM(*imgPts, float, s+j, 0) = corners[i][j].x;
CV_MAT_ELEM(*imgPts, float, s+j, 1) = corners[i][j].y;
CV_MAT_ELEM(*objPts, float, s+j, 0) = boardSide * (j / boardSz.width);
CV_MAT_ELEM(*objPts, float, s+j, 1) = boardSide * (j % boardSz.width);
CV_MAT_ELEM(*objPts, float, s+j, 2) = 0.0f;
}
CV_MAT_ELEM(*ptsCnt, int, i, 0) = totalPoints;
}
// initial guess
CV_MAT_ELEM(*intrinsic, float, 0, 0) = 1.0f;
CV_MAT_ELEM(*intrinsic, float, 1, 1) = 1.0f;
cvCalibrateCamera2( objPts, imgPts, ptsCnt, imgSz
, intrinsic, distortion
, NULL, NULL, 0 );
return;
}
int main(int argc, char **argv)
{
if(argc < 5) {
usage(argv[0]);
exit(1);
}
int patx = atoi(argv[1]);
int paty = atoi(argv[2]);
CvSize boardSz = cvSize(patx, paty);
int loadImageCnt = argc - 4;
int cornersTotal = boardSz.width * boardSz.height;
float boardSide = atof(argv[3]);
char **images = &argv[4];
// intrinsic matrices and distortion params
CvMat *intrinsic = cvCreateMat(3, 3, CV_32F);
CvMat *distortion = cvCreateMat(1, 4, CV_32F);
IplImage *img = cvLoadImage(images[0], CV_LOAD_IMAGE_GRAYSCALE);
IplImage *imgColor = cvCreateImage(cvGetSize(img), 8, 3);
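// choose the half-size of the corner-refinement search window: small images
// (e.g. the low-resolution PMD camera) get a 4 px window, larger ones 11 px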
int subpixel;
if(cvGetSize(img).width < 300) {
subpixel = 4;
}
else {
subpixel = 11;
}
int imagesWithBoard = 0;
CvPoint2D32f **corners = (CvPoint2D32f**)calloc(100, sizeof(CvPoint2D32f*));
corners[0] = (CvPoint2D32f*) malloc(cornersTotal * sizeof(CvPoint2D32f));
cvNamedWindow("Cam", 0);
for(int imagesLoaded = 0; imagesLoaded < loadImageCnt; imagesLoaded++) {
img = cvLoadImage(images[imagesLoaded], CV_LOAD_IMAGE_GRAYSCALE);
cvCvtColor(img, imgColor, CV_GRAY2BGR);
int cornersCount;
int found = cvFindChessboardCorners(img, boardSz, corners[imagesWithBoard],
&cornersCount, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
if(found) cvFindCornerSubPix(img, corners[imagesWithBoard], cornersCount, cvSize(subpixel, subpixel), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
if(found && (cornersCount == cornersTotal)) {
cvDrawChessboardCorners(imgColor, boardSz, corners[imagesWithBoard], cornersCount, found);
imagesWithBoard++;
corners[imagesWithBoard] = (CvPoint2D32f*) malloc(cornersTotal * sizeof(CvPoint2D32f));
}
cvShowImage("Cam", imgColor);
cvWaitKey(0);
}
cvDestroyAllWindows();
printf("calibrating...\n");
fflush(stdout);
//TODO: can be started in parallel to watch calibration on image undistortion :)
calibrate(intrinsic, distortion, cvGetSize(img), boardSz, boardSide, corners, imagesWithBoard);
// save to xml files
cvSave("./intrinsic.xml", intrinsic);
cvSave("./distortion.xml", distortion);
printf("matrices saved to xml files.\n");
// let OS clean all images and matrices
return 0;
}
/*void usage(char *progName) {
printf( "usage:\n \
%s <x> <y> <camera-id> [-s]\n \
\tcamera-id is v4l id or -1 for pmd cam \
\tpress space or 'a' to grab image\n \
\tpress 'n' to skip grabbed frame\n \
\tpress 'c' to finish frame grabbing start calibration\n \
or:\n \
%s <x> <y> <image1cam.jpg, image1pmd.jpg, image2cam.png...>\n", progName, progName);
fflush(stdout);
}*/

View file

@ -0,0 +1,27 @@
IF (WITH_THERMO)
find_package(OpenCV REQUIRED)
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/cvblob)
include_directories(${CMAKE_SOURCE_DIR}/include/shapes/)
include_directories(${CMAKE_SOURCE_DIR}/include/thermo/)
include_directories(/usr/include/)
include_directories(/usr/include/opencv)
add_executable(caliboard caliboard.cc)
add_executable(thermo thermo.cc)
# add_executable(thermo thermo.cc src/cvaux.cpp src/cvblob.cpp src/cvcolor.cpp src/cvcontour.cpp src/cvlabel.cpp src/cvtrack.cpp)
IF(UNIX)
target_link_libraries(caliboard scan shape newmat dl ANN)
target_link_libraries(thermo scan shape newmat dl ANN)
target_link_libraries(thermo GL GLU cvblob ${OpenCV_LIBS} scan ANN)
ENDIF(UNIX)
IF (WIN32)
target_link_libraries(caliboard scan shape newmat XGetopt ANN)
target_link_libraries(thermo scan shape newmat XGetopt ANN)
target_link_libraries(thermo GL GLU cvblob ${OpenCV_LIBS} scan ANN)
ENDIF(WIN32)
ENDIF(WITH_THERMO)

View file

@ -0,0 +1,200 @@
/*
* PMD implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "pmdsdk2.h"
#include <stdlib.h>
#include <stdio.h>
#include "cvpmd.h"
#include <inttypes.h>
/** TODO
* inten max amplitude
* error handling
*/
PMD *initPMD(const char* plugin, const char *ip) {
PMD *io = (PMD*)malloc(sizeof(PMD));
if(0 != pmdOpen (&io->hnd, plugin, ip, plugin, "")) {
fprintf(stderr, "ERROR: could not connect!\n");
exit(1);
}
//pmdUpdate(io->hnd);
//pmdGetSourceDataDescription(io->hnd, &io->dd);
io->dd.std.numColumns = 64;
io->dd.std.numRows = 50; //FIXME!
io->data = (float*)malloc(io->dd.std.numColumns * io->dd.std.numRows * sizeof(float));
return io;
}
float *pmdDataPtr(PMD *p) {
return p->data;
}
void releasePMD(PMD **pmd) {
pmdClose((*pmd)->hnd);
free((*pmd)->data);
free(*pmd);
*pmd = 0;
}
static float max(const PMD *p) {
float max = 0.0f;
for(unsigned int k = 0; k < p->dd.std.numRows*p->dd.std.numColumns; k++) {
if(p->data[k] > max) max = p->data[k];
}
//printf("max = %f\n", max);
return max;
}
//static inline void setPix(const IplImage *m, const int c, const int r, const char v) { m->imageData[r*m->width + c] = v; }
CvSize pmdGetSize(const PMD *p) {
return cvSize(p->dd.std.numColumns, p->dd.std.numRows);
}
IplImage *toIplImage(const PMD *p, IplImage *img = 0) {
int cols = p->dd.std.numColumns;
int rows = p->dd.std.numRows;
IplImage *imgp;
if(!img) imgp = cvCreateImage(pmdGetSize(p), 8, 1);
else imgp = img;
//FIXME: scaled!
float m = max(p);
for(int r = 0; r < rows; r++) {
for(int c = 0; c < cols; c++) {
//FIXME: mess with the rows and columns
CV_IMAGE_ELEM(imgp, uint8_t, r, c) = (uint8_t) 255.0f * p->data[r*cols + c] / m;
}
}
return imgp;
}
/*CvPoint3D32f unionVec(const CvPoint uv, const CvMat *intrinsic) {
//TODO: not defined yet
// with this function pmdProjectToCartesian would look like:
// pmdProjectToCartesian pt depth mat = depth * unionVec pt mat
return cvPoint3D32f(0,0,0);
}*/
static inline CvPoint3D32f pmdProjectToCartesian(const CvMat *intrinsicMatrix, const CvPoint2D32f uv, const float dist) {
/* Lukas Dierks and Jan Wuelfing's projectToCartesian */
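/* Descriptive note: back-projects pixel (u,v) along its viewing ray (using fx,
fy, cx, cy from the intrinsic matrix) and scales the result so that its
Euclidean norm equals the measured distance 'dist', i.e. the PMD range
reading for that pixel. */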
float fx = cvmGet(intrinsicMatrix, 0, 0);
float cx = cvmGet(intrinsicMatrix, 0, 2);
float fy = cvmGet(intrinsicMatrix, 1, 1);
float cy = cvmGet(intrinsicMatrix, 1, 2);
float u = uv.x;
float v = uv.y;
float r = dist;
float u2 = u*u;
float v2 = v*v;
float cx2 = cx*cx;
float cy2 = cy*cy;
float fx2 = fx*fx;
float fy2 = fy*fy;
// Reverse Projection
float squareroot = sqrt(
cy2 * fx2 +
cx2 * fy2 +
fx2 * fy2 -
2 * cx * fy2 * u +
fy2 * u2 -
2 * cy * fx2 * v +
fx2 * v2
);
return cvPoint3D32f((fy * r * (cx - u)) / squareroot,
(fx * r * (cy - v)) / squareroot,
(fx * fy * r) / squareroot);
}
CvPoint3D32f *cvProjectArrayToCartesian( const CvMat *intrinsicMatrix
, const CvPoint2D32f *pts, const int ptsCnt
, CvPoint3D32f *unitVs) {
for(int i = 0; i < ptsCnt; i++)
unitVs[i] = pmdProjectToCartesian(intrinsicMatrix, pts[i], 1.0);
return unitVs;
}
CvPoint3D32f **pmdProjectArrayToCartesian(const PMD *p, const CvMat *intrinsicMatrix, CvPoint3D32f **pts) {
for(unsigned int i = 0; i < p->dd.std.numRows; i++)
for(unsigned int j = 0; j < p->dd.std.numColumns; j++)
pts[i][j] = pmdProjectToCartesian(intrinsicMatrix, cvPoint2D32f(i,j), p->data[j*p->dd.std.numColumns + i]);
return pts;
}
IplImage *pmdQueryImage(PMD *p, IplImage *img = 0) {
pmdGetIntensities(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float));
return toIplImage(p, img);
}
IplImage *pmdQueryImageAsync(PMD *p, IplImage *img = 0) {
pmdGetIntensitiesAsync(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float));
return toIplImage(p, img);
}
void pmdRetriveDistances(PMD *p) {
pmdGetDistances(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float));
return;
}
void pmdRetriveDistancesAsync(PMD *p) {
pmdGetDistancesAsync(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float));
return;
}
IplImage *pmdQueryDistances(PMD *p, IplImage *img = 0) {
pmdGetDistances(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float));
return toIplImage(p, img);
}
IplImage *pmdQueryDistancesAsync(PMD *p, IplImage *img = 0) {
pmdGetDistancesAsync(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float));
return toIplImage(p, img);
}
IplImage *pmdQueryAmplitudes(PMD *p, IplImage *img = 0) {
pmdGetAmplitudes(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float));
return toIplImage(p, img);
}
IplImage *pmdQueryAmplitudesAsync(PMD *p, IplImage *img = 0) {
pmdGetAmplitudesAsync(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float));
return toIplImage(p, img);
}

View file

@ -0,0 +1,869 @@
/*
* pose implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <math.h>
// OpenCV
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
// GL: GLFW (window etc, ala glut) and FTGL (text rendering)
#include <GL/glfw.h>
#include <ftgl.h>
// PMD-related stuff
#include "pmdsdk2.h"
#include "cvpmd.h"
#include "pmdWrap.h"
#include "history.h"
// SLAM6D
#include "slam6d/point.h"
#include "slam6d/ptpair.h"
#include <vector>
FTGLPixmapFont *font; // opengl text rendring
void usage(char* progName) {
printf("usage: %s [options]\n", progName);
printf("options:\n \
\t--ui 0|1\t disable|enable ui (default: 1)\n\
\t--gl 0|1\t disable|enable 3d rendering (default: 1)\n\
\t--cfg file\t parse configuration from file (default: pmdc.conf)\n\
\t--help\t\t display this help.\n");
}
void render( History *history, CvPoint3D32f *camPts, int trPtsCnt
, CvMat *rot, CvMat *trn
, CvPoint3D32f **camPts3D, char **pts3DStatus
, int reprojected, bool poseEstimated);
/*
reprojects the PMD-acquired 3D points onto the webcam image
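The steps: transform every PMD 3D point into the camera frame with the
extrinsic [R|t], project it with the camera intrinsics, remember which image
pixel it lands on (pmd2imgIdxs), and assign 3D coordinates to those tracked
2D features that lie within a few pixels of a projected PMD point
(trackedPts3D, pts3DStatus, tr2pmdIdxs). Returns the number of features that
received a 3D correspondence.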
*/
//FIXME this function should be dispatched >_>
int projectImageToPMD( CvMat *rot, CvMat *trn, CvMat *intrinsic
, CvPoint3D32f **pmdPts, CvSize pmdSz
, CvSize camSz, CvPoint **pmd2imgIdxs
, CvPoint2D32f *trackedPts
, int *trPtsCnt, CvPoint3D32f *trackedPts3D, char *pts3DStatus
, CvPoint *tr2pmdIdxs) {
/* TODO:
assert null image, not clean image, bad image depth
*/
// pmd point's matrix
CvMat *pt = cvCreateMat(3, 1, CV_32FC1);
// point in cam coordinate system
CvMat *reprojPt = cvCreateMat(3, 1, CV_32FC1);
// points on cam screen
CvMat *camPt3 = cvCreateMat(3, 1, CV_32FC1);
CvMat *camPt = cvCreateMat(2, 1, CV_32FC1);
// rotation matrix
CvMat *rotMat = cvCreateMat(3, 3, CV_32FC1);
cvRodrigues2(rot, rotMat, NULL);
// clear previously found tracked pts
for(int k = 0; k < *trPtsCnt; k++) pts3DStatus[k] = 0;
//int l = 0; // count corresponding points
int reprojected = 0;
for(int j = 0; j < pmdSz.width; j++)
for(int i = 0; i < pmdSz.height; i++) {
if( fabs(pmdPts[i][j].x) > 20.0
|| fabs(pmdPts[i][j].y) > 20.0
|| fabs(pmdPts[i][j].z) > 20.0) continue; //FIXME: more realistic limit, status = false
// convert to mat
CV_MAT_ELEM(*pt, float, 0, 0) = (float)pmdPts[i][j].x;
CV_MAT_ELEM(*pt, float, 1, 0) = (float)pmdPts[i][j].y;
CV_MAT_ELEM(*pt, float, 2, 0) = (float)pmdPts[i][j].z;
// reproject to the camera coordinate system
cvMatMulAdd(rotMat, pt, trn, reprojPt);
// project to the image
cvMatMul(intrinsic, reprojPt, camPt3);
// cvConvertPointsHomogenious(camPt3, camPt);
/* cvProjectPoints2(pt, rot, trn, intrinsic, NULL, camPt
, NULL, NULL, NULL, NULL, NULL);*/
float scale = (float)CV_MAT_ELEM(*camPt3, float, 2, 0);
float xf = CV_MAT_ELEM(*camPt3, float, 0, 0) / scale;
float yf = CV_MAT_ELEM(*camPt3, float, 1, 0) / scale;
int x = (int)xf;
int y = camSz.height - (int)yf; //FIXME revise coordinates
if((x < 0) || (x > camSz.width) ||
(y < 0) || (y > camSz.height)) {
pmd2imgIdxs[i][j].x = -1;
pmd2imgIdxs[i][j].y = -1;
continue;
} else { // point is projected to cam
// contains which PMD point is seen from this pixel
pmd2imgIdxs[i][j].x = x;
pmd2imgIdxs[i][j].y = y;
}
//find tracked 3d points (fused here, can be dispatched)
for(int k = 0; k < *trPtsCnt; k++)
if(pts3DStatus[k]) continue; // kill to accumulate (*)
else
if( (fabs(trackedPts[k].x - (float)x) < 3) //TODO: distance SHOULD be chosen depending on depth!!!
&& (fabs(trackedPts[k].y - (float)y) < 3)) { //FIXME: hardcoded distance
//TODO: we can accumulate points here to get more precise results (*)
trackedPts3D[k].x = pmdPts[i][j].x;
trackedPts3D[k].y = pmdPts[i][j].y;
trackedPts3D[k].z = pmdPts[i][j].z;
pts3DStatus[k] = 1;
tr2pmdIdxs[k].x = i; //FIXME: cvPoint(i,j)?
tr2pmdIdxs[k].y = j;
reprojected++;
break; // kill to accumulate (*)
}
}
cvReleaseMat(&pt);
cvReleaseMat(&reprojPt);
cvReleaseMat(&camPt);
cvReleaseMat(&rotMat);
return reprojected;
}
static inline float cvPoint3D32fNorm(CvPoint3D32f pt) {
return sqrt( pt.x*pt.x
+pt.y*pt.y
+pt.z*pt.z);
}
float dpthS( IplImage *img
, CvPoint ptI, CvPoint dp
, CvPoint3D32f **pts
, CvPoint ptD, CvSize pmdSz
, float sD, float sC) {
CvSize imgSz = cvGetSize(img);
if( ptD.x + dp.x > pmdSz.width - 1 || ptD.x + dp.x < 0
|| ptD.y + dp.y > pmdSz.height - 1 || ptD.y + dp.y < 0
|| ptI.x + dp.x > imgSz.width - 1 || ptI.x + dp.x < 0
|| ptI.y + dp.y > imgSz.height - 1 || ptI.y + dp.y < 0) return 0.0;
uchar *pI = &((uchar*) (img->imageData + img->widthStep * ptI.y))[ptI.x*3];
uchar *pJ = &((uchar*) (img->imageData + img->widthStep * (ptI.y+dp.y)))[(ptI.x+dp.x)*3];
float dr = (float)pI[2] - (float)pJ[2];
float dg = (float)pI[1] - (float)pJ[1];
float db = (float)pI[0] - (float)pJ[0];
float wij = exp(-(sqrt(dr*dr + dg*dg + db*db))/sC);
float dpI = cvPoint3D32fNorm(pts[ptD.x][ptD.y]);
float dpJ = cvPoint3D32fNorm(pts[ptD.x + dp.x][ptD.y + dp.y]);
float s = exp(-wij*(dpI - dpJ)*(dpI - dpJ)/sD);
return s;
}
#define DPTHS(dx, dy) (dpthS( img, idx, cvPoint((dx),(dy)) \
, pmdPts \
, pmdIdx, pmdSz \
, sigmaDepth, sigmaColor))
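/*
 * Descriptive note: bilateral-style outlier test. For every tracked feature the
 * depth of its PMD cell is compared with the depths of the 8 neighbouring
 * cells, each difference weighted by the colour similarity of the corresponding
 * camera pixels (controlled by sigmaDepth and sigmaColor). Features whose
 * accumulated score falls below 'threshold' are flagged as outliers by
 * clearing pts3DStatus.
 */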
void outliersDepthAndColor( CvPoint3D32f **pmdPts, IplImage *img, CvSize pmdSz
, CvPoint2D32f *trackedPts, int trPtsCnt, CvPoint* tr2pmdIdxs
, char *pts3DStatus // this actually return parameter
, float sigmaDepth, float threshold, float sigmaColor
) {
if(threshold < 0.0) return;
// depth score outliers removal, see pmdc.conf comments
for(int k = 0; k < trPtsCnt; k++) {
//FIXME: check array bounds
CvPoint pmdIdx = tr2pmdIdxs[k];
CvPoint idx = cvPointFrom32f(trackedPts[k]);
float s00, s01, s02;
s00 = DPTHS(-1, -1);
s01 = DPTHS( 0, -1);
s02 = DPTHS( 1, -1);
float s10 = DPTHS(-1, 0);
float s12 = DPTHS( 1, 0);
float s20 = DPTHS(-1, 1);
float s21 = DPTHS( 0, 1);
float s22 = DPTHS( 1, 1);
float score = s00 + s01 + s02
+ s10 + 0.0 + s12
+ s20 + s21 + s22;
printf("score = %f\n", score);
if(score < threshold) pts3DStatus[k] = 0;
}
}
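/*
 * Descriptive note: computes mean and variance of the 3D displacement
 * magnitudes of all features that have valid 3D coordinates in both the
 * previous and the current frame; returns 0 if no such pair exists.
 * outliersSpeedSigma uses the result to drop features that moved more than
 * one standard deviation away from the mean motion.
 */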
int motionMeanAndVariance(CvPoint3D32f **camPts3D, char **pts3DStatus, int trPtsCnt, float *mean, float *var) {
// float *mean = meanAndVariance;
// float *var = &(meanAndVariance[3]);
// float motion[3];
float dx, dy, dz;
float magSq;
float motionSum = 0; //[3] = {0.0, 0.0, 0.0};
float motionSqrSum = 0; //[3] = {0.0, 0.0, 0.0};
int sz = 0; // pairs count
for(int i = 0; i < trPtsCnt; i++)
if(pts3DStatus[1][i] && pts3DStatus[0][i]) {
sz++;
dx = camPts3D[1][i].x - camPts3D[0][i].x;
dy = camPts3D[1][i].y - camPts3D[0][i].y;
dz = camPts3D[1][i].z - camPts3D[0][i].z;
magSq = dx*dx + dy*dy + dz*dz;
motionSum += sqrt(magSq); //FIXME: optimisation, we can use it without sqrt
motionSqrSum += magSq; //thus, it would be sqr here
/* motionSum[0] += motion[0];
motionSum[1] += motion[1];
motionSum[2] += motion[2];
motionSqrSum[0] += motion[0]*motion[0];
motionSqrSum[1] += motion[1]*motion[1];
motionSqrSum[2] += motion[2]*motion[2];*/
}
if(0 == sz) return 0;
// mean
*mean = motionSum / sz;
/* mean[0] = motionSum[0] / (float)sz;
mean[1] = motionSum[1] / (float)sz;
mean[2] = motionSum[2] / (float)sz;*/
// variance
*var = 0.0f;
for(int i = 0; i < trPtsCnt; i++)
if(pts3DStatus[1][i] && pts3DStatus[0][i]) {
dx = camPts3D[1][i].x - camPts3D[0][i].x;
dy = camPts3D[1][i].y - camPts3D[0][i].y;
dz = camPts3D[1][i].z - camPts3D[0][i].z;
magSq = dx*dx + dy*dy + dz*dz;
*var += (magSq - *mean)*(magSq - *mean);
}
*var /= sz;
// = motionSqrSum / sz - (*mean)*(*mean);
/* var[0] = motionSqrSum[0] / (float)sz - mean[0]*mean[0];
var[1] = motionSqrSum[1] / (float)sz - mean[1]*mean[1];
var[2] = motionSqrSum[2] / (float)sz - mean[2]*mean[2];*/
return 1;
}
void outliersSpeedSigma(CvPoint3D32f **camPts3D, char **pts3DStatus, int trPtsCnt, float mean, float var) {
float dx, dy, dz;
float mag;
float sigma = sqrt(var);
for(int i = 0; i < trPtsCnt; i++)
if(pts3DStatus[1][i] && pts3DStatus[0][i]) {
dx = camPts3D[1][i].x - camPts3D[0][i].x;
dy = camPts3D[1][i].y - camPts3D[0][i].y;
dz = camPts3D[1][i].z - camPts3D[0][i].z;
mag = sqrt(dx*dx + dy*dy + dz*dz);
if(fabs(mag - mean) > sigma) {
pts3DStatus[0][i] = 0;
}
}
}
int main(int argc, char **argv) {
// settings
bool ui = true;
bool gl = true;
bool fps = false;
const char *config = "./pmdc.conf";
// args parsing
for(int i = 1; i < argc; i++) {
if (!strcmp(argv[i], "--ui")) ui = (bool) atoi(argv[++i]);
else if (!strcmp(argv[i], "--gl")) gl = (bool) atoi(argv[++i]);
else if (!strcmp(argv[i], "--fps")) fps = true;
else if (!strcmp(argv[i], "--cfg")) config = argv[++i];
//TODO: config
else if (!strcmp(argv[i], "--help")
|| !strcmp(argv[i], "-h")) {
usage(argv[0]);
return 1;
} else {
fprintf(stderr, "ERROR: unknown flag: %s\n", argv[i]);
return 1;
}
}
// pose guess
CvMat *rotMatGuess = cvCreateMat(3,3, CV_32FC1);
CvMat *rotGuess = cvCreateMat(3, 1, CV_32FC1);
CvMat *trnGuess = cvCreateMat(3, 1, CV_32FC1);
/***** init device and allocate images *****/
PMDCam *pmdc = initPMDCam(config);
CvSize pmdSz = cvGetSize(pmdc->iPMDI);
CvSize camSz = cvGetSize(pmdc->iCam);
printf("pose: pmd init done\n");
/***** essential matrix *****/
CvMat *rot = (CvMat*)cvLoad("../essential-rot.xml"); //FIXME: load path from cfg
CvMat *trn = (CvMat*)cvLoad("../essential-trn.xml");
/***** LK-tracking *****/
IplImage *swapTemp;
int featuresMax = pmdc->_track.maxFeatures;
int trPtsCnt = 0; // counts found points
// eigenvalues for GFTT
IplImage* eig = cvCreateImage(camSz, 32, 1);
IplImage* tmp = cvCreateImage(camSz, 32, 1);
IplImage* mask = cvCreateImage(camSz, IPL_DEPTH_8U, 1);
// previous image and pyramides
IplImage *imgCamPrv = cvCreateImage(camSz, 8, 1);
IplImage *imgCamPyr = cvCreateImage(camSz, 8, 1);
IplImage *imgCamPyrPrv = cvCreateImage(camSz, 8, 1);
// prev and curr tracked points
CvPoint2D32f *camPts[2];
camPts[0] = (CvPoint2D32f*) cvAlloc(featuresMax * sizeof(CvPoint2D32f));
camPts[1] = (CvPoint2D32f*) cvAlloc(featuresMax * sizeof(CvPoint2D32f));
CvPoint3D32f *camPts3D[2];
camPts3D[0] = (CvPoint3D32f*) cvAlloc(featuresMax * sizeof(CvPoint3D32f));
camPts3D[1] = (CvPoint3D32f*) cvAlloc(featuresMax * sizeof(CvPoint3D32f));
CvPoint2D32f *swapPts;
CvPoint3D32f *swapPts3; // i guess i can use void* d:D
char *swapStatus;
char *camPtsStatus = (char*)cvAlloc(featuresMax);
char *pts3DStatus[2];
pts3DStatus[0] = (char*) cvAlloc(featuresMax * sizeof(char));
pts3DStatus[1] = (char*) cvAlloc(featuresMax * sizeof(char));
CvPoint *tr2pmdIdxs = (CvPoint*) cvAlloc(featuresMax * sizeof(CvPoint));
// 3d rays where points points :P
CvPoint3D32f *trackedPts = (CvPoint3D32f*) cvAlloc(featuresMax * sizeof(CvPoint3D32f));
// contains (row,col) of pmd 3D pts
CvPoint **pmd2imgIdxs = (CvPoint**) cvAlloc(pmdSz.height * sizeof(CvPoint*));
for(int i = 0; i < pmdSz.height; i++)
pmd2imgIdxs[i] = (CvPoint*) cvAlloc(pmdSz.width * sizeof(CvPoint));
// pmd history
History *history = createHistory();
/***** ui and gl stuff *****/
if(ui) {
cvNamedWindow("PMD", 0);
cvNamedWindow("Cam", 0);
}
if(gl) {
glfwInit();
if(!glfwOpenWindow(640, 480, 8, 8, 8, 8, 24, 0, GLFW_WINDOW)) {
glfwTerminate();
fprintf(stderr, "ERROR: can't init glfw window!\n");
return 1;
}
}
//FIXME: put this in if(gl)
font = new FTGLPixmapFont("./djvm.ttf");
if(font->Error()) {
fprintf(stderr, "ERROR: can't load font ./djvm.ttf");
return 1;
}
font->FaceSize(20);
// fps counting
time_t tic=time(0);
time_t tac;
int fpsCnt = 0;
//icp pairs
vector<PtPair> pairs;
vector<PtPair> motion;
float mean, var;
/***** main loop *****/
/*********************/
int frames = 0;
int goodFrames = 0;
while(1) {
/***** fps counting *****/
if(fps) {
tac = time(0);
if(tac - tic >= 1) {
printf("%i FPS\n", fpsCnt);
fflush(stdout);
fpsCnt = 0;
tic=tac;
}
fpsCnt++;
}
if(grabData(pmdc)) break; // end of seq?
/***** tracking *****/
//if(trPtsCnt)
cvCalcOpticalFlowPyrLK( imgCamPrv, pmdc->iCam, imgCamPyrPrv, imgCamPyr
, camPts[0], camPts[1], trPtsCnt
, cvSize(pmdc->_track.winSz, pmdc->_track.winSz), 3, camPtsStatus, 0
, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03)
, pmdc->_track.trackingFlags);
pmdc->_track.trackingFlags |= CV_LKFLOW_PYR_A_READY;
// filter out not tracked points
int k = 0;
for(int i = 0; i < trPtsCnt; i++)
if(camPtsStatus[i]) {
camPts3D[0][k] = camPts3D[0][i];
pts3DStatus[0][k] = pts3DStatus[0][i];
camPts[1][k++] = camPts[1][i];
}
trPtsCnt = k;
/***** 3d (re)projection *****/
cvProjectArrayToCartesian(pmdc->intrinsicCam, camPts[1], trPtsCnt, trackedPts);
int reprojected = projectImageToPMD( rot, trn, pmdc->intrinsicCam, pmdc->pts, pmdSz, camSz, pmd2imgIdxs
, camPts[1], &trPtsCnt, camPts3D[1], pts3DStatus[1], tr2pmdIdxs);
double alignError = 0.0;
bool poseEstimated = false;
if(reprojected >= pmdc->minPts4Pose) { // we need at least minPts4Pose points
if(motionMeanAndVariance(camPts3D, pts3DStatus, trPtsCnt, &mean, &var))
// TODO: can be fused with centroid computation
outliersSpeedSigma(camPts3D, pts3DStatus, trPtsCnt, mean, var);
outliersDepthAndColor( pmdc->pts, pmdc->iCamColor, pmdSz, camPts[1], trPtsCnt
, tr2pmdIdxs, pts3DStatus[1], pmdc->sigmaDepth
, pmdc->dpThreshold, pmdc->sigmaColor);
pairs.clear();
double centroidM[3] = {0.0, 0.0, 0.0};
double centroidD[3] = {0.0, 0.0, 0.0};
double pt1[3];
double pt2[3];
for(int i = 0; i < trPtsCnt; i++)
if(pts3DStatus[1][i] && pts3DStatus[0][i]) {
pt1[0] = camPts3D[0][i].x;
pt1[1] = camPts3D[0][i].y;
pt1[2] = camPts3D[0][i].z;
pt2[0] = camPts3D[1][i].x;
pt2[1] = camPts3D[1][i].y;
pt2[2] = camPts3D[1][i].z;
//TODO can be fused -- (+=) :: a -> a -> a
centroidM[0] += camPts3D[0][i].x;
centroidM[1] += camPts3D[0][i].y;
centroidM[2] += camPts3D[0][i].z;
centroidD[0] += camPts3D[1][i].x;
centroidD[1] += camPts3D[1][i].y;
centroidD[2] += camPts3D[1][i].z;
PtPair currentPair(pt1, pt2);
pairs.push_back(currentPair);
}
reprojected = pairs.size();
if(reprojected >= pmdc->minPts4Pose) { // enough corresponding points
centroidM[0] /= reprojected;
centroidM[1] /= reprojected;
centroidM[2] /= reprojected;
centroidD[0] /= reprojected;
centroidD[1] /= reprojected;
centroidD[2] /= reprojected;
double transformMat[16];
try { alignError = pmdc->icp->Point_Point_Align(pairs, transformMat, centroidM, centroidD); }
catch(...) { fprintf(stderr, "ERROR: matrix is singular!\n"); }
if(!gl) printf( "%i: align error: %f, 3d pts count: %i, 2d pts count: %i\n"
, frames, alignError, reprojected, trPtsCnt);
for(int i = 0; i < 16; i++)
if (i%4 > 2) continue; // bottom row
else if(i/4 > 2) CV_MAT_ELEM(*trnGuess, float, i%4, 0) = transformMat[i]; // right col (translation)
else CV_MAT_ELEM(*rotMatGuess, float, i/4, i%4) = transformMat[i];
cvRodrigues2(rotMatGuess, rotGuess, NULL);
if(alignError < pmdc->maxError)
poseEstimated = true;
}
}
/**** Print pose to file ****/
//TODO: config option
if(pmdc->savePoses) {
char filename[] = "./dat/scan0000.pose";
sprintf(filename, "./dat/scan%04d.pose", frames);
FILE *pose = fopen(filename, "wb");
if(!pose) {
fprintf(stderr, "ERROR: can't create file %s!\n", filename);
} else {
if(poseEstimated) {
fprintf(pose, "%f %f %f\n%f %f %f\n"
, 100.0*CV_MAT_ELEM(*rotGuess, float, 0, 0)
, 100.0*CV_MAT_ELEM(*rotGuess, float, 1, 0)
, 100.0*CV_MAT_ELEM(*rotGuess, float, 2, 0)
, 100.0*CV_MAT_ELEM(*trnGuess, float, 0, 0)
, 100.0*CV_MAT_ELEM(*trnGuess, float, 1, 0)
, 100.0*CV_MAT_ELEM(*trnGuess, float, 2, 0));
goodFrames++;
} else {
fprintf( stderr, "ERROR: %i points found, align error: %f\n"
, reprojected, alignError);
fprintf(pose, "%f %f %f\n%f %f %f\n", 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
}
fflush(pose);
fclose(pose);
}
}
frames++;
/***** find features *****/
if(trPtsCnt < pmdc->_track.minFeatures) {
int trPtsCntFound = featuresMax - trPtsCnt;
cvSet(mask, cvScalarAll(255));
for(int i = 0; i < trPtsCnt; i++)
cvCircle(mask, cvPointFrom32f(camPts[1][i]), 20, CV_RGB(0,0,0), -1, 8, 0);
cvGoodFeaturesToTrack( pmdc->iCam, eig, tmp, camPts[1] + trPtsCnt, &trPtsCntFound
, pmdc->_track.quality, pmdc->_track.minDist, mask, 3, 0, 0.04);
cvFindCornerSubPix( pmdc->iCam, camPts[1] + trPtsCnt, trPtsCntFound
, cvSize(pmdc->_track.winSz,pmdc->_track.winSz), cvSize(-1,-1)
, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
trPtsCnt += trPtsCntFound;
}
Frame *f = allocFrame3DData(pmdSz);
fillFrame(f, pmdc->iCamColor, pmdSz, pmdc->pts, pmd2imgIdxs, rotGuess, trnGuess, alignError);
history = addFrame(history, f);
checkHistoryLen(history, pmdc->historyLen);
bool pause = false;
do {
/***** ui and rendring *****/
if(gl) render( history, trackedPts, trPtsCnt
, rot, trn, camPts3D, pts3DStatus
, reprojected, poseEstimated);
if(ui) {
for(int i = 0; i < trPtsCnt; i++)
cvCircle( pmdc->iCamColor, cvPointFrom32f(camPts[1][i])
, 3, CV_RGB(255,0,255), -1, 8, 0);
cvShowImage("PMD", pmdc->iPMDI);
cvShowImage("Cam", pmdc->iCamColor);
}
int key = cvWaitKey(5);
if(27 == key) return 0; // ESC pressed FIXME: release stuff
if((int)' ' == key) pause = !pause;
if(gl) if(glfwGetKey(GLFW_KEY_ESC) == GLFW_PRESS) return 0; //in OpenGL window ;)
} while(pause);
CV_SWAP(imgCamPrv, pmdc->iCam, swapTemp);
CV_SWAP(imgCamPyrPrv, imgCamPyr, swapTemp);
CV_SWAP(camPts[0], camPts[1], swapPts);
CV_SWAP(camPts3D[0], camPts3D[1], swapPts3);
CV_SWAP(pts3DStatus[0], pts3DStatus[1], swapStatus);
} // while(1)
// let OS clean all images and matrices
// TODO: releasePMD(&pmd);
if(gl) {
glfwTerminate();
}
printf("%i good frames, %i frames total.\n", goodFrames, frames);
return 0;
}
// Global rendering settings
float rotx = 0;
float roty = 0;
float rotz = 0;
float scale = 0.7;
int renderCoords = 1;
int renderCams = 1;
int renderColorPts = 1;
int renderLines = 0;
int renderTracked = 1;
int renderHistory = 1;
int centerCloud = 0;
void renderFrame(Frame *f) {
assert(f->img->imageData);
glBegin(GL_POINTS);
if(!renderColorPts) {
//FIXME: mess with the indices (i,j)
for(int j = 0; j < f->sz.width; j++)
for(int i = 0; i < f->sz.height; i++)
glVertex3f(f->pts[i][j].x, f->pts[i][j].y, f->pts[i][j].z);
glEnd();
} else {
uchar *imgCamPix = 0;
for(int j = 0; j < f->sz.width; j++)
for(int i = 0; i < f->sz.height; i++) {
int x = f->status[i][j].x;
int y = f->status[i][j].y;
if(x > 0) {
imgCamPix = &((uchar*)
(f->img->imageData + f->img->widthStep * y))[x*3];
glColor3f( ((float)imgCamPix[2])/255.0
, ((float)imgCamPix[1])/255.0
, ((float)imgCamPix[0])/255.0); //BGR
} else glColor3f(1.0, 0.0, 0.0);
glVertex3f(f->pts[i][j].x, f->pts[i][j].y, f->pts[i][j].z);
}
} // if renderColorPts else
}
void render( History *history, CvPoint3D32f *camPts, int trPtsCnt
, CvMat *rot, CvMat *trn
, CvPoint3D32f **camPts3D, char **pts3DStatus
, int reprojected, bool poseEstimated) {
if(glfwGetKey((int)'W') == GLFW_PRESS) roty += 10.0;
if(glfwGetKey((int)'S') == GLFW_PRESS) roty -= 10.0;
if(glfwGetKey((int)'A') == GLFW_PRESS) rotx -= 10.0;
if(glfwGetKey((int)'D') == GLFW_PRESS) rotx += 10.0;
if(glfwGetKey((int)'Q') == GLFW_PRESS) rotz -= 10.0;
if(glfwGetKey((int)'E') == GLFW_PRESS) rotz += 10.0;
if(glfwGetKey((int)'R') == GLFW_PRESS) scale -= 0.1;
if(glfwGetKey((int)'F') == GLFW_PRESS) scale += 0.1;
if(glfwGetKey((int)'1') == GLFW_PRESS) renderCoords = !renderCoords;
if(glfwGetKey((int)'2') == GLFW_PRESS) renderCams = !renderCams;
if(glfwGetKey((int)'3') == GLFW_PRESS) renderColorPts = !renderColorPts;
if(glfwGetKey((int)'4') == GLFW_PRESS) renderLines = !renderLines;
if(glfwGetKey((int)'5') == GLFW_PRESS) renderTracked = !renderTracked;
if(glfwGetKey((int)'6') == GLFW_PRESS) renderHistory = !renderHistory;
if(glfwGetKey((int)'C') == GLFW_PRESS) centerCloud = !centerCloud;
int width, height;
GLUquadric *quadric;
glfwGetWindowSize(&width, &height);
height = height < 1 ? 1 : height;
glViewport(0, 0, width, height);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(70, (double)width/(double)height, 0.01, 100.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
int cj = history->frame->sz.width / 2;
int ci = history->frame->sz.height / 2;
CvPoint3D32f **pts = history->frame->pts;
// gluLookAt(scale, scale, scale,
// pts[ci][cj].x, pts[ci][cj].y, -pts[ci][cj].z, 0.0, 1.0, 0.0);
gluLookAt(scale, scale, scale, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glScalef(1.0, 1.0, -1.0); // convert opengl coord system to left handed
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
/* text rendering */
char info[30];
FTPoint pt(5.0, 5.0);
sprintf(info, "2D pts: %i", trPtsCnt);
font->Render(info, -1, pt);
pt.Y(35.0);
sprintf(info, "3D pts: %i", reprojected);
font->Render(info, -1, pt);
pt.Y(65.0);
sprintf(info, "Align error: %f", history->frame->alignError);
font->Render(info, -1, pt);
glRotatef(rotz, 0.0, 0.0, 1.0);
glRotatef(roty, 0.0, 1.0, 0.0);
glRotatef(rotx, 1.0, 0.0, 0.0);
if(centerCloud) glTranslatef(-pts[ci][cj].x, -pts[ci][cj].y, -pts[ci][cj].z);
/***** xyz axes *****/
if(renderCoords) {
glBegin(GL_LINES);
glColor3f(1.0, 0.0, 0.0);
glVertex3f(0.0, 0.0, 0.0);
glVertex3f(1.0, 0.0, 0.0);
glColor3f(0.0, 1.0, 0.0);
glVertex3f(0.0, 0.0, 0.0);
glVertex3f(0.0, 1.0, 0.0);
glColor3f(0.0, 0.0, 1.0);
glVertex3f(0.0, 0.0, 0.0);
glVertex3f(0.0, 0.0, 1.0);
glEnd();
}
/***** render PMD-points PMD-cam in the O *****/
fflush(stdout);
renderFrame(history->frame);
fflush(stdout);
if(renderHistory) {
fflush(stdout);
History *histI = history;
glPushMatrix();
//for(int i = 0; histI->prev; i++) {
while(histI->prev) {
glColor3f(1.0, 0.0, 0.0);//, (100.0-i)/100);
assert(histI);
//if(histI->frame->alignError < 0.03) { //FIXME_ hardcoded thrs
glRotatef(-CV_MAT_ELEM(*histI->frame->rot, float, 0, 0), 1.0f, 0.0f, 0.0f);
glRotatef(-CV_MAT_ELEM(*histI->frame->rot, float, 1, 0), 0.0f, 1.0f, 0.0f);
glRotatef(-CV_MAT_ELEM(*histI->frame->rot, float, 2, 0), 0.0f, 0.0f, 1.0f);
glTranslatef( -CV_MAT_ELEM(*histI->frame->trn, float, 0, 0)
, -CV_MAT_ELEM(*histI->frame->trn, float, 1, 0)
, -CV_MAT_ELEM(*histI->frame->trn, float, 2, 0));
//}
histI = histI->prev;
renderFrame(histI->frame);
}
glPopMatrix();
}
quadric = gluNewQuadric();
if(renderCams) {
glColor3f(0.5, 0.5, 0.0);
glPushMatrix();
glScalef(1.0, 1.0, -1.0); // rotated cylinder
gluCylinder(quadric, 0.05, 0.0, 0.05, 10, 10);
glPopMatrix();
}
/***** render Cam and features according to essential matrix [R|t] *****/
if(renderTracked) {
glBegin(GL_LINES);
for(int i = 0; i < trPtsCnt; i++) {
if(pts3DStatus[1][i] &&
pts3DStatus[0][i]) {
glColor3f(1.0, 0.0, 0.0);
glVertex3f(camPts3D[0][i].x, camPts3D[0][i].y, camPts3D[0][i].z);
glVertex3f(camPts3D[1][i].x, camPts3D[1][i].y, camPts3D[1][i].z);
glColor3f(0.0, 0.0, 0.5);
glPushMatrix();
glTranslatef(camPts3D[0][i].x, camPts3D[0][i].y, camPts3D[0][i].z);
gluSphere(quadric, 0.005f, 10, 10);
glPopMatrix();
glColor3f(0.5, 0.0, 1.0);
glPushMatrix();
glTranslatef(camPts3D[1][i].x, camPts3D[1][i].y, camPts3D[1][i].z);
gluSphere(quadric, 0.01f, 10, 10);
glPopMatrix();
}
}
glEnd();
}
glBegin(GL_LINES);
for(int i = 0; i < trPtsCnt; i++)
if(pts3DStatus[1][i] &&
pts3DStatus[0][i]) {
glColor3f(1.0, 0.0, 0.0);
glVertex3f(camPts3D[0][i].x, camPts3D[0][i].y, camPts3D[0][i].z);
glVertex3f(camPts3D[1][i].x, camPts3D[1][i].y, camPts3D[1][i].z);
}
glEnd();
glPushMatrix();
glTranslatef( -CV_MAT_ELEM(*trn, float, 0, 0)
, -CV_MAT_ELEM(*trn, float, 1, 0)
, -CV_MAT_ELEM(*trn, float, 2, 0));
glRotatef(-CV_MAT_ELEM(*rot, float, 0, 0), 1.0f, 0.0f, 0.0f);
glRotatef(-CV_MAT_ELEM(*rot, float, 1, 0), 0.0f, 1.0f, 0.0f);
glRotatef(-CV_MAT_ELEM(*rot, float, 2, 0), 0.0f, 0.0f, 1.0f);
if(renderCams) {
glColor3f(0.0, 0.5, 0.5);
glPushMatrix();
glScalef(1.0, 1.0, -1.0);
gluCylinder(quadric, 0.05, 0.0, 0.05, 10, 10);
glPopMatrix();
}
if(renderLines) {
glColor3f(1.0, 0.0, 1.0);
glBegin(GL_LINES);
for(int i = 0; i < trPtsCnt; i++) {
glVertex3f(0.0f, 0.0f, 0.0f);
glVertex3f(-camPts[i].x, camPts[i].y, camPts[i].z); //FIXME: revise coordinates! why -camPts?!
}
glEnd();
}
glPopMatrix();
glPushMatrix();
glRotatef(CV_MAT_ELEM(*history->frame->rot, float, 2, 0), 0.0f, 0.0f, 1.0f);
glRotatef(CV_MAT_ELEM(*history->frame->rot, float, 1, 0), 0.0f, 1.0f, 0.0f);
glRotatef(CV_MAT_ELEM(*history->frame->rot, float, 0, 0), 1.0f, 0.0f, 0.0f);
glTranslatef( CV_MAT_ELEM(*history->frame->trn, float, 0, 0)
, CV_MAT_ELEM(*history->frame->trn, float, 1, 0)
, CV_MAT_ELEM(*history->frame->trn, float, 2, 0));
if(poseEstimated) glColor3f(0.0, 0.0, 1.0);
else glColor3f(0.0, 0.0, 0.3);
//render pose and pmd cam
glScalef(1.0, 1.0, -1.0);
gluCylinder(quadric, 0.05, 0.0, 0.05, 10, 10);
glColor4f(1.0, 0.0, 0.0, 0.5);
gluSphere(quadric, history->frame->alignError, 20, 20);
glPopMatrix();
gluDeleteQuadric(quadric);
glfwSwapBuffers();
}

View file

@ -0,0 +1,93 @@
/*
* extrinsic implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <stdlib.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
int main(int argc, char **argv) {
if(argc < 5) {
printf("extrinsic: x y img1 img2\n");
exit(1);
}
// extrinsic params: rotation and translation
CvMat *rotCam = cvCreateMat(1, 3, CV_32FC1);
CvMat *transCam = cvCreateMat(1, 3, CV_32FC1);
CvMat *rotPMD = cvCreateMat(1, 3, CV_32FC1);
CvMat *transPMD = cvCreateMat(1, 3, CV_32FC1);
CvSize boardSz = cvSize(atoi(argv[1]), atoi(argv[2]));
int totalPoints = boardSz.width*boardSz.height;
double boardSide = 0.04;
CvMat *objPts = cvCreateMat(totalPoints, 3, CV_32FC1);
CvPoint2D32f *cornersCam = (CvPoint2D32f*)cvAlloc(totalPoints * sizeof(CvPoint2D32f));
CvMat imgPtsCam = cvMat(totalPoints, 1, CV_32FC2, cornersCam);
CvPoint2D32f *cornersPMD = (CvPoint2D32f*)cvAlloc(totalPoints * sizeof(CvPoint2D32f));
CvMat imgPtsPMD = cvMat(totalPoints, 1, CV_32FC2, cornersPMD);
for(int i = 0; i < totalPoints; i++) {
CV_MAT_ELEM(*objPts, float, i, 0) = boardSide * (i / boardSz.width);
CV_MAT_ELEM(*objPts, float, i, 1) = boardSide * (i % boardSz.width);
CV_MAT_ELEM(*objPts, float, i, 2) = 0.0f;
}
IplImage *imgPMD = cvLoadImage(argv[3], CV_LOAD_IMAGE_GRAYSCALE);
IplImage *imgPMDU = cvCreateImage(cvGetSize(imgPMD), 8, 1);
IplImage *imgCam = cvLoadImage(argv[4], CV_LOAD_IMAGE_GRAYSCALE);
IplImage *imgCamU = cvCreateImage(cvGetSize(imgCam), 8, 1);
CvMat *intrinsicsCam = (CvMat*)cvLoad("../intrinsic-cam-6x4.xml");
CvMat *intrinsicsPMD = (CvMat*)cvLoad("../intrinsic-pmd-6x4.xml");
CvMat *distortionCam = (CvMat*)cvLoad("../distortion-cam-6x4.xml");
CvMat *distortionPMD = (CvMat*)cvLoad("../distortion-pmd-6x4.xml");
cvUndistort2(imgPMD, imgPMDU, intrinsicsPMD, distortionPMD);
cvUndistort2(imgCam, imgCamU, intrinsicsCam, distortionCam);
int cornersCountCam, cornersCountPMD;
int foundPMD = cvFindChessboardCorners(imgPMDU, boardSz, cornersPMD,
&cornersCountPMD, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
if(foundPMD) {
cvFindCornerSubPix(imgPMDU, cornersPMD, cornersCountPMD, cvSize(2, 2), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
printf("foundPMD\n");
}
int foundCam = cvFindChessboardCorners(imgCamU, boardSz, cornersCam,
&cornersCountCam, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
if(foundCam) {
cvFindCornerSubPix(imgCamU, cornersCam, cornersCountCam, cvSize(11, 11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
printf("foundCam\n");
}
cvNamedWindow("c", 0);
cvNamedWindow("p", 0);
cvShowImage("c", imgCamU);
cvShowImage("p", imgPMDU);
cvWaitKey(0);
if(foundCam) cvFindExtrinsicCameraParams2(objPts, &imgPtsCam, intrinsicsCam, distortionCam, rotCam, transCam);
if(foundPMD) cvFindExtrinsicCameraParams2(objPts, &imgPtsPMD, intrinsicsPMD, distortionPMD, rotPMD, transPMD);
cvSave("./rotcam.xml", rotCam);
cvSave("./rotpmd.xml", rotPMD);
cvSave("./transcam.xml", transCam);
cvSave("./transpmd.xml", transPMD);
}

View file

@ -0,0 +1,81 @@
// Copyright (C) 2007 by Cristóbal Carnero Liñán
// grendel.ccl@gmail.com
//
// This file is part of cvBlob.
//
// cvBlob is free software: you can redistribute it and/or modify
// it under the terms of the Lesser GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// cvBlob is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Lesser GNU General Public License for more details.
//
// You should have received a copy of the Lesser GNU General Public License
// along with cvBlob. If not, see <http://www.gnu.org/licenses/>.
//
#include <cmath>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"
namespace cvb
{
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
// http://www.topcoder.com/tc?module=Static&d1=tutorials&d2=geometry1
double cvDotProductPoints(CvPoint const &a, CvPoint const &b, CvPoint const &c)
{
double abx = b.x-a.x;
double aby = b.y-a.y;
double bcx = c.x-b.x;
double bcy = c.y-b.y;
return abx*bcx + aby*bcy;
}
double cvCrossProductPoints(CvPoint const &a, CvPoint const &b, CvPoint const &c)
{
double abx = b.x-a.x;
double aby = b.y-a.y;
double acx = c.x-a.x;
double acy = c.y-a.y;
return abx*acy - aby*acx;
}
double cvDistancePointPoint(CvPoint const &a, CvPoint const &b)
{
double abx = a.x-b.x;
double aby = a.y-b.y;
return sqrt(abx*abx + aby*aby);
}
double cvDistanceLinePoint(CvPoint const &a, CvPoint const &b, CvPoint const &c, bool isSegment)
{
if (isSegment)
{
double dot1 = cvDotProductPoints(a, b, c);
if (dot1>0) return cvDistancePointPoint(b, c);
double dot2 = cvDotProductPoints(b, a, c);
if(dot2>0) return cvDistancePointPoint(a, c);
}
return fabs(cvCrossProductPoints(a,b,c)/cvDistancePointPoint(a,b));
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
}

View file

@ -0,0 +1,775 @@
/*
* panorama implementation
*
* Copyright (C) HamidReza Houshiar
*
* Released under the GPL version 3.
*
*/
#include "slam6d/fbr/panorama.h"
using namespace std;
namespace fbr{
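/* Descriptive note: shared constructor body. Allocates the reflectance image,
   the range image and the point map. With the FARTHEST map method each pixel
   keeps a single 3D point (the farthest one projected onto it); with EXTENDED
   every projected point is stored in a per-pixel vector. */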
void panorama::init(unsigned int width, unsigned int height, projection_method method, unsigned int numberOfImages, double param, panorama_map_method mMethod){
iWidth = width;
iHeight = height;
pMethod = method;
nImages = numberOfImages;
pParam = param;
if(mMethod == FARTHEST){
iMap.create(iHeight, iWidth, CV_32FC(3));
iMap = cv::Scalar::all(0);
}
else if(mMethod == EXTENDED){
extendedIMap.resize(iHeight);
for (unsigned int i = 0; i < iHeight; i++)
extendedIMap[i].resize(iWidth);
}
iReflectance.create(iHeight, iWidth, CV_8U);
iReflectance = cv::Scalar::all(0);
iRange.create(iHeight, iWidth, CV_32FC(1));
iRange = cv::Scalar::all(0);
mapMethod = mMethod;
}
panorama::panorama(unsigned int width, unsigned int height, projection_method method, unsigned int numberOfImages, double param, panorama_map_method mMethod){
init(width, height, method, numberOfImages, param, mMethod);
}
panorama::panorama(unsigned int width, unsigned int height, projection_method method, unsigned int numberOfImages, double param){
init(width, height, method, numberOfImages, param, FARTHEST);
}
panorama::panorama(unsigned int width, unsigned int height, projection_method method, unsigned int numberOfImages){
double param = 0;
if(method == PANNINI)
param = 1;
else if (method == STEREOGRAPHIC)
param = 2;
init(width, height, method, numberOfImages, param, FARTHEST);
}
panorama::panorama(unsigned int width, unsigned int height, projection_method method){
double param = 0;
unsigned int numberOfImages = 1;
if(method == RECTILINEAR)
numberOfImages = 3;
else if(method == PANNINI){
numberOfImages = 3;
param = 1;
} else if (method == STEREOGRAPHIC){
numberOfImages = 3;
param = 2;
}
init(width, height, method, numberOfImages, param, FARTHEST);
}
void panorama::map(int x, int y, cv::MatIterator_<cv::Vec4f> it, double range){
iReflectance.at<uchar>(y,x) = (*it)[3]*255;//reflectance
if(mapMethod == FARTHEST){
//keep the point (and its range) with the maximum distance for this pixel
if( iRange.at<float>(y,x) < range ){
iRange.at<float>(y,x) = range;//range
iMap.at<cv::Vec3f>(y,x)[0] = (*it)[0];//x
iMap.at<cv::Vec3f>(y,x)[1] = (*it)[1];//y
iMap.at<cv::Vec3f>(y,x)[2] = (*it)[2];//z
}
}else if(mapMethod == EXTENDED){
iRange.at<float>(y,x) = range;//range
//adding all the points
cv::Vec3f point;
point[0] = (*it)[0];//x
point[1] = (*it)[1];//y
point[2] = (*it)[2];//z
extendedIMap[y][x].push_back(point);
}
}
void panorama::createPanorama(cv::Mat scan){
//EQUIRECTANGULAR projection
if(pMethod == EQUIRECTANGULAR){
//adding the longitude to x axis and latitude to y axis
double xFactor = (double) iWidth / 2 / M_PI;
int widthMax = iWidth - 1;
double yFactor = (double) iHeight / ((MAX_ANGLE - MIN_ANGLE) / 360 * 2 * M_PI);
//shift all the values to positive points on image
double heightLow =(0 - MIN_ANGLE) / 360 * 2 * M_PI;
int heightMax = iHeight - 1;
cv::MatIterator_<cv::Vec4f> it, end;
for( it = scan.begin<cv::Vec4f>(), end = scan.end<cv::Vec4f>(); it != end; ++it){
double kart[3], polar[3], phi, theta, range;
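//reorder the axes and convert from cm to metres before computing the polar coordinates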
kart[0] = (*it)[2]/100;
kart[1] = (*it)[0]/-100;
kart[2] = (*it)[1]/100;
toPolar(kart, polar);
//theta == polar[0] == scan [4]
//phi == polar[1] == scan [5]
//range == polar[2] == scan [3]
theta = polar[0] * 180 / M_PI;
phi = polar[1] * 180 / M_PI;
range = polar[2];
//horizontal angle of view of [0:360] and vertical of [-40:60]
phi = 360.0 - phi;
phi = phi * 2.0 * M_PI / 360.0;
theta -= 90;
theta *= -1;
theta *= 2.0 * M_PI / 360.0;
int x = (int) ( xFactor * phi);
if (x < 0) x = 0;
if (x > widthMax) x = widthMax;
int y = (int) ( yFactor * (theta + heightLow) );
y = heightMax - y;
if (y < 0) y = 0;
if (y > heightMax) y = heightMax;
//create the iReflectance iRange and map
map(x, y, it, range);
}
}
//CONIC projection
if(pMethod == CONIC){
// set up maximum latitude and longitude angles of the robot
double MIN_VERT_ANGLE = MIN_ANGLE * M_PI / 180.0, MAX_VERT_ANGLE = MAX_ANGLE * M_PI / 180.0,
MIN_HORIZ_ANGLE = -M_PI, MAX_HORIZ_ANGLE = M_PI;
// set up initial parameters according to MathWorld: http://mathworld.wolfram.com/AlbersEqual-AreaConicProjection.html
double Lat0 = 0., Long0 = 0.;
double Phi1 = -40. * M_PI / 180.0, Phi2 = 60 * M_PI / 180.0;
double n = (sin(Phi1) + sin(Phi2)) / 2.;
double C = sqr(cos(Phi1)) + 2 * n * sin(Phi1);
double Rho0 = sqrt(C - 2 * n * sin(Lat0)) / n;
// set up max values for x and y and add the longitude to x axis and latitude to y axis
double xmax = (1./n * sqrt(C - 2*n*sin( MIN_VERT_ANGLE )) ) * sin(n * (MAX_HORIZ_ANGLE - Long0));
double xmin = (1./n * sqrt(C - 2*n*sin( MIN_VERT_ANGLE )) ) * sin(n * (MIN_HORIZ_ANGLE - Long0));
double xFactor = (double) iWidth / ( xmax - xmin );
int widthMax = iWidth - 1;
double ymin = Rho0 - (1./n * sqrt(C - 2*n*sin(MIN_VERT_ANGLE)) ) * cos(n * ( 0. - Long0 ));
double ymax = Rho0 - (1./n * sqrt(C - 2*n*sin(MAX_VERT_ANGLE)) ) * cos(n * (MAX_HORIZ_ANGLE - Long0 ));
double yFactor = (double) iHeight / ( ymax - ymin );
//shift all the values to positive points on image
int heightMax = iHeight - 1;
cv::MatIterator_<cv::Vec4f> it, end;
for( it = scan.begin<cv::Vec4f>(), end = scan.end<cv::Vec4f>(); it != end; ++it){
double kart[3], polar[3], phi, theta, range;
kart[0] = (*it)[2]/100;
kart[1] = (*it)[0]/-100;
kart[2] = (*it)[1]/100;
toPolar(kart, polar);
//theta == polar[0] == scan [4]
//phi == polar[1] == scan [5]
//range == polar[2] == scan [3]
theta = polar[0] * 180 / M_PI;
phi = polar[1] * 180 / M_PI;
range = polar[2];
//phi == longitude == horizontal angle of view of [0:360]
phi = 180.0 - phi;
phi *= M_PI / 180.0;
//theta == latitude == vertical angle of view of [-40:60]
theta -= 90;
theta *= -1;
theta *= M_PI / 180.0;
// add minimum x position as an offset
int x = (int) ( xFactor * (sqrt(C - 2 * n * sin( theta) ) / n * sin(n * (phi - Long0)) + fabs(xmin) ) );
if (x < 0) x = 0;
if (x > widthMax) x = widthMax;
// add minimum y position as an offset
int y = (int) ( yFactor * (Rho0 - (1/n * sqrt(C - 2 * n * sin( theta) ) ) * cos(n * (phi - Long0)) + fabs( ymin ) ) );
y = heightMax - y;
if (y < 0) y = 0;
if (y > heightMax) y = heightMax;
//create the iReflectance iRange and map
map(x, y, it, range);
}
}
//CYLINDRICAL projection
if(pMethod == CYLINDRICAL){
//adding the longitude to x and tan(latitude) to y
//find the x and y range
double xFactor = (double) iWidth / 2 / M_PI;
int widthMax = iWidth - 1;
double yFactor = (double) iHeight / (tan(MAX_ANGLE / 360 * 2 * M_PI) - tan(MIN_ANGLE / 360 * 2 * M_PI));
double heightLow = (MIN_ANGLE) / 360 * 2 * M_PI;
int heightMax = iHeight - 1;
cv::MatIterator_<cv::Vec4f> it, end;
for( it = scan.begin<cv::Vec4f>(), end = scan.end<cv::Vec4f>(); it != end; ++it){
double kart[3], polar[3], phi, theta, range;
kart[0] = (*it)[2]/100;
kart[1] = (*it)[0]/-100;
kart[2] = (*it)[1]/100;
toPolar(kart, polar);
//theta == polar[0] == scan [4]
//phi == polar[1] == scan [5]
//range == polar[2] == scan [3]
theta = polar[0] * 180 / M_PI;
phi = polar[1] * 180 / M_PI;
range = polar[2];
//horizontal angle of view of [0:360] and vertical of [-40:60]
phi = 360.0 - phi;
phi = phi * 2.0 * M_PI / 360.0;
theta -= 90;
theta *= -1;
theta *= 2.0 * M_PI / 360.0;
int x = (int) ( xFactor * phi);
if (x < 0) x = 0;
if (x > widthMax) x = widthMax;
int y = (int) ((double) yFactor * (tan(theta) - tan(heightLow)));
y = heightMax - y;
if (y < 0) y = 0;
if (y > heightMax) y = heightMax;
//create the iReflectance iRange and map
map(x, y, it, range);
}
}
//Mercator Projection
if( pMethod == MERCATOR){
//find the x and y range
double xFactor = (double) iWidth / 2 / M_PI;
int widthMax = iWidth - 1;
double yFactor = (double) iHeight / ( log( tan( MAX_ANGLE / 360 * 2 * M_PI ) + ( 1 / cos( MAX_ANGLE / 360 * 2 * M_PI ) ) ) - log ( tan( MIN_ANGLE / 360 * 2 * M_PI) + (1/cos(MIN_ANGLE / 360 * 2 * M_PI) ) ) );
double heightLow = log(tan(MIN_ANGLE / 360 * 2 * M_PI) + (1/cos(MIN_ANGLE / 360 * 2 * M_PI)));
int heightMax = iHeight - 1;
cv::MatIterator_<cv::Vec4f> it, end;
for( it = scan.begin<cv::Vec4f>(), end = scan.end<cv::Vec4f>(); it != end; ++it){
double kart[3], polar[3], phi, theta, range;
kart[0] = (*it)[2]/100;
kart[1] = (*it)[0]/-100;
kart[2] = (*it)[1]/100;
toPolar(kart, polar);
//theta == polar[0] == scan [4]
//phi == polar[1] == scan [5]
//range == polar[2] == scan [3]
theta = polar[0] * 180 / M_PI;
phi = polar[1] * 180 / M_PI;
range = polar[2];
//horizontal angle of view of [0:360] and vertical of [-40:60]
phi = 360.0 - phi;
phi = phi * 2.0 * M_PI / 360.0;
theta -= 90;
theta *= -1;
theta *= 2.0 * M_PI / 360.0;
int x = (int) ( xFactor * phi);
if (x < 0) x = 0;
if (x > widthMax) x = widthMax;
int y = (int) ( yFactor * (log(tan(theta) + (1/cos(theta))) - heightLow) );
y = heightMax - y;
if (y < 0) y = 0;
if (y > heightMax) y = heightMax;
//create the iReflectance iRange and map
map(x, y, it, range);
}
}
//RECTILINEAR projection
if(pMethod == RECTILINEAR){
//default value for nImages
if(nImages == 0) nImages = 3;
cout<<"Number of images per scan is: "<<nImages<<endl;
double l0, p1, iMinx, iMaxx, iMiny, iMaxy, interval;
interval = 2 * M_PI / nImages;
iMiny = -M_PI/9;
iMaxy = 2*M_PI/9;
//latitude of projection center
p1 = 0;
//go through all points
cv::MatIterator_<cv::Vec4f> it, end;
for( it = scan.begin<cv::Vec4f>(), end = scan.end<cv::Vec4f>(); it != end; ++it){
double kart[3], polar[3], phi, theta, range;
kart[0] = (*it)[2]/100;
kart[1] = (*it)[0]/-100;
kart[2] = (*it)[1]/100;
toPolar(kart, polar);
//theta == polar[0] == scan [4]
//phi == polar[1] == scan [5]
//range == polar[2] == scan [3]
theta = polar[0] * 180 / M_PI;
phi = polar[1] * 180 / M_PI;
range = polar[2];
//horizontal angle of view of [0:360] and vertical of [-40:60]
phi = 360.0 - phi;
phi = phi * 2.0 * M_PI / 360.0;
theta -= 90;
theta *= -1;
theta *= 2.0 * M_PI / 360.0;
for(unsigned int j = 0 ; j < nImages ; j++){
iMinx = j * interval;
iMaxx = (j + 1) * interval;
//check for point in interval
if(phi < iMaxx && phi > iMinx){
double max, min, coscRectilinear;
//the longitude of projection center
l0 = iMinx + interval / 2;
//finding the min and max of the x direction
coscRectilinear = sin(p1) * sin(iMaxy) + cos(p1) * cos(iMaxy) * cos(iMaxx - l0);
max = (cos(iMaxy) * sin(iMaxx - l0) / coscRectilinear);
coscRectilinear = sin(p1) * sin(iMiny) + cos(p1) * cos(iMiny) * cos(iMinx - l0);
min = (cos(iMiny) * sin(iMinx - l0) / coscRectilinear);
double xFactor = (double) (iWidth / nImages) / (max - min);
double xlow = min;
int widthMax = (iWidth / nImages) - 1;
//finding the min and max of y direction
coscRectilinear = sin(p1) * sin(iMaxy) + cos(p1) * cos(iMaxy) * cos(iMaxx - l0);
max = ( (cos(p1) * sin(iMaxy) - sin(p1) * cos(iMaxy) * cos(iMaxx - l0) )/ coscRectilinear);
coscRectilinear = sin(p1) * sin(iMiny) + cos(p1) * cos(iMiny) * cos(iMinx - l0);
min = ( (cos(p1) * sin(iMiny) - sin(p1) * cos(iMiny) * cos(iMinx - l0) )/ coscRectilinear);
double yFactor = (double) iHeight / (max - min);
double heightLow = min;
int heightMax = iHeight - 1;
//project the points and add them to image
coscRectilinear = sin(p1) * sin(theta) + cos(p1) * cos(theta) * cos(phi - l0);
int x = (int)(xFactor) * ((cos(theta) * sin(phi - l0) / coscRectilinear) - xlow);
if (x < 0) x = 0;
if (x > widthMax) x = widthMax;
x = x + (j * iWidth / nImages);
int y = (int) (yFactor) * (( (cos(p1) * sin(theta) - sin(p1) * cos(theta) * cos(phi - l0)) / coscRectilinear) - heightLow);
y = heightMax - y;
if (y < 0) y = 0;
if (y > heightMax) y = heightMax;
//create the iReflectance iRange and map
map(x, y, it, range);
}
}
}
}
//PANNINI projection
if(pMethod == PANNINI){
//default values for nImages and dPannini==pParam
if(pParam == 0) pParam = 1;
if(nImages == 0) nImages = 3;
cout << "Parameter d is:" << pParam <<", Number of images per scan is:" << nImages << endl;
double l0, p1, iMinx, iMaxx, iMiny, iMaxy, interval;
interval = 2 * M_PI / nImages;
iMiny = -M_PI/9;
iMaxy = 2*M_PI/9;
//latitude of projection center
p1 = 0;
cv::MatIterator_<cv::Vec4f> it, end;
for( it = scan.begin<cv::Vec4f>(), end = scan.end<cv::Vec4f>(); it != end; ++it){
double kart[3], polar[3], phi, theta, range;
kart[0] = (*it)[2]/100;
kart[1] = (*it)[0]/-100;
kart[2] = (*it)[1]/100;
toPolar(kart, polar);
//theta == polar[0] == scan [4]
//phi == polar[1] == scan [5]
//range == polar[2] == scan [3]
theta = polar[0] * 180 / M_PI;
phi = polar[1] * 180 / M_PI;
range = polar[2];
//horizontal angle of view of [0:360] and vertical of [-40:60]
phi = 360.0 - phi;
phi = phi * 2.0 * M_PI / 360.0;
theta -= 90;
theta *= -1;
theta *= 2.0 * M_PI / 360.0;
for(unsigned int j = 0 ; j < nImages ; j++){
iMinx = j * interval;
iMaxx = (j + 1) * interval;
//check for point in interval
if(phi < (iMaxx) && phi > (iMinx)){
double max, min, sPannini;
//the longitude of projection center
l0 = iMinx + interval / 2;
//use the S variable of pannini projection mentioned in the thesis
//finding the min and max of the x direction
sPannini = (pParam + 1) / (pParam + sin(p1) * tan(iMaxy) + cos(p1) * cos(iMaxx - l0));
max = sPannini * (sin(iMaxx - l0));
sPannini = (pParam + 1) / (pParam + sin(p1) * tan(iMiny) + cos(p1) * cos(iMinx - l0));
min = sPannini * (sin(iMinx - l0));
double xFactor = (double) (iWidth / nImages) / (max - min);
double xlow = min;
int widthMax = (iWidth / nImages) - 1;
//finding the min and max of y direction
sPannini = (pParam + 1) / (pParam + sin(p1) * tan(iMaxy) + cos(p1) * cos(iMaxx - l0));
max = sPannini * (tan(iMaxy) * (cos(p1) - sin(p1) * 1/tan(iMaxy) * cos(iMaxx - l0)));
sPannini = (pParam + 1) / (pParam + sin(p1) * tan(iMiny) + cos(p1) * cos(iMinx - l0));
min = sPannini * (tan(iMiny) * (cos(p1) - sin(p1) * 1/tan(iMiny) * cos(iMinx - l0)));
double yFactor = (double) iHeight / (max - min);
double heightLow = min;
int heightMax = iHeight - 1;
//project the points and add them to image
sPannini = (pParam + 1) / (pParam + sin(p1) * tan(theta) + cos(p1) * cos(phi - l0));
int x = (int)(xFactor * (sPannini * sin(phi - l0) - xlow));
if (x < 0) x = 0;
if (x > widthMax) x = widthMax;
x = x + (j * iWidth / nImages);
int y = (int) (yFactor * ( (sPannini * tan(theta) * (cos(p1) - sin(p1) * (1/tan(theta)) * cos(phi - l0) ) ) - heightLow ));
y = heightMax - y;
if (y < 0) y = 0;
if (y > heightMax) y = heightMax;
//create the iReflectance iRange and map
map(x, y, it, range);
}
}
}
}
//STEREOGRAPHIC projection
if(pMethod == STEREOGRAPHIC){
//default values for nImages and rStereographic==pParam
if(pParam == 0) pParam = 2;
if(nImages == 0) nImages = 3;
cout << "Paremeter R is:" << pParam << ", Number of images per scan is:" << nImages << endl;
// l0 and p1 are the center of projection; iMinx, iMaxx, iMiny, iMaxy are the boundaries of the intervals
double l0, p1, iMinx, iMaxx, iMiny, iMaxy, interval;
interval = 2 * M_PI / nImages;
iMiny = -M_PI/9;
iMaxy = 2*M_PI/9;
//latitude of projection center
p1 = 0;
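//The loop below applies the stereographic projection with R == pParam and
//projection center (l0, p1); for a point at longitude phi and latitude theta:
//  k = 2R / (1 + sin(p1)*sin(theta) + cos(p1)*cos(theta)*cos(phi - l0))
//  x = k * cos(theta) * sin(phi - l0)
//  y = k * (cos(p1)*sin(theta) - sin(p1)*cos(theta)*cos(phi - l0))
//The same formulas at the interval bounds give the per-image scale factors.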
//go through all points
cv::MatIterator_<cv::Vec4f> it, end;
for( it = scan.begin<cv::Vec4f>(), end = scan.end<cv::Vec4f>(); it != end; ++it){
double kart[3], polar[3], phi, theta, range;
kart[0] = (*it)[2]/100;
kart[1] = (*it)[0]/-100;
kart[2] = (*it)[1]/100;
toPolar(kart, polar);
//theta == polar[0] == scan [4]
//phi == polar[1] == scan [5]
//range == polar[2] == scan [3]
theta = polar[0] * 180 / M_PI;
phi = polar[1] * 180 / M_PI;
range = polar[2];
//horizontal angle of view of [0:360] and vertical of [-40:60]
phi = 360.0 - phi;
phi = phi * 2.0 * M_PI / 360.0;
theta -= 90;
theta *= -1;
theta *= 2.0 * M_PI / 360.0;
for (unsigned int j = 0 ; j < nImages ; j++){
iMinx = j * interval;
iMaxx = (j + 1) * interval;
//check for point in intervals
if(phi < (iMaxx) && phi > (iMinx)){
double max, min, k;
//longitude of projection center
l0 = iMinx + interval / 2;
//use the R variable of the stereographic projection mentioned in the thesis
//finding the min and max of x direction
k = (2 * pParam) / (1 + sin(p1) * sin(p1) + cos(p1) * cos(p1) * cos(iMaxx - l0));
max = k * cos(p1) * sin (iMaxx - l0);
k = (2 * pParam) / (1 + sin (p1) * sin(p1) + cos(p1) * cos(p1) * cos(iMinx -l0));
min = k * cos(p1) * sin (iMinx -l0);
double xFactor = (double) (iWidth / nImages) / (max - min);
double xlow = min;
int widthMax = (iWidth / nImages) - 1;
//finding the min and max of y direction
k = (2 * pParam) / (1 + sin(p1) * sin(iMaxy) + cos(p1) * cos(iMaxy) * cos(iMaxx - l0));
max = k * (cos(p1) * sin(iMaxy) - sin(p1) * cos(iMaxy) * cos(iMaxx - l0));
k = (2 * pParam) / (1 + sin(p1) * sin(iMiny) + cos(p1) * cos(iMiny) * cos(iMinx - l0));
min = k * (cos(p1) * sin(iMiny) - sin(p1) * cos(iMiny) * cos(iMinx - l0));
double yFactor = (double) iHeight / (max - min);
double heightLow = min;
int heightMax = iHeight - 1;
//project the points and add them to image
k = (2 * pParam) / (1 + sin(p1) * sin(theta) + cos(p1) * cos(theta) * cos(phi - l0));
int x = (int) (xFactor * (k * cos(theta) * sin(phi - l0) - xlow));
if (x < 0) x = 0;
if (x > widthMax) x = widthMax;
x = x + (j * iWidth / nImages);
int y = (int) (yFactor * (k * ( cos(p1) * sin(theta) - sin(p1) * cos(theta) * cos(phi - l0) ) - heightLow));
y = heightMax - y;
if (y < 0) y = 0;
if (y > heightMax) y = heightMax;
//create the iReflectance iRange and map
map(x, y, it, range);
}
}
}
}
//ZAXIS projection
if(pMethod == ZAXIS){
double zmin = -200;
double zmax = 4000;
//map the longitude to the x axis and the height (z) to the y axis
double xFactor = (double) iWidth / 2 / M_PI;
int widthMax = iWidth - 1;
cout << "ZMAX= " << zmax << " ZMIN= "<< zmin << endl;
double yFactor = (double) iHeight / (zmax - zmin);
//shift all the values to positive points on the image
double heightLow = zmin;
int heightMax = iHeight - 1;
cv::MatIterator_<cv::Vec4f> it, end;
for( it = scan.begin<cv::Vec4f>(), end = scan.end<cv::Vec4f>(); it != end; ++it){
double kart[3], polar[3], phi, theta, range;
kart[0] = (*it)[2]/100;
kart[1] = (*it)[0]/-100;
kart[2] = (*it)[1]/100;
toPolar(kart, polar);
//theta == polar[0] == scan [4]
//phi == polar[1] == scan [5]
//range == polar[2] == scan [3]
theta = polar[0] * 180 / M_PI;
phi = polar[1] * 180 / M_PI;
range = polar[2];
//horizontal angle of view of [0:360] and vertical of [-40:60]
phi = 360.0 - phi;
phi = phi * 2.0 * M_PI / 360.0;
theta -= 90;
theta *= -1;
theta *= 2.0 * M_PI / 360.0;
int x = (int) ( xFactor * phi);
if (x < 0) x = 0;
if (x > widthMax) x = widthMax;
///////////////////check this
int y = (int) ( yFactor * ((*it)[1] - heightLow) );
y = heightMax - y;
if (y < 0) y = 0;
if (y > heightMax) y = heightMax;
//create the iReflectance iRange and map
map(x, y, it, range);
}
}
}
void panorama::recoverPointCloud(const cv::Mat& range_image,
cv::Mat& reflectance_image, vector<cv::Vec4f> &reduced_points) {
if (range_image.cols != reflectance_image.cols
|| range_image.rows != reflectance_image.rows) {
cerr << "range image and reflectance image have different geometries - using empty range image" << endl;
reflectance_image.create(range_image.size(), CV_8U);
reflectance_image = cv::Scalar::all(0);
}
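//Each recovery branch below inverts the corresponding forward projection:
//every pixel (row, col) is mapped back to polar coordinates (theta, phi, range),
//converted to Cartesian, and appended to reduced_points. Pixels that land at
//the origin (zero range) are skipped, except the very first one (first_seen),
//so empty image regions do not flood the output.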
//recover from EQUIRECTANGULAR projection
if(pMethod == EQUIRECTANGULAR) {
double xFactor = (double) range_image.size().width / 2 / M_PI;
//int widthMax = range_image.size().width - 1;
double yFactor = (double) range_image.size().height / ((MAX_ANGLE - MIN_ANGLE) / 360 * 2 * M_PI);
double heightLow = (0 - MIN_ANGLE) / 360 * 2 * M_PI;
int heightMax = range_image.size().height - 1;
bool first_seen = true;
for (int row = 0; row < range_image.size().height; ++row) {
for (int col = 0; col < range_image.size().width; ++col) {
float range = range_image.at<float>(row, col);
float reflectance = reflectance_image.at<uchar>(row,col)/255.0;
float theta = (heightMax - row) / yFactor - heightLow;
float phi = col / xFactor;
phi *= 180.0 / M_PI;
phi = 360.0 - phi;
phi *= M_PI / 180.0;
theta *= 180.0 / M_PI;
theta *= -1;
theta += 90.0;
theta *= M_PI / 180.0;
double polar[3] = { theta, phi, range }, cartesian[3] = {0., 0., 0.};
toKartesian(polar, cartesian);
if( fabs(cartesian[0]) < 1e-5 && fabs(cartesian[1]) < 1e-5 && fabs(cartesian[2]) < 1e-5) {
if (first_seen) first_seen = false;
else continue;
}
reduced_points.push_back(cv::Vec4f(-100.0*cartesian[1],
100.0*cartesian[2],
100.0*cartesian[0],
reflectance));
}
}
}
//recover from CYLINDRICAL projection
if(pMethod == CYLINDRICAL) {
double xFactor = (double) range_image.size().width / 2 / M_PI;
//int widthMax = range_image.size().width - 1;
double yFactor = (double) range_image.size().height / (tan(MAX_ANGLE / 360 * 2 * M_PI) - tan(MIN_ANGLE / 360 * 2 * M_PI));
double heightLow = (MIN_ANGLE) / 360 * 2 * M_PI;
//int heightMax = range_image.size().height - 1;
bool first_seen = true;
for (int row = 0; row < range_image.size().height; ++row) {
for (int col = 0; col < range_image.size().width; ++col) {
float range = range_image.at<float>(row, col);
float reflectance = reflectance_image.at<uchar>(row,col)/255.0;
float theta = atan2(row + yFactor * tan(heightLow), yFactor);
float phi = col / xFactor;
phi *= 180.0 / M_PI;
phi = 360.0 - phi;
phi *= M_PI / 180.0;
theta *= 180.0 / M_PI;
theta *= -1;
theta += 90.0;
theta *= M_PI / 180.0;
double polar[3] = { theta, phi, range }, cartesian[3] = {0., 0., 0.};
toKartesian(polar, cartesian);
if( fabs(cartesian[0]) < 1e-5 && fabs(cartesian[1]) < 1e-5 && fabs(cartesian[2]) < 1e-5) {
if (first_seen) first_seen = false;
else continue;
}
reduced_points.push_back(cv::Vec4f(-100.0*cartesian[1],
100.0*cartesian[2],
100.0*cartesian[0],
reflectance));
}
}
}
//recover from MERCATOR projection
if(pMethod == MERCATOR) {
double xFactor = (double) range_image.size().width / 2 / M_PI;
double yFactor = (double) range_image.size().height / ( log( tan( MAX_ANGLE / 360 * 2 * M_PI ) + ( 1 / cos( MAX_ANGLE / 360 * 2 * M_PI ) ) ) - log ( tan( MIN_ANGLE / 360 * 2 * M_PI) + (1/cos(MIN_ANGLE / 360 * 2 * M_PI) ) ) );
double heightLow = log(tan(MIN_ANGLE / 360 * 2 * M_PI) + (1/cos(MIN_ANGLE / 360 * 2 * M_PI)));
int heightMax = range_image.size().height - 1;
bool first_seen = true;
for (int row = 0; row < range_image.size().height; ++row) {
for (int col = 0; col < range_image.size().width; ++col) {
float range = range_image.at<float>(row, col);
float reflectance = reflectance_image.at<uchar>(row,col)/255.0;
float theta = 2 * atan2(exp((heightMax - row) / yFactor + heightLow), 1.) - M_PI_2;
float phi = col / xFactor;
phi *= 180.0 / M_PI;
phi = 180.0 - phi;
phi *= M_PI / 180.0;
theta *= 180.0 / M_PI;
theta *= -1;
theta += 90.0;
theta *= M_PI / 180.0;
double polar[3] = { theta, phi, range }, cartesian[3] = {0., 0., 0.};
toKartesian(polar, cartesian);
if( fabs(cartesian[0]) < 1e-5 && fabs(cartesian[1]) < 1e-5 && fabs(cartesian[2]) < 1e-5) {
if (first_seen) first_seen = false;
else continue;
}
reduced_points.push_back(cv::Vec4f(-100.0*cartesian[1],
100.0*cartesian[2],
100.0*cartesian[0],
reflectance));
}
}
}
//recover from CONIC projection
if(pMethod == CONIC) {
// set up maximum latitude and longitude angles of the robot
double MIN_VERT_ANGLE = MIN_ANGLE * M_PI / 180.0, MAX_VERT_ANGLE = MAX_ANGLE * M_PI / 180.0,
MIN_HORIZ_ANGLE = -M_PI, MAX_HORIZ_ANGLE = M_PI;
// set up initial parameters according to MathWorld: http://mathworld.wolfram.com/AlbersEqual-AreaConicProjection.html
double Lat0 = 0., Long0 = 0.;
double Phi1 = -40. * M_PI / 180.0, Phi2 = 60 * M_PI / 180.0;
double n = (sin(Phi1) + sin(Phi2)) / 2.;
double C = sqr(cos(Phi1)) + 2 * n * sin(Phi1);
double Rho0 = sqrt(C - 2 * n * sin(Lat0)) / n;
// set up max values for x and y and add the longitude to x axis and latitude to y axis
double xmax = (1./n * sqrt(C - 2*n*sin( MIN_VERT_ANGLE )) ) * sin(n * (MAX_HORIZ_ANGLE - Long0));
double xmin = (1./n * sqrt(C - 2*n*sin( MIN_VERT_ANGLE )) ) * sin(n * (MIN_HORIZ_ANGLE - Long0));
double xFactor = (double) range_image.size().width / ( xmax - xmin );
double ymin = Rho0 - (1./n * sqrt(C - 2*n*sin(MIN_VERT_ANGLE)) ) * cos(n * ( 0. - Long0 ));
double ymax = Rho0 - (1./n * sqrt(C - 2*n*sin(MAX_VERT_ANGLE)) ) * cos(n * (MAX_HORIZ_ANGLE - Long0 ));
double yFactor = (double) range_image.size().height / ( ymax - ymin );
int heightMax = range_image.size().height - 1;
bool first_seen = true;
for (int row = 0; row < range_image.size().height; ++row) {
for (int col = 0; col < range_image.size().width; ++col) {
float range = range_image.at<float>(row, col);
float reflectance = reflectance_image.at<uchar>(row,col)/255.0;
float x = col * 1. / xFactor - fabs(xmin);
float y = (heightMax - row) * 1. / yFactor - fabs(ymin);
float theta = asin((C - (x*x + (Rho0 - y) * (Rho0 - y)) * n * n) / (2 * n));
float phi = Long0 + (1./n) * ::atan2(x, Rho0 - y);
phi *= 180.0 / M_PI;
phi = 360.0 - phi;
phi *= M_PI / 180.0;
theta *= 180.0 / M_PI;
theta *= -1;
theta += 90.0;
theta *= M_PI / 180.0;
double polar[3] = { theta, phi, range }, cartesian[3] = {0., 0., 0.};
toKartesian(polar, cartesian);
//if ( std::isnan(cartesian[0]) || std::isnan(cartesian[1]) || std::isnan(cartesian[2]) ) continue;
if( fabs(cartesian[0]) < 1e-5 && fabs(cartesian[1]) < 1e-5 && fabs(cartesian[2]) < 1e-5) {
if (first_seen) first_seen = false;
else continue;
}
reduced_points.push_back(cv::Vec4f(-100.0*cartesian[1],
100.0*cartesian[2],
100.0*cartesian[0],
reflectance));
}
}
}
}
unsigned int panorama::getImageWidth(){
return iWidth;
}
unsigned int panorama::getImageHeight(){
return iHeight;
}
projection_method panorama::getProjectionMethod(){
return pMethod;
}
unsigned int panorama::getNumberOfImages(){
return nImages;
}
double panorama::getProjectionParam(){
return pParam;
}
cv::Mat panorama::getReflectanceImage(){
return iReflectance;
}
cv::Mat panorama::getMap(){
return iMap;
}
cv::Mat panorama::getRangeImage(){
return iRange;
}
vector<vector<vector<cv::Vec3f> > > panorama::getExtendedMap(){
return extendedIMap;
}
panorama_map_method panorama::getMapMethod(){
return mapMethod;
}
void panorama::getDescription(){
cout << "panorama created with width: " << iWidth << ", and height: "
<< iHeight << ", and projection method: " << projectionMethodToString(pMethod)
<< ", number of images: " << nImages << ", projection param: " << pParam << "."
<< endl;
cout << endl;
}
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,438 @@
// Copyright (C) 2007 by Cristóbal Carnero Liñán
// grendel.ccl@gmail.com
//
// This file is part of cvBlob.
//
// cvBlob is free software: you can redistribute it and/or modify
// it under the terms of the Lesser GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// cvBlob is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Lesser GNU General Public License for more details.
//
// You should have received a copy of the Lesser GNU General Public License
// along with cvBlob. If not, see <http://www.gnu.org/licenses/>.
//
#include <climits>
#define _USE_MATH_DEFINES
#include <cmath>
#include <deque>
#include <iostream>
#include <fstream>
#include <sstream>
using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"
#ifdef M_PI
const double pi = M_PI;
#else
const double pi = std::atan(1.)*4.;
#endif // M_PI
namespace cvb
{
void cvRenderContourChainCode(CvContourChainCode const *contour, IplImage const *img, CvScalar const &color)
{
CV_FUNCNAME("cvRenderContourChainCode");
__CV_BEGIN__;
{
CV_ASSERT(img&&(img->depth==IPL_DEPTH_8U)&&(img->nChannels==3));
int stepDst = img->widthStep/(img->depth/8);
int img_width = img->width;
int img_height = img->height;
int img_offset = 0;
if(img->roi)
{
img_width = img->roi->width;
img_height = img->roi->height;
img_offset = (img->nChannels * img->roi->xOffset) + (img->roi->yOffset * stepDst);
}
unsigned char *imgData = (unsigned char *)img->imageData + img_offset;
unsigned int x = contour->startingPoint.x;
unsigned int y = contour->startingPoint.y;
for (CvChainCodes::const_iterator it=contour->chainCode.begin(); it!=contour->chainCode.end(); ++it)
{
imgData[img->nChannels*x+img->widthStep*y+0] = (unsigned char)(color.val[0]); // Blue
imgData[img->nChannels*x+img->widthStep*y+1] = (unsigned char)(color.val[1]); // Green
imgData[img->nChannels*x+img->widthStep*y+2] = (unsigned char)(color.val[2]); // Red
x += cvChainCodeMoves[*it][0];
y += cvChainCodeMoves[*it][1];
}
}
__CV_END__;
}
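// Converts a Freeman chain-code contour into a polygon: a new vertex is emitted
// whenever the chain-code direction changes, so runs of identical moves collapse
// into a single edge.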
CvContourPolygon *cvConvertChainCodesToPolygon(CvContourChainCode const *cc)
{
CV_FUNCNAME("cvConvertChainCodesToPolygon");
__CV_BEGIN__;
{
CV_ASSERT(cc!=NULL);
CvContourPolygon *contour = new CvContourPolygon;
unsigned int x = cc->startingPoint.x;
unsigned int y = cc->startingPoint.y;
contour->push_back(cvPoint(x, y));
if (cc->chainCode.size())
{
CvChainCodes::const_iterator it=cc->chainCode.begin();
CvChainCode lastCode = *it;
x += cvChainCodeMoves[*it][0];
y += cvChainCodeMoves[*it][1];
++it;
for (; it!=cc->chainCode.end(); ++it)
{
if (lastCode!=*it)
{
contour->push_back(cvPoint(x, y));
lastCode=*it;
}
x += cvChainCodeMoves[*it][0];
y += cvChainCodeMoves[*it][1];
}
}
return contour;
}
__CV_END__;
}
void cvRenderContourPolygon(CvContourPolygon const *contour, IplImage *img, CvScalar const &color)
{
CV_FUNCNAME("cvRenderContourPolygon");
__CV_BEGIN__;
{
CV_ASSERT(img&&(img->depth==IPL_DEPTH_8U)&&(img->nChannels==3));
CvContourPolygon::const_iterator it=contour->begin();
if (it!=contour->end())
{
unsigned int fx, x, fy, y;
fx = x = it->x;
fy = y = it->y;
for (; it!=contour->end(); ++it)
{
cvLine(img, cvPoint(x, y), cvPoint(it->x, it->y), color, 1);
x = it->x;
y = it->y;
}
cvLine(img, cvPoint(x, y), cvPoint(fx, fy), color, 1);
}
}
__CV_END__;
}
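// Signed polygon area via the shoelace formula; polygons with two or fewer
// vertices return 1, presumably to keep cvContourPolygonCircularity below from
// dividing by zero.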
double cvContourPolygonArea(CvContourPolygon const *p)
{
CV_FUNCNAME("cvContourPolygonArea");
__CV_BEGIN__;
{
CV_ASSERT(p!=NULL);
if (p->size()<=2)
return 1.;
CvContourPolygon::const_iterator it=p->begin();
CvPoint lastPoint = p->back();
double a = 0.;
for (; it!=p->end(); ++it)
{
a += lastPoint.x*it->y - lastPoint.y*it->x;
lastPoint = *it;
}
return a*0.5;
}
__CV_END__;
}
double cvContourChainCodePerimeter(CvContourChainCode const *c)
{
CV_FUNCNAME("cvContourChainCodePerimeter");
__CV_BEGIN__;
{
CV_ASSERT(c!=NULL);
double perimeter = 0.;
for(CvChainCodes::const_iterator it=c->chainCode.begin(); it!=c->chainCode.end(); ++it)
{
if ((*it)%2)
perimeter+=sqrt(1.+1.);
else
perimeter+=1.;
}
return perimeter;
}
__CV_END__;
}
double cvContourPolygonPerimeter(CvContourPolygon const *p)
{
CV_FUNCNAME("cvContourPolygonPerimeter");
__CV_BEGIN__;
{
CV_ASSERT(p!=NULL);
double perimeter = cvDistancePointPoint((*p)[p->size()-1], (*p)[0]);
for (unsigned int i=0; i<p->size()-1; i++)
perimeter+=cvDistancePointPoint((*p)[i], (*p)[i+1]);
return perimeter;
}
__CV_END__;
}
double cvContourPolygonCircularity(const CvContourPolygon *p)
{
CV_FUNCNAME("cvContourPolygonCircularity");
__CV_BEGIN__;
{
CV_ASSERT(p!=NULL);
double l = cvContourPolygonPerimeter(p);
double c = (l*l/cvContourPolygonArea(p)) - 4.*pi;
if (c>=0.)
return c;
else // This could happen if the blob is only a pixel: the perimeter will be 0. Another solution would be to force "cvContourPolygonPerimeter" to be 1 or greater.
return 0.;
}
__CV_END__;
}
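// Recursive step of a Douglas-Peucker style simplification: the vertex farthest
// from the segment between indices i1 and i2 is kept if it is at least delta
// away, and both halves are then simplified recursively.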
void simplifyPolygonRecursive(CvContourPolygon const *p, int const i1, int const i2, bool *pnUseFlag, double const delta)
{
CV_FUNCNAME("cvSimplifyPolygonRecursive");
__CV_BEGIN__;
{
int endIndex = (i2<0)?p->size():i2;
if (abs(i1-endIndex)<=1)
return;
CvPoint firstPoint = (*p)[i1];
CvPoint lastPoint = (i2<0)?p->front():(*p)[i2];
double furtherDistance=0.;
int furtherIndex=0;
for (int i=i1+1; i<endIndex; i++)
{
double d = cvDistanceLinePoint(firstPoint, lastPoint, (*p)[i]);
if ((d>=delta)&&(d>furtherDistance))
{
furtherDistance=d;
furtherIndex=i;
}
}
if (furtherIndex)
{
pnUseFlag[furtherIndex]=true;
simplifyPolygonRecursive(p, i1, furtherIndex, pnUseFlag, delta);
simplifyPolygonRecursive(p, furtherIndex, i2, pnUseFlag, delta);
}
}
__CV_END__;
}
CvContourPolygon *cvSimplifyPolygon(CvContourPolygon const *p, double const delta)
{
CV_FUNCNAME("cvSimplifyPolygon");
__CV_BEGIN__;
{
CV_ASSERT(p!=NULL);
double furtherDistance=0.;
unsigned int furtherIndex=0;
CvContourPolygon::const_iterator it=p->begin();
++it;
for (unsigned int i=1; it!=p->end(); ++it, i++)
{
double d = cvDistancePointPoint(*it, p->front());
if (d>furtherDistance)
{
furtherDistance = d;
furtherIndex = i;
}
}
if (furtherDistance<delta)
{
CvContourPolygon *result = new CvContourPolygon;
result->push_back(p->front());
return result;
}
bool *pnUseFlag = new bool[p->size()];
for (unsigned int i=1; i<p->size(); i++) pnUseFlag[i] = false;
pnUseFlag[0] = pnUseFlag[furtherIndex] = true;
simplifyPolygonRecursive(p, 0, furtherIndex, pnUseFlag, delta);
simplifyPolygonRecursive(p, furtherIndex, -1, pnUseFlag, delta);
CvContourPolygon *result = new CvContourPolygon;
for (unsigned int i=0; i<p->size(); i++)
if (pnUseFlag[i])
result->push_back((*p)[i]);
delete[] pnUseFlag;
return result;
}
__CV_END__;
}
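// Convex hull of a contour polygon built with a double-ended queue, in the
// spirit of Melkman's algorithm: cross products decide whether candidate points
// keep the hull turning consistently at both ends of the deque.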
CvContourPolygon *cvPolygonContourConvexHull(CvContourPolygon const *p)
{
CV_FUNCNAME("cvPolygonContourConvexHull");
__CV_BEGIN__;
{
CV_ASSERT(p!=NULL);
if (p->size()<=3)
{
return new CvContourPolygon(p->begin(), p->end());
}
deque<CvPoint> dq;
if (cvCrossProductPoints((*p)[0], (*p)[1], (*p)[2])>0)
{
dq.push_back((*p)[0]);
dq.push_back((*p)[1]);
}
else
{
dq.push_back((*p)[1]);
dq.push_back((*p)[0]);
}
dq.push_back((*p)[2]);
dq.push_front((*p)[2]);
for (unsigned int i=3; i<p->size(); i++)
{
int s = dq.size();
if ((cvCrossProductPoints((*p)[i], dq.at(0), dq.at(1))>=0) && (cvCrossProductPoints(dq.at(s-2), dq.at(s-1), (*p)[i])>=0))
continue; // TODO Optimize.
while (cvCrossProductPoints(dq.at(s-2), dq.at(s-1), (*p)[i])<0)
{
dq.pop_back();
s = dq.size();
}
dq.push_back((*p)[i]);
while (cvCrossProductPoints((*p)[i], dq.at(0), dq.at(1))<0)
dq.pop_front();
dq.push_front((*p)[i]);
}
return new CvContourPolygon(dq.begin(), dq.end());
}
__CV_END__;
}
void cvWriteContourPolygonCSV(const CvContourPolygon& p, const string& filename)
{
ofstream f;
f.open(filename.c_str());
f << p << endl;
f.close();
}
void cvWriteContourPolygonSVG(const CvContourPolygon& p, const string& filename, const CvScalar& stroke, const CvScalar& fill)
{
int minx=INT_MAX;
int miny=INT_MAX;
int maxx=INT_MIN;
int maxy=INT_MIN;
stringstream buffer("");
for (CvContourPolygon::const_iterator it=p.begin(); it!=p.end(); ++it)
{
if (it->x>maxx)
maxx = it->x;
if (it->x<minx)
minx = it->x;
if (it->y>maxy)
maxy = it->y;
if (it->y<miny)
miny = it->y;
buffer << it->x << "," << it->y << " ";
}
ofstream f;
f.open(filename.c_str());
f << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\" standalone=\"no\"?>" << endl;
f << "<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 20010904//EN\" \"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\">" << endl;
f << "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" xml:space=\"preserve\" width=\"" << maxx-minx << "px\" height=\"" << maxy-miny << "px\" viewBox=\"" << minx << " " << miny << " " << maxx << " " << maxy << "\" zoomAndPan=\"disable\" >" << endl;
f << "<polygon fill=\"rgb(" << fill.val[0] << "," << fill.val[1] << "," << fill.val[2] << ")\" stroke=\"rgb(" << stroke.val[0] << "," << stroke.val[1] << "," << stroke.val[2] << ")\" stroke-width=\"1\" points=\"" << buffer.str() << "\"/>" << endl;
f << "</svg>" << endl;
f.close();
}
}
ostream& operator<< (ostream& output, const cvb::CvContourPolygon& p)
{
for (cvb::CvContourPolygon::const_iterator it=p.begin(); it!=p.end(); ++it)
output << it->x << ", " << it->y << endl;
return output;
}

View file

@@ -0,0 +1,48 @@
if(WIN32)
add_library(pointfilter STATIC ../slam6d/pointfilter.cc)
else(WIN32)
add_library(pointfilter SHARED ../slam6d/pointfilter.cc)
endif(WIN32)
set(SCANIO_LIBNAMES
uos uosr uos_rgb uos_rrgbt ks ks_rgb riegl_txt riegl_rgb rts velodyne
)
if(WITH_RIVLIB)
set(SCANIO_LIBNAMES ${SCANIO_LIBNAMES} rxp)
if(LIBXML2_FOUND)
include_directories(${LIBXML2_INCLUDE_DIR})
# set(SCANIO_LIBNAMES ${SCANIO_LIBNAMES} riegl_project)
# target_link_libraries(scan_io_riegl_project ${RIVLIB} scan_io_rxp ${LIBXML2_LIBRARIES})
endif(LIBXML2_FOUND)
endif(WITH_RIVLIB)
foreach(libname ${SCANIO_LIBNAMES})
if(WIN32)
#add_library(scan_io_${libname} STATIC scan_io_${libname}.cc)
add_library(scan_io_${libname} SHARED scan_io_${libname}.cc)
else(WIN32)
add_library(scan_io_${libname} SHARED scan_io_${libname}.cc)
endif(WIN32)
target_link_libraries(scan_io_${libname} pointfilter ${Boost_LIBRARIES} ${Boost_PROGRAM_OPTIONS_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} ${Boost_SYSTEM_LIBRARY})
endforeach(libname)
if(WITH_RIVLIB)
target_link_libraries(scan_io_rxp ${RIVLIB})
if(LIBXML2_FOUND)
target_link_libraries(scan_io_rxp ${LIBXML2_LIBRARIES}) #scan_io_riegl_project ${RIVLIB})
endif(LIBXML2_FOUND)
endif(WITH_RIVLIB)
if(WIN32)
add_library(scanio STATIC scan_io.cc ../slam6d/io_types.cc)
else(WIN32)
add_library(scanio SHARED scan_io.cc ../slam6d/io_types.cc)
endif(WIN32)
if(UNIX)
target_link_libraries(scanio dl)
endif(UNIX)

View file

@@ -0,0 +1,528 @@
/*
* scan_red implementation
*
* Copyright (C) Dorit Borrmann, Razvan-George Mihalyi, Remus Dumitru
*
* Released under the GPL version 3.
*
*/
/**
* @file
* @brief Main program for reducing 3D scans.
*
* Program to reduce scans for use with slam6d
* Usage: bin/scan_red -r <NR> 'dir',
* Use -r for octree based reduction (voxel size=<NR>)
* and 'dir' the directory of a set of scans
* Reduced scans will be written to 'dir/reduced'
*
* @author Dorit Borrmann. Automation Group, Jacobs University Bremen gGmbH, Germany.
*/
#ifdef _MSC_VER
#if !defined _OPENMP && defined OPENMP
#define _OPENMP
#endif
#endif
#define WANT_STREAM ///< define the WANT stream :)
#include <string>
using std::string;
#include <iostream>
using std::cout;
using std::cerr;
using std::endl;
#include <fstream>
using std::ofstream;
#include <errno.h>
#include "slam6d/metaScan.h"
#include "slam6d/io_utils.h"
#include "slam6d/scan.h"
#include "slam6d/Boctree.h"
#include "slam6d/fbr/fbr_global.h"
#include "slam6d/fbr/panorama.h"
#include "slam6d/fbr/scan_cv.h"
#include "scanserver/clientInterface.h"
#include "slam6d/globals.icc"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef _MSC_VER
#include <getopt.h>
#else
#include "XGetopt.h"
#endif
#ifdef _MSC_VER
#define strcasecmp _stricmp
#define strncasecmp _strnicmp
#include <windows.h>
#include <direct.h>
#else
#include <sys/stat.h>
#include <sys/types.h>
#include <strings.h>
#include <dlfcn.h>
#endif
using namespace fbr;
#include <boost/program_options.hpp>
namespace po = boost::program_options;
enum reduction_method {OCTREE, RANGE, INTERPOLATE};
/* Function used to check that 'opt1' and 'opt2' are not specified
at the same time. */
void conflicting_options(const po::variables_map & vm,
const char *opt1, const char *opt2)
{
if (vm.count(opt1) && !vm[opt1].defaulted()
&& vm.count(opt2) && !vm[opt2].defaulted())
throw std::logic_error(string("Conflicting options '")
+ opt1 + "' and '" + opt2 + "'.");
}
/* Function used to check that if 'for_what' is specified, then
'required_option' is specified too. */
void option_dependency(const po::variables_map & vm,
const char *for_what, const char *required_option)
{
if (vm.count(for_what) && !vm[for_what].defaulted())
if (vm.count(required_option) == 0
|| vm[required_option].defaulted())
throw std::logic_error(string("Option '") + for_what +
"' requires option '" +
required_option + "'.");
}
/*
* validates panorama method specification
*/
namespace fbr {
void validate(boost::any& v, const std::vector<std::string>& values,
projection_method*, int) {
if (values.size() == 0)
throw std::runtime_error("Invalid model specification");
string arg = values.at(0);
if(strcasecmp(arg.c_str(), "EQUIRECTANGULAR") == 0) v = EQUIRECTANGULAR;
else if(strcasecmp(arg.c_str(), "CYLINDRICAL") == 0) v = CYLINDRICAL;
else if(strcasecmp(arg.c_str(), "MERCATOR") == 0) v = MERCATOR;
else if(strcasecmp(arg.c_str(), "CONIC") == 0) v = CONIC;
else throw std::runtime_error(std::string("projection method ") + arg + std::string(" is unknown"));
}
}
/*
* validates input type specification
*/
void validate(boost::any& v, const std::vector<std::string>& values,
IOType*, int) {
if (values.size() == 0)
throw std::runtime_error("Invalid model specification");
string arg = values.at(0);
try {
v = formatname_to_io_type(arg.c_str());
} catch (...) { // runtime_error
throw std::runtime_error("Format " + arg + " unknown.");
}
}
void reduction_option_dependency(const po::variables_map & vm, reduction_method stype, const char *option)
{
if (vm.count("reduction") && vm["reduction"].as<reduction_method>() == stype) {
if (!vm.count(option)) {
throw std::logic_error (string("this reduction option needs ")+option+" to be set");
}
}
}
void reduction_option_conflict(const po::variables_map & vm, reduction_method stype, const char *option)
{
if (vm.count("reduction") && vm["reduction"].as<reduction_method>() == stype) {
if (vm.count(option)) {
throw std::logic_error (string("this reduction option is incompatible with ")+option);
}
}
}
/*
* validates reduction method specification
*/
void validate(boost::any& v, const std::vector<std::string>& values,
reduction_method*, int) {
if (values.size() == 0)
throw std::runtime_error("Invalid model specification");
string arg = values.at(0);
if(strcasecmp(arg.c_str(), "OCTREE") == 0) v = OCTREE;
else if(strcasecmp(arg.c_str(), "RANGE") == 0) v = RANGE;
else if(strcasecmp(arg.c_str(), "INTERPOLATE") == 0) v = INTERPOLATE;
else throw std::runtime_error(std::string("reduction method ") + arg + std::string(" is unknown"));
}
void parse_options(int argc, char **argv, int &start, int &end,
bool &scanserver, int &width, int &height,
fbr::projection_method &ptype, string &dir, IOType &iotype,
int &maxDist, int &minDist, reduction_method &rtype, double &scale,
double &voxel, int &octree, bool &use_reflectance)
{
po::options_description generic("Generic options");
generic.add_options()
("help,h", "output this help message");
po::options_description input("Input options");
input.add_options()
("start,s", po::value<int>(&start)->default_value(0),
"start at scan <arg> (i.e., neglects the first <arg> scans) "
"[ATTENTION: counting naturally starts with 0]")
("end,e", po::value<int>(&end)->default_value(-1),
"end after scan <arg>")
("format,f", po::value<IOType>(&iotype)->default_value(UOS),
"using shared library <arg> for input. (chose F from {uos, uos_map, "
"uos_rgb, uos_frames, uos_map_frames, old, rts, rts_map, ifp, "
"riegl_txt, riegl_rgb, riegl_bin, zahn, ply})")
("max,M", po::value<int>(&maxDist)->default_value(-1),
"neglegt all data points with a distance larger than <arg> 'units")
("min,m", po::value<int>(&minDist)->default_value(-1),
"neglegt all data points with a distance smaller than <arg> 'units")
("scanserver,S", po::bool_switch(&scanserver),
"Use the scanserver as an input method and handling of scan data");
po::options_description reduction("Reduction options");
reduction.add_options()
("reduction,r", po::value<reduction_method>(&rtype)->required(),
"choose reduction method (OCTREE, RANGE, INTERPOLATE)")
("scale,S", po::value<double>(&scale),
"scaling factor")
("voxel,v", po::value<double>(&voxel),
"voxel size")
("projection,P", po::value<fbr::projection_method>(&ptype),
"projection method or panorama image")
("octree,O", po::value<int>(&octree),
"0 -> center\n1 -> random\nN>1 -> random N")
("width,w", po::value<int>(&width),
"width of panorama")
("height,h", po::value<int>(&height),
"height of panorama");
po::options_description output("Output options");
output.add_options()
("reflectance,R", po::bool_switch(&use_reflectance),
"Use reflectance when reducing points and save scan files in UOSR format");
po::options_description hidden("Hidden options");
hidden.add_options()
("input-dir", po::value<string>(&dir), "input dir");
// all options
po::options_description all;
all.add(generic).add(input).add(reduction).add(output).add(hidden);
// options visible with --help
po::options_description cmdline_options;
cmdline_options.add(generic).add(input).add(reduction).add(output);
// positional argument
po::positional_options_description pd;
pd.add("input-dir", 1);
// process options
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).
options(all).positional(pd).run(), vm);
// display help
if (vm.count("help")) {
cout << cmdline_options;
cout << endl
<< "Example usage:" << endl
<< "\t./bin/scan_red -s 0 -e 0 -f uos --reduction OCTREE --voxel 10 --octree 0 dat" << endl
<< "\t./bin/scan_red -s 0 -e 0 -f uos --reduction RANGE --scale 0.5 --projection EQUIRECTANGULAR --width 3600 --height 1000 dat" << endl
<< "\t./bin/scan_red -s 0 -e 0 -f uos --reduction INTERPOLATE --scale 0.2 --projection EQUIRECTANGULAR --width 3600 --height 1000 dat" << endl;
exit(0);
}
po::notify(vm);
reduction_option_dependency(vm, OCTREE, "voxel");
reduction_option_dependency(vm, OCTREE, "octree");
reduction_option_conflict(vm, OCTREE, "scale");
reduction_option_conflict(vm, OCTREE, "projection");
reduction_option_conflict(vm, OCTREE, "width");
reduction_option_conflict(vm, OCTREE, "height");
reduction_option_conflict(vm, RANGE, "voxel");
reduction_option_conflict(vm, RANGE, "octree");
reduction_option_dependency(vm, RANGE, "scale");
reduction_option_dependency(vm, RANGE, "projection");
reduction_option_dependency(vm, RANGE, "width");
reduction_option_dependency(vm, RANGE, "height");
reduction_option_conflict(vm, INTERPOLATE, "voxel");
reduction_option_conflict(vm, INTERPOLATE, "octree");
reduction_option_dependency(vm, INTERPOLATE, "scale");
reduction_option_dependency(vm, INTERPOLATE, "projection");
reduction_option_dependency(vm, INTERPOLATE, "width");
reduction_option_dependency(vm, INTERPOLATE, "height");
#ifndef _MSC_VER
if (dir[dir.length()-1] != '/') dir = dir + "/";
#else
if (dir[dir.length()-1] != '\\') dir = dir + "\\";
#endif
}
void createdirectory(string dir)
{
int success = mkdir(dir.c_str(), S_IRWXU|S_IRWXG|S_IRWXO);
if (success == 0 || errno == EEXIST) {
cout << "Writing to " << dir << endl;
} else {
cerr << "Creating directory " << dir << " failed" << endl;
exit(1);
}
}
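// Packs a scan into an Nx1 CV_32FC4 matrix: channels 0-2 hold x/y/z and channel
// 3 a reflectance value normalized into [0,1]; if the scan carries no
// reflectance data, 255 is substituted (which saturates to 1 after normalization).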
void scan2mat(Scan *source, cv::Mat &mat)
{
DataXYZ xyz = source->get("xyz");
DataReflectance xyz_reflectance = (((DataReflectance)source->get("reflectance")).size() == 0) ?
source->create("reflectance", sizeof(float)*xyz.size())
: source->get("reflectance");
if(((DataReflectance)source->get("reflectance")).size() == 0){
for(unsigned int i = 0; i < xyz.size(); i++)
xyz_reflectance[i] = 255;
}
unsigned int nPoints = xyz.size();
mat.create(nPoints,1,CV_32FC(4));
mat = cv::Scalar::all(0);
cv::MatIterator_<cv::Vec4f> it = mat.begin<cv::Vec4f>();
for(unsigned int i = 0; i < nPoints; i++){
float reflectance = xyz_reflectance[i];
//normalize the reflectance
reflectance += 32;
reflectance /= 64;
reflectance -= 0.2;
reflectance /= 0.3;
if (reflectance < 0) reflectance = 0;
if (reflectance > 1) reflectance = 1;
(*it)[0] = xyz[i][0];
(*it)[1] = xyz[i][1];
(*it)[2] = xyz[i][2];
(*it)[3] = reflectance;
++it;
}
}
void reduce_octree(Scan *scan, vector<cv::Vec4f> &reduced_points, int octree,
int red, bool use_reflectance)
{
if (use_reflectance) {
unsigned int types = PointType::USE_REFLECTANCE;
PointType pointtype(types);
scan->setReductionParameter(red, octree, pointtype);
scan->calcReducedPoints();
DataXYZ xyz_reduced(scan->get("xyz reduced"));
DataReflectance reflectance_reduced(scan->get("reflectance reduced"));
if (xyz_reduced.size() != reflectance_reduced.size()) {
cerr << "xyz_reduced size different than reflectance_reduced size" << endl;
return;
}
for(unsigned int j = 0; j < xyz_reduced.size(); j++) {
reduced_points.push_back(cv::Vec4f(xyz_reduced[j][0], xyz_reduced[j][1], xyz_reduced[j][2], reflectance_reduced[j]));
}
}
else {
scan->setReductionParameter(red, octree);
scan->calcReducedPoints();
DataXYZ xyz_reduced(scan->get("xyz reduced"));
for(unsigned int j = 0; j < xyz_reduced.size(); j++) {
reduced_points.push_back(cv::Vec4f(xyz_reduced[j][0], xyz_reduced[j][1], xyz_reduced[j][2], 0.0));
}
}
}
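// The two panorama-based reductions below differ only in what gets resized:
// reduce_range shrinks the range image and re-projects it to 3D via
// recoverPointCloud(), while reduce_interpolation resizes the 3D map itself and
// reads the interpolated points directly.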
void reduce_range(Scan *scan, vector<cv::Vec4f> &reduced_points, int width,
int height, fbr::projection_method ptype, double scale,
bool use_reflectance)
{
panorama image(width, height, ptype);
cv::Mat mat;
scan2mat(scan, mat);
image.createPanorama(mat);
image.getDescription();
cv::Mat range_image_resized;
cv::Mat reflectance_image_resized;
resize(image.getRangeImage(), range_image_resized, cv::Size(),
scale, scale, cv::INTER_NEAREST);
if (use_reflectance) {
resize(image.getReflectanceImage(), reflectance_image_resized,
cv::Size(), scale, scale, cv::INTER_NEAREST);
} else {
reflectance_image_resized.create(range_image_resized.size(), CV_8U);
reflectance_image_resized = cv::Scalar::all(0);
}
image.recoverPointCloud(range_image_resized, reflectance_image_resized, reduced_points);
}
void reduce_interpolation(Scan *scan, vector<cv::Vec4f> &reduced_points,
int width, int height, fbr::projection_method ptype, double scale,
bool use_reflectance)
{
panorama image(width, height, ptype);
cv::Mat mat;
scan2mat(scan, mat);
image.createPanorama(mat);
image.getDescription();
cv::Mat range_image_resized;
cv::Mat reflectance_image_resized;
resize(image.getMap(), range_image_resized, cv::Size(),
scale, scale, cv::INTER_NEAREST);
if (use_reflectance) {
resize(image.getReflectanceImage(), reflectance_image_resized,
cv::Size(), scale, scale, cv::INTER_NEAREST);
}
for(int i = 0; i < range_image_resized.rows; i++) {
for(int j = 0; j < range_image_resized.cols; j++) {
cv::Vec3f vec = range_image_resized.at<cv::Vec3f>(i, j);
if (use_reflectance) {
reduced_points.push_back(cv::Vec4f(
vec[0], vec[1], vec[2],
reflectance_image_resized.at<uchar>(i, j)/255.0));
} else {
reduced_points.push_back(cv::Vec4f(vec[0], vec[1], vec[2], 0.0));
}
}
}
}
/*
* given a vector of 3d points, write them out as uos files
*/
void write_uos(vector<cv::Vec4f> &points, string &dir, string id)
{
ofstream outfile(dir + "/scan" + id + ".3d");
outfile << "# header is ignored" << endl;
for (vector<cv::Vec4f>::iterator it=points.begin(); it < points.end(); it++) {
outfile << (*it)[0] << " " << (*it)[1] << " " << (*it)[2] << endl;
}
outfile.close();
}
/*
* given a vector of 3d points, write them out as uosr files
*/
void write_uosr(vector<cv::Vec4f> &points, string &dir, string id)
{
ofstream outfile(dir + "/scan" + id + ".3d");
outfile << "# header is ignored" << endl;
for (vector<cv::Vec4f>::iterator it=points.begin(); it < points.end(); it++) {
outfile << (*it)[0] << " " << (*it)[1] << " " << (*it)[2] << " " << (*it)[3] << endl;
}
outfile.close();
}
// write .pose files
// .frames files can later be generated from them using ./bin/pose2frames
void writeposefile(string &dir, const double* rPos, const double* rPosTheta, string id)
{
ofstream posefile(dir + "/scan" + id + ".pose");
posefile << rPos[0] << " " << rPos[1] << " " << rPos[2] << endl;
posefile << deg(rPosTheta[0]) << " "
<< deg(rPosTheta[1]) << " "
<< deg(rPosTheta[2]) << endl;
posefile.close();
}
/**
* Main program for reducing scans.
* Usage: bin/scan_red -r <NR> 'dir',
* Use -r for octree based reduction (voxel size=<NR>)
* and 'dir' the directory of a set of scans
* Reduced scans will be written to 'dir/reduced'
*
*/
int main(int argc, char **argv)
{
int start, end;
bool scanserver;
int width, height;
int maxDist, minDist;
fbr::projection_method ptype;
string dir;
IOType iotype;
reduction_method rtype;
double scale, voxel;
int octree;
bool use_reflectance;
parse_options(argc, argv, start, end, scanserver, width, height, ptype,
dir, iotype, maxDist, minDist, rtype, scale, voxel, octree,
use_reflectance);
for (int iter = start; iter <= end; iter++) {
Scan::openDirectory(scanserver, dir, iotype, iter, iter);
if(Scan::allScans.size() == 0) {
cerr << "No scans found. Did you use the correct format?" << endl;
exit(-1);
}
for(ScanVector::iterator it = Scan::allScans.begin(); it != Scan::allScans.end(); ++it) {
Scan* scan = *it;
scan->setRangeFilter(maxDist, minDist);
vector<cv::Vec4f> reduced_points;
string reddir = dir + "reduced";
createdirectory(reddir);
switch (rtype) {
case OCTREE:
reduce_octree(scan, reduced_points, octree, voxel, use_reflectance);
break;
case RANGE:
reduce_range(scan, reduced_points, width, height, ptype, scale, use_reflectance);
break;
case INTERPOLATE:
reduce_interpolation(scan, reduced_points, width, height, ptype, scale, use_reflectance);
break;
default:
cerr << "unknown method" << endl;
return 1;
break;
}
if (use_reflectance)
write_uosr(reduced_points, reddir, scan->getIdentifier());
else
write_uos(reduced_points, reddir, scan->getIdentifier());
writeposefile(reddir, scan->get_rPos(), scan->get_rPosTheta(), scan->getIdentifier());
}
Scan::closeDirectory();
}
}

View file

@@ -0,0 +1,44 @@
#ifndef __THERMO_H__
#define __THERMO_H__
#if (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#include <opencv/highgui.h>
#else
#include <opencv2/opencv.hpp>
#endif
//#include <opencv2/highgui.hpp>
#include <string>
#include <slam6d/scan.h>
using namespace std;
//typedef vector<vector<float> > Float2D[1200][1600];
typedef vector<vector<float> > Float2D[2592][3888];
void calcBoard(double point_array[][2], int board_n, double &x, double &y, double &cx, double &cy, bool pc);
void sortBlobs(double point_array[][2], int board_n, int board_h, int board_w, bool quiet);
IplImage* detectBlobs(IplImage *org_image, int &corner_exp, int board_h, int board_w, bool quiet, double point_array2[][2]);
void drawLines(double point_array2[][2], int corner_exp, IplImage *image, bool color=false);
IplImage* resizeImage(IplImage *source, int scale);
IplImage* detectCorners(IplImage *orgimage, int &corner_exp, int board_h, int board_w, bool quiet, double point_array2[][2], int scale=1);
void CalibFunc(int board_w, int board_h, int start, int end, bool optical, bool chess, bool quiet, string dir, int scale=1);
void writeCalibParam(int images, int corner_exp, int board_w, CvMat* image_points, CvSize size, string dir);
void loadIntrinsicCalibration(CvMat * intrinsic, CvMat * distortion, string dir, bool optical=false) ;
void loadExtrinsicCalibration(CvMat * Translation, CvMat * Rotation, string dir, int method, bool optical=false) ;
void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
IOType type, int scale, double rot_angle, double minDist, double maxDist,
bool correction, int neighborhood, int method=0);
bool readPoints(string filename, CvPoint3D32f *corners, int size) ;
void sortElementByElement(CvMat * vectors, int nr_elems, int nr_vectors);
void calculateExtrinsicsWithReprojectionCheck(CvMat * points2D, CvMat *
points3D, CvMat * rotation_vectors_temp, CvMat * translation_vectors_temp, CvMat
* distortions, CvMat * instrinsics, int corners, int successes, string dir, bool quiet=true, string substring = "") ;
void calculateExtrinsics(CvMat * rotation_vectors_temp, CvMat * translation_vectors_temp, int successes, string dir, bool quiet=true, string substring = "") ;
void CorrectErrorAndWrite(Float2D &data, fstream &outfile, CvSize size, bool optical);
void clusterSearch(float ** points, int size, double thresh1, double thresh2, fstream &outfile, bool optical);
void sortDistances(float ** points, int size);
void ExtrCalibFunc(int board_w, int board_h, int start, int end, bool optical, bool chess, bool quiet, string dir, int scale=1);
#endif

View file

@@ -0,0 +1,111 @@
/*
* history implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#include "history.h"
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
Frame *allocFrame3DData(CvSize pmdSz) {
Frame *f = (Frame*)cvAlloc(sizeof(Frame));
f->sz = pmdSz;
f->img = 0;
f->trn = cvCreateMat(3, 1, CV_32FC1);
f->rot = cvCreateMat(3, 1, CV_32FC1);
f->status = (CvPoint**) cvAlloc(pmdSz.height * sizeof(CvPoint*));
f->pts = (CvPoint3D32f**) cvAlloc(pmdSz.height * sizeof(CvPoint3D32f*));
for(int i = 0; i < pmdSz.height; i++) {
f->status[i] = (CvPoint*) cvAlloc(pmdSz.width * sizeof(CvPoint));
f->pts[i] = (CvPoint3D32f*) cvAlloc(pmdSz.width * sizeof(CvPoint3D32f));
}
return f;
}
void fillFrame(Frame *f, IplImage *img, CvSize pmdSz, CvPoint3D32f **pts, CvPoint **status
, CvMat *rot, CvMat *trn, double alignError) {
assert(pmdSz.width == f->sz.width || pmdSz.height == f->sz.height);
if(f->img) cvCopy(img, f->img, NULL);
else f->img = cvCloneImage(img);
f->alignError = alignError;
for(int j = 0; j < pmdSz.width; j++)
for(int i = 0; i < pmdSz.height; i++) {
f->status[i][j] = status[i][j];
f->pts[i][j] = pts[i][j];
}
cvCopy(rot, f->rot, NULL);
cvCopy(trn, f->trn, NULL);
}
void releaseFrame(Frame **f) {
Frame *t = *f;
cvReleaseImage(&t->img);
cvReleaseMat(&t->rot);
cvReleaseMat(&t->trn);
for(int i = 0; i < t->sz.height; i++) {
cvFree(&t->pts[i]);
cvFree(&t->status[i]);
}
cvFree(&t->pts);
cvFree(&t->status);
*f = NULL;
}
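/* A minimal usage sketch of the history API below (illustrative only, not taken
 * from the original sources):
 *
 *   History *h = createHistory();
 *   h = addFrame(h, frame);       // keep the returned pointer: it is the new head
 *   checkHistoryLen(h, maxLen);   // drops frames older than maxLen entries
 */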
History *createHistory() {
History *h = (History*)cvAlloc(sizeof(History));
h->frame = 0;
h->prev = 0; // essential!!!
return h;
}
History *addFrame(History *h, Frame *f) {
if(!h->frame) { // first frame, FIXME: should be out here
h->prev = 0; // ensure
h->frame = f;
return h;
}
// else
History *n = (History*)cvAlloc(sizeof(History));
n->prev = h;
n->frame = f;
return n;
}
void releaseTail(History *h) {
History *tmp = h->prev;
History *prev = 0;
h->prev = 0;
while(tmp) {
prev = tmp->prev;
releaseFrame(&tmp->frame);
cvFree(&tmp);
tmp = prev;
}
}
void checkHistoryLen(History *h, int maxLen) {
History *histI = h;
for(int i = 0; histI->prev; i++)
if(i > maxLen-2) // -2 because I release *prev* elements
releaseTail(histI);
else histI = histI->prev;
}

View file

@@ -0,0 +1,421 @@
// Copyright (C) 2007 by Cristóbal Carnero Liñán
// grendel.ccl@gmail.com
//
// This file is part of cvBlob.
//
// cvBlob is free software: you can redistribute it and/or modify
// it under the terms of the Lesser GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// cvBlob is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Lesser GNU General Public License for more details.
//
// You should have received a copy of the Lesser GNU General Public License
// along with cvBlob. If not, see <http://www.gnu.org/licenses/>.
//
#include <cmath>
#include <iostream>
#include <sstream>
using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"
namespace cvb
{
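// Distance between a blob and a track: returns 0 if either centroid lies inside
// the other's bounding box, otherwise the smaller of the two Chebyshev-style
// distances (blob centroid to track box, and track centroid to blob box).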
double distantBlobTrack(CvBlob const *b, CvTrack const *t)
{
double d1;
if (b->centroid.x<t->minx)
{
if (b->centroid.y<t->miny)
d1 = MAX(t->minx - b->centroid.x, t->miny - b->centroid.y);
else if (b->centroid.y>t->maxy)
d1 = MAX(t->minx - b->centroid.x, b->centroid.y - t->maxy);
else // if (t->miny < b->centroid.y)&&(b->centroid.y < t->maxy)
d1 = t->minx - b->centroid.x;
}
else if (b->centroid.x>t->maxx)
{
if (b->centroid.y<t->miny)
d1 = MAX(b->centroid.x - t->maxx, t->miny - b->centroid.y);
else if (b->centroid.y>t->maxy)
d1 = MAX(b->centroid.x - t->maxx, b->centroid.y - t->maxy);
else
d1 = b->centroid.x - t->maxx;
}
else // if (t->minx =< b->centroid.x) && (b->centroid.x =< t->maxx)
{
if (b->centroid.y<t->miny)
d1 = t->miny - b->centroid.y;
else if (b->centroid.y>t->maxy)
d1 = b->centroid.y - t->maxy;
else
return 0.;
}
double d2;
if (t->centroid.x<b->minx)
{
if (t->centroid.y<b->miny)
d2 = MAX(b->minx - t->centroid.x, b->miny - t->centroid.y);
else if (t->centroid.y>b->maxy)
d2 = MAX(b->minx - t->centroid.x, t->centroid.y - b->maxy);
else // if (b->miny < t->centroid.y)&&(t->centroid.y < b->maxy)
d2 = b->minx - t->centroid.x;
}
else if (t->centroid.x>b->maxx)
{
if (t->centroid.y<b->miny)
d2 = MAX(t->centroid.x - b->maxx, b->miny - t->centroid.y);
else if (t->centroid.y>b->maxy)
d2 = MAX(t->centroid.x - b->maxx, t->centroid.y - b->maxy);
else
d2 = t->centroid.x - b->maxx;
}
else // if (b->minx =< t->centroid.x) && (t->centroid.x =< b->maxx)
{
if (t->centroid.y<b->miny)
d2 = b->miny - t->centroid.y;
else if (t->centroid.y>b->maxy)
d2 = t->centroid.y - b->maxy;
else
return 0.;
}
return MIN(d1, d2);
}
// Access to matrix
#define C(blob, track) close[((blob) + (track)*(nBlobs+2))]
// Access to accumulators
#define AB(label) C((label), (nTracks))
#define AT(id) C((nBlobs), (id))
// Access to identifications
#define IB(label) C((label), (nTracks)+1)
#define IT(id) C((nBlobs)+1, (id))
// Access to registers
#define B(label) blobs.find(IB(label))->second
#define T(id) tracks.find(IT(id))->second
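// The proximity matrix is (nBlobs+2) x (nTracks+2): C(i, j) flags blob i as
// close to track j, the extra entries per blob store its match count (AB) and
// label (IB), and the extra entries per track store its match count (AT) and
// id (IT).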
void getClusterForTrack(unsigned int trackPos, CvID *close, unsigned int nBlobs, unsigned int nTracks, CvBlobs const &blobs, CvTracks const &tracks, list<CvBlob*> &bb, list<CvTrack*> &tt);
void getClusterForBlob(unsigned int blobPos, CvID *close, unsigned int nBlobs, unsigned int nTracks, CvBlobs const &blobs, CvTracks const &tracks, list<CvBlob*> &bb, list<CvTrack*> &tt)
{
for (unsigned int j=0; j<nTracks; j++)
{
if (C(blobPos, j))
{
tt.push_back(T(j));
unsigned int c = AT(j);
C(blobPos, j) = 0;
AB(blobPos)--;
AT(j)--;
if (c>1)
{
getClusterForTrack(j, close, nBlobs, nTracks, blobs, tracks, bb, tt);
}
}
}
}
void getClusterForTrack(unsigned int trackPos, CvID *close, unsigned int nBlobs, unsigned int nTracks, CvBlobs const &blobs, CvTracks const &tracks, list<CvBlob*> &bb, list<CvTrack*> &tt)
{
for (unsigned int i=0; i<nBlobs; i++)
{
if (C(i, trackPos))
{
bb.push_back(B(i));
unsigned int c = AB(i);
C(i, trackPos) = 0;
AB(i)--;
AT(trackPos)--;
if (c>1)
{
getClusterForBlob(i, close, nBlobs, nTracks, blobs, tracks, bb, tt);
}
}
}
}
void cvUpdateTracks(CvBlobs const &blobs, CvTracks &tracks, const double thDistance, const unsigned int thInactive, const unsigned int thActive)
{
CV_FUNCNAME("cvUpdateTracks");
__CV_BEGIN__;
unsigned int nBlobs = blobs.size();
unsigned int nTracks = tracks.size();
// Proximity matrix:
// Last row/column is for ID/label.
// Last-1 "/" is for accumulation.
CvID *close = new unsigned int[(nBlobs+2)*(nTracks+2)]; // XXX Must be the same type as CvLabel.
try
{
// Initialization:
unsigned int i=0;
for (CvBlobs::const_iterator it = blobs.begin(); it!=blobs.end(); ++it, i++)
{
AB(i) = 0;
IB(i) = it->second->label;
}
CvID maxTrackID = 0;
unsigned int j=0;
for (CvTracks::const_iterator jt = tracks.begin(); jt!=tracks.end(); ++jt, j++)
{
AT(j) = 0;
IT(j) = jt->second->id;
if (jt->second->id > maxTrackID)
maxTrackID = jt->second->id;
}
// Proximity matrix calculation and "used blob" list initialization:
for (i=0; i<nBlobs; i++)
for (j=0; j<nTracks; j++)
if (C(i, j) = (distantBlobTrack(B(i), T(j)) < thDistance))
{
AB(i)++;
AT(j)++;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Detect inactive tracks
for (j=0; j<nTracks; j++)
{
unsigned int c = AT(j);
if (c==0)
{
//cout << "Inactive track: " << j << endl;
// Inactive track.
CvTrack *track = T(j);
track->inactive++;
track->label = 0;
}
}
// Detect new tracks
for (i=0; i<nBlobs; i++)
{
unsigned int c = AB(i);
if (c==0)
{
//cout << "Blob (new track): " << maxTrackID+1 << endl;
//cout << *B(i) << endl;
// New track.
maxTrackID++;
CvBlob *blob = B(i);
CvTrack *track = new CvTrack;
track->id = maxTrackID;
track->label = blob->label;
track->minx = blob->minx;
track->miny = blob->miny;
track->maxx = blob->maxx;
track->maxy = blob->maxy;
track->centroid = blob->centroid;
track->lifetime = 0;
track->active = 0;
track->inactive = 0;
tracks.insert(CvIDTrack(maxTrackID, track));
}
}
// Clustering
for (j=0; j<nTracks; j++)
{
unsigned int c = AT(j);
if (c)
{
list<CvTrack*> tt; tt.push_back(T(j));
list<CvBlob*> bb;
getClusterForTrack(j, close, nBlobs, nTracks, blobs, tracks, bb, tt);
// Select track
CvTrack *track;
unsigned int area = 0;
for (list<CvTrack*>::const_iterator it=tt.begin(); it!=tt.end(); ++it)
{
CvTrack *t = *it;
unsigned int a = (t->maxx-t->minx)*(t->maxy-t->miny);
if (a>area)
{
area = a;
track = t;
}
}
// Select blob
CvBlob *blob;
area = 0;
//cout << "Matching blobs: ";
for (list<CvBlob*>::const_iterator it=bb.begin(); it!=bb.end(); ++it)
{
CvBlob *b = *it;
//cout << b->label << " ";
if (b->area>area)
{
area = b->area;
blob = b;
}
}
//cout << endl;
// Update track
//cout << "Matching: track=" << track->id << ", blob=" << blob->label << endl;
track->label = blob->label;
track->centroid = blob->centroid;
track->minx = blob->minx;
track->miny = blob->miny;
track->maxx = blob->maxx;
track->maxy = blob->maxy;
if (track->inactive)
track->active = 0;
track->inactive = 0;
// Others to inactive
for (list<CvTrack*>::const_iterator it=tt.begin(); it!=tt.end(); ++it)
{
CvTrack *t = *it;
if (t!=track)
{
//cout << "Inactive: track=" << t->id << endl;
t->inactive++;
t->label = 0;
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
for (CvTracks::iterator jt=tracks.begin(); jt!=tracks.end();)
if ((jt->second->inactive>=thInactive)||((jt->second->inactive)&&(thActive)&&(jt->second->active<thActive)))
{
delete jt->second;
tracks.erase(jt++);
}
else
{
jt->second->lifetime++;
if (!jt->second->inactive)
jt->second->active++;
++jt;
}
}
catch (...)
{
delete[] close;
throw; // TODO: OpenCV style.
}
delete[] close;
__CV_END__;
}
CvFont *defaultFont = NULL;
void cvRenderTracks(CvTracks const tracks, IplImage *imgSource, IplImage *imgDest, unsigned short mode, CvFont *font)
{
CV_FUNCNAME("cvRenderTracks");
__CV_BEGIN__;
CV_ASSERT(imgDest&&(imgDest->depth==IPL_DEPTH_8U)&&(imgDest->nChannels==3));
if ((mode&CV_TRACK_RENDER_ID)&&(!font))
{
if (!defaultFont)
{
font = defaultFont = new CvFont;
cvInitFont(font, CV_FONT_HERSHEY_DUPLEX, 0.5, 0.5, 0, 1);
// Other fonts:
// CV_FONT_HERSHEY_SIMPLEX, CV_FONT_HERSHEY_PLAIN,
// CV_FONT_HERSHEY_DUPLEX, CV_FONT_HERSHEY_COMPLEX,
// CV_FONT_HERSHEY_TRIPLEX, CV_FONT_HERSHEY_COMPLEX_SMALL,
// CV_FONT_HERSHEY_SCRIPT_SIMPLEX, CV_FONT_HERSHEY_SCRIPT_COMPLEX
}
else
font = defaultFont;
}
if (mode)
{
for (CvTracks::const_iterator it=tracks.begin(); it!=tracks.end(); ++it)
{
if (mode&CV_TRACK_RENDER_ID)
if (!it->second->inactive)
{
stringstream buffer;
buffer << it->first;
cvPutText(imgDest, buffer.str().c_str(), cvPoint((int)it->second->centroid.x, (int)it->second->centroid.y), font, CV_RGB(0.,255.,0.));
}
if (mode&CV_TRACK_RENDER_BOUNDING_BOX)
if (it->second->inactive)
cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 0., 50.));
else
cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 0., 255.));
if (mode&CV_TRACK_RENDER_TO_LOG)
{
clog << "Track " << it->second->id << endl;
if (it->second->inactive)
clog << " - Inactive for " << it->second->inactive << " frames" << endl;
else
clog << " - Associated with blob " << it->second->label << endl;
clog << " - Lifetime " << it->second->lifetime << endl;
clog << " - Active " << it->second->active << endl;
clog << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
clog << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
clog << endl;
}
if (mode&CV_TRACK_RENDER_TO_STD)
{
cout << "Track " << it->second->id << endl;
if (it->second->inactive)
cout << " - Inactive for " << it->second->inactive << " frames" << endl;
else
cout << " - Associated with blobs " << it->second->label << endl;
cout << " - Lifetime " << it->second->lifetime << endl;
cout << " - Active " << it->second->active << endl;
cout << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
cout << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
cout << endl;
}
}
}
__CV_END__;
}
}

View file

@@ -0,0 +1,42 @@
Project admins
Andreas Nuechter andreas@nuechti.de
Kai Lingemann kai.lingemann@gmx.de
Dorit Borrmann d.borrmann@jacobs-university.de
List of contributors
Andreas Nuechter andreas@nuechti.de
Kai Lingemann kai.lingemann@gmx.de
Dorit Borrmann d.borrmann@jacobs-university.de
Jan Elseberg j.elseberg@jacobs-university.de
Jochen Sprickerhof jochen@sprickerhof.de
HamidReza Houshiar h.houshiar@jacobs-university.de
Sven Albrecht sven.albrecht@uni-osnabrueck.de
Stan Serebryakov cfr.ssv@gmail.com
Thomas Escher tescher@uni-osnabrueck.de
Thomas Wiemann twiemann@uni-osnabrueck.de
Alexandru Tandrau alexandru@tandrau.com
Alexandru Eugen Ichim eugen@alexichim.com
Flavia Grosan me@flaviagrosan.com
Deyuan Qiu deyuan.qiu@googlemail.com
Darko Makreshanski d.makreshanski@jacobs-university.de
Mohammad Faisal Abdullah m.faisal@jacobs-university.de
Li Ming liming751218@whu.edu.cn
Li Wei xpaulee@gmail.com
Shams Feyzabadi sh.feyzabadi@gmail.co
Vladislav Perelmann v.perelman@jacobs-university.de
Chen Long lchen.whu@gmail.com
Remus Dumitru r.dumitru@jaocbs-university.de
Billy Okal okal.billy@googlemail.com
Razvan-George Mihalyi r.mihalyi@jacobs-university.de
Johannes Schauer j.schauer@jacobs-university.de
Corneliu-Claudiu Prodescu c.prodescu@jacobs-university.de
Vaibhav Kumar Mehta v.metha@jacobs-university.de
Further contributors
Uwe Hebbelmann, Sebastian Stock, Andre Schemschat
Hartmut Surmann
Amuz Tamrakars, Ulugbek Makhmudov
Christof Soeger, Marcel Junker, Anton Fluegge, Hannes Schulz

View file

@@ -0,0 +1,45 @@
# Copyright (C) 2007 by Cristóbal Carnero Liñán
# grendel.ccl@gmail.com
#
# This file is part of cvBlob.
#
# cvBlob is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cvBlob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with cvBlob. If not, see <http://www.gnu.org/licenses/>.
#
IF (WITH_THERMO)
MESSAGE(STATUS "With cvblob")
find_package(OpenCV REQUIRED)
set(CVBLOB_SRCS
cvblob.cpp
cvlabel.cpp
cvaux.cpp
cvcontour.cpp
cvtrack.cpp
cvcolor.cpp
)
set_source_files_properties(${cvBlob_SRC}
PROPERTIES
COMPILE_FLAGS "-O3 ${ADDITIONAL_OPENCV_FLAGS}"
)
add_library(cvblob STATIC ${CVBLOB_SRCS})
IF(EXPORT_SHARED_LIBS)
add_library(cvblob_s SHARED ${CVBLOB_SRCS})
ENDIF(EXPORT_SHARED_LIBS)
ENDIF (WITH_THERMO)

View file

@@ -0,0 +1,296 @@
/*
* PMDCam implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#include <stdio.h>
#include <stdlib.h>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include <libconfig.h>
#include "cvpmd.h"
#include "pmdWrap.h"
#include "slam6d/icp6Dhelix.h"
#include "slam6d/icp6Dortho.h"
#include "slam6d/icp6Dquat.h"
#include "slam6d/icp6Dsvd.h"
#include "slam6d/icp6Dapx.h"
//TODO: releasePMDCam()
//TODO: !!! check config parse error !!! TODO
PMDCam *initPMDCam(const char *confPath) {
PMDCam *pmdc = (PMDCam*)malloc(sizeof(PMDCam));
config_t *conf = (config_t*)malloc(sizeof(config_t));
config_init(conf);
config_read_file(conf, confPath);
config_lookup_bool(conf, "offline", &pmdc->_offlineMode);
if(!pmdc->_offlineMode) {
// init PMDCam
const char *ip;
const char *plugin;
config_lookup_string(conf, "ip", &ip);
config_lookup_string(conf, "plugin", &plugin);
printf("Starting PMD...\n");
pmdc->_pmd = initPMD(plugin, ip);
//FIXME: pmdc initialization assert
int camID;
#if (((LIBCONFIG_VER_MAJOR == 1) && (LIBCONFIG_VER_MINOR >= 4)) \
|| (LIBCONFIG_VER_MAJOR > 1))
config_lookup_int(conf, "cameraID", &camID);
#else //libconfig API changed in version 1.4b
config_lookup_int(conf, "cameraID", (long *)&camID);
#endif
pmdc->_capture = cvCaptureFromCAM(camID);
if(!pmdc->_capture) fprintf(stderr, "ERROR: Can't initialize capture, see camera id in pmdc.conf.\n");
} else {
printf("Offline mode.\n");
const char *camVid;
const char *pmdI;
const char *pmdA;
const char *pmd3D;
const char *headers;
config_lookup_string(conf, "camVideoFile", &camVid);
config_lookup_string(conf, "pmdIFile", &pmdI);
// config_lookup_string(conf, "pmdAFile", &pmdA);
config_lookup_string(conf, "pmd3DFile", &pmd3D);
config_lookup_string(conf, "headersFile", &headers);
//FIXME: check empty strings
printf( "DEBUG: Reading from files: %s, %s, %s.\n"
, camVid, pmdI, pmd3D);
pmdc->_capture = cvCaptureFromFile(camVid);
pmdc->_f.i = fopen(pmdI, "r");
// pmdc->_f.a = fopen(pmdA, "r");
pmdc->_f.p = fopen(pmd3D, "r");
pmdc->_f.h = fopen(headers, "r");
if( !pmdc->_capture
|| !pmdc->_f.i
// || !pmdc->_f.a
|| !pmdc->_f.p
|| !pmdc->_f.h) fprintf(stderr, "ERROR: Can't open data file, see files settings in pmdc.conf.\n");
pmdc->header = (ImageHeaderInformation*)cvAlloc(sizeof(ImageHeaderInformation));
//TODO: read pmd header somewhere
}
CvSize pmdSz;
#if (((LIBCONFIG_VER_MAJOR == 1) && (LIBCONFIG_VER_MINOR >= 4)) \
|| (LIBCONFIG_VER_MAJOR > 1))
config_lookup_int(conf, "pmdSize.width", &pmdSz.width);
config_lookup_int(conf, "pmdSize.height", &pmdSz.height);
#else //libconfig API changed in version 1.4b
config_lookup_int(conf, "pmdSize.width", (long *)&pmdSz.width);
config_lookup_int(conf, "pmdSize.height", (long *)&pmdSz.height);
#endif
printf("DEBUG: pmdSz: %i %i\n", pmdSz.width, pmdSz.height);
pmdc->_iPMDIU = cvCreateImage(pmdSz, IPL_DEPTH_8U, 1);
pmdc->iPMDI = cvCreateImage(pmdSz, IPL_DEPTH_8U, 1);
pmdc->_iPMDAU = cvCreateImage(pmdSz, IPL_DEPTH_8U, 1);
pmdc->iPMDA = cvCreateImage(pmdSz, IPL_DEPTH_8U, 1);
pmdc->_iCamColorU = cvQueryFrame(pmdc->_capture);
CvSize camSz = cvGetSize(pmdc->_iCamColorU);
printf("DEBUG: camSz: %i %i\n", camSz.width, camSz.height);
config_lookup_bool(conf, "hybrid", &pmdc->hybrid);
if(pmdc->hybrid) pmdc->_iCamColorUBuffer = cvCreateImage(camSz, IPL_DEPTH_8U, 3);
pmdc->iCamColor = cvCreateImage(camSz, IPL_DEPTH_8U, 3);
pmdc->iCam = cvCreateImage(camSz, IPL_DEPTH_8U, 1);
pmdc->_mapXPMD = cvCreateImage(pmdSz, IPL_DEPTH_32F, 1);
pmdc->_mapYPMD = cvCreateImage(pmdSz, IPL_DEPTH_32F, 1);
pmdc->_mapXCam = cvCreateImage(camSz, IPL_DEPTH_32F, 1);
pmdc->_mapYCam = cvCreateImage(camSz, IPL_DEPTH_32F, 1);
printf("DEBUG: Created images...\n");
const char *inc;
const char *dsc;
const char *inp;
const char *dsp;
config_lookup_string(conf, "intrinsicCam", &inc);
config_lookup_string(conf, "distortionCam", &dsc);
config_lookup_string(conf, "intrinsicPMD", &inp);
config_lookup_string(conf, "distortionPMD", &dsp);
pmdc->intrinsicCam = (CvMat*)cvLoad(inc);
pmdc->distortionCam = (CvMat*)cvLoad(dsc);
pmdc->intrinsicPMD = (CvMat*)cvLoad(inp);
pmdc->distortionPMD = (CvMat*)cvLoad(dsp);
if(!pmdc->intrinsicCam || !pmdc->distortionCam || !pmdc->intrinsicPMD || !pmdc->distortionPMD)
fprintf(stderr, "ERROR: Cant load matrix file, see pmdc.conf matrix files settings.\n");
//TODO: essential mat
cvInitUndistortMap(pmdc->intrinsicCam, pmdc->distortionCam, pmdc->_mapXCam, pmdc->_mapYCam);
cvInitUndistortMap(pmdc->intrinsicPMD, pmdc->distortionPMD, pmdc->_mapXPMD, pmdc->_mapYPMD);
printf("DEBUG: initialized undistortion maps...\n");
CvPoint3D32f **pmdPts = (CvPoint3D32f**) cvAlloc(pmdSz.height * sizeof(CvPoint3D32f*));
for(int i = 0; i < pmdSz.height; i++) pmdPts[i] = (CvPoint3D32f*) cvAlloc(pmdSz.width * sizeof(CvPoint3D32f));
pmdc->pts = pmdPts;
config_lookup_float(conf, "tracking.quality", &pmdc->_track.quality);
#if (((LIBCONFIG_VER_MAJOR == 1) && (LIBCONFIG_VER_MINOR >= 4)) \
|| (LIBCONFIG_VER_MAJOR > 1))
config_lookup_int(conf, "tracking.min", &pmdc->_track.minFeatures);
config_lookup_int(conf, "tracking.max", &pmdc->_track.maxFeatures);
config_lookup_int(conf, "tracking.minDist", &pmdc->_track.minDist);
config_lookup_int(conf, "tracking.winSize", &pmdc->_track.winSz);
config_lookup_int(conf, "historyLen", &pmdc->historyLen);
#else //libconfig API changed in version 1.4b
config_lookup_int(conf, "tracking.min", (long *)&pmdc->_track.minFeatures);
config_lookup_int(conf, "tracking.max", (long *)&pmdc->_track.maxFeatures);
config_lookup_int(conf, "tracking.minDist", (long *)&pmdc->_track.minDist);
config_lookup_int(conf, "tracking.winSize", (long *)&pmdc->_track.winSz);
config_lookup_int(conf, "historyLen", (long *)&pmdc->historyLen);
#endif
pmdc->_track.trackingFlags = 0; //FIXME, if you init pyrs here
#if (((LIBCONFIG_VER_MAJOR == 1) && (LIBCONFIG_VER_MINOR >= 4)) \
|| (LIBCONFIG_VER_MAJOR > 1))
config_lookup_int(conf, "minPts", &pmdc->minPts4Pose);
#else //libconfig API changed in version 1.4b
config_lookup_int(conf, "minPts", (long *)&pmdc->minPts4Pose);
#endif
config_lookup_float(conf, "maxError", &pmdc->maxError);
config_lookup_float(conf, "outliersRemoval.sigmaDepth", &pmdc->sigmaDepth);
config_lookup_float(conf, "outliersRemoval.sigmaColor", &pmdc->sigmaColor);
config_lookup_float(conf, "outliersRemoval.threshold", &pmdc->dpThreshold);
config_lookup_bool(conf, "savePoses", &pmdc->savePoses);
config_lookup_bool(conf, "synchronous", &pmdc->synchronous);
int icpVerbose;
config_lookup_bool(conf, "icp.verbose", &icpVerbose);
bool icpQuiet = !icpVerbose;
const char *icpMethod;
config_lookup_string(conf, "icp.method", &icpMethod);
printf("%s\n", icpMethod);
if(!strcmp(icpMethod, "helix")) pmdc->icp = new icp6D_HELIX(icpQuiet);
else if(!strcmp(icpMethod, "svd")) pmdc->icp = new icp6D_SVD(icpQuiet);
else if(!strcmp(icpMethod, "apx")) pmdc->icp = new icp6D_APX(icpQuiet);
else if(!strcmp(icpMethod, "quat")) pmdc->icp = new icp6D_QUAT(icpQuiet);
else if(!strcmp(icpMethod, "ortho")) pmdc->icp = new icp6D_ORTHO(icpQuiet);
else {
fprintf(stderr, "ERROR: Uknown ICP method: %s!\n", icpMethod);
exit(1);
}
//TODO: init tracking?
//TODO: alloc pts
config_destroy(conf);
return pmdc;
}
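/*
 * Illustrative pmdc.conf sketch (libconfig syntax; not shipped with this commit,
 * all values are placeholder assumptions). It lists only the keys looked up above.
 *
 *   offline = false;
 *   ip = "192.168.0.69";
 *   plugin = "o3d.L32.pcp";
 *   cameraID = 1;
 *   // offline mode instead reads: camVideoFile, pmdIFile, pmd3DFile, headersFile
 *   pmdSize = { width = 64; height = 50; };
 *   hybrid = false;
 *   intrinsicCam = "intrinsic-cam.xml";  distortionCam = "distortion-cam.xml";
 *   intrinsicPMD = "intrinsic-pmd.xml";  distortionPMD = "distortion-pmd.xml";
 *   tracking = { quality = 0.01; min = 50; max = 200; minDist = 5; winSize = 10; };
 *   historyLen = 5;
 *   minPts = 20;
 *   maxError = 5.0;
 *   outliersRemoval = { sigmaDepth = 0.05; sigmaColor = 10.0; threshold = 0.5; };
 *   savePoses = false;
 *   synchronous = false;
 *   icp = { verbose = false; method = "quat"; };  // helix | svd | apx | quat | ortho
 */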
int grabData(PMDCam *pmdc) {
static unsigned int latestTimestamp = 0;
static unsigned int latestTimestampUsec = 0;
while(1) { // grab data until the new have come
if(!pmdc->_offlineMode) {
pmdUpdate(pmdc->_pmd->hnd);
pmdc->_iCamColorUBuffer = cvQueryFrame(pmdc->_capture);
if(pmdc->synchronous) {
pmdRetriveDistances(pmdc->_pmd);
pmdProjectArrayToCartesian(pmdc->_pmd, pmdc->intrinsicPMD, pmdc->pts);
// project array has to be called right after retrieving the distances
pmdQueryImageAsync(pmdc->_pmd, pmdc->_iPMDIU);
} else {
pmdRetriveDistancesAsync(pmdc->_pmd);
pmdProjectArrayToCartesian(pmdc->_pmd, pmdc->intrinsicPMD, pmdc->pts);
// project array has to be called right after retrieving the distances
pmdQueryImageAsync(pmdc->_pmd, pmdc->_iPMDIU);
/* printf("time: %f, utime: %f, valid: %f\n"
, pmdc->header->Seconds
, pmdc->header->Useconds
, pmdc->header->ValidImage
);*/
}
pmdc->header = retriveHeader();
pmdc->timestamp = (unsigned int)pmdc->header->Seconds;
pmdc->timestampUsec = (unsigned int)pmdc->header->Useconds;
if(pmdc->hybrid) {
if( pmdc->timestamp > latestTimestamp
|| pmdc->timestampUsec > latestTimestampUsec) {
latestTimestamp = pmdc->timestamp;
latestTimestampUsec = pmdc->timestampUsec;
cvCopy(pmdc->_iCamColorUBuffer, pmdc->_iCamColorU);
break;
} else continue;
} else {
pmdc->_iCamColorU = pmdc->_iCamColorUBuffer;
break;
}
} else { // offline mode
pmdc->_iCamColorUBuffer = cvQueryFrame(pmdc->_capture);
if(!pmdc->_iCamColorUBuffer) return 1;
CvSize pmdSz = cvGetSize(pmdc->_iPMDIU);
if(fread(pmdc->_iPMDIU->imageData, sizeof(char), pmdSz.width*pmdSz.height, pmdc->_f.i)
< (unsigned)pmdSz.width*pmdSz.height) return 1;
//if(fread(pmdc->_iPMDAU->imageData, sizeof(char), pmdSz.width*pmdSz.height, pmdc->_f.a)
// < (unsigned)pmdSz.width*pmdSz.height) return 1;
for(int i = 0; i < pmdSz.height; i++)
fread(pmdc->pts[i], sizeof(CvPoint3D32f), pmdSz.width, pmdc->_f.p);
fread(pmdc->header, sizeof(ImageHeaderInformation), 1, pmdc->_f.h);
// I hope CvPoint3D32f contains no padding holes :)
pmdc->timestamp = (unsigned int)pmdc->header->Seconds;
pmdc->timestampUsec = (unsigned int)pmdc->header->Useconds;
/*printf("%i %i %i\n", (unsigned int)pmdc->header->Seconds
, (unsigned int)pmdc->header->Useconds
, (unsigned int)pmdc->header->ValidImage);*/
if(pmdc->hybrid) {
if( pmdc->timestamp > latestTimestamp
|| pmdc->timestampUsec > latestTimestampUsec) {
latestTimestamp = pmdc->timestamp;
latestTimestampUsec = pmdc->timestampUsec;
cvCopy(pmdc->_iCamColorUBuffer, pmdc->_iCamColorU);
break;
} else continue;
} else {
pmdc->_iCamColorU = pmdc->_iCamColorUBuffer;
break;
}
}
}
cvFlip(pmdc->_iPMDIU, 0, 1); // mirror horizontally (flip around the y-axis)
cvRemap(pmdc->_iPMDIU, pmdc->iPMDI, pmdc->_mapXPMD, pmdc->_mapYPMD); //undistortion
cvFlip(pmdc->_iCamColorU, 0, -1); // flip around x and y axes
cvRemap(pmdc->_iCamColorU, pmdc->iCamColor, pmdc->_mapXCam, pmdc->_mapYCam);
cvCvtColor(pmdc->iCamColor, pmdc->iCam, CV_BGR2GRAY);
return 0;
}
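/*
 * Illustrative usage sketch (not part of the original file, kept in #if 0 so it
 * never compiles): a minimal acquisition loop combining initPMDCam() and
 * grabData(). The config path and the display step are placeholder choices.
 */
#if 0
int main() {
    PMDCam *pmdc = initPMDCam("pmdc.conf");
    cvNamedWindow("Cam", 0);
    while(0 == grabData(pmdc)) { // returns non-zero when the offline data runs out
        // pmdc->iCamColor, pmdc->iPMDI and pmdc->pts now hold the undistorted
        // camera image, the PMD intensity image and the projected 3D points
        cvShowImage("Cam", pmdc->iCamColor);
        if(27 == cvWaitKey(5)) break; // ESC quits
    }
    return 0;
}
#endif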

View file

@@ -0,0 +1,33 @@
SET(SHOW_LIBS glui scan ANN ${OPENGL_LIBRARIES})
IF(WIN32)
IF( CMAKE_SIZEOF_VOID_P EQUAL 8 )
SET(SHOW_LIBS ${SHOW_LIBS} ${CMAKE_SOURCE_DIR}/3rdparty/windows/x64/freeglut.lib XGetopt)
ELSE( CMAKE_SIZEOF_VOID_P EQUAL 8 )
SET(SHOW_LIBS ${SHOW_LIBS} ${CMAKE_SOURCE_DIR}/3rdparty/windows/freeglut.lib XGetopt)
ENDIF(CMAKE_SIZEOF_VOID_P EQUAL 8 )
ENDIF(WIN32)
IF (UNIX)
SET(SHOW_LIBS newmat dl ${SHOW_LIBS} ${GLUT_LIBRARIES})
ENDIF(UNIX)
IF(WITH_GLEE)
SET(SHOW_LIBS ${SHOW_LIBS} glee)
ENDIF(WITH_GLEE)
SET(SHOW_SRCS NurbsPath.cc PathGraph.cc vertexarray.cc viewcull.cc colormanager.cc compacttree.cc scancolormanager.cc display.cc)
IF (WITH_SHOW)
add_executable(show show.cc ${SHOW_SRCS})
target_link_libraries(show ${SHOW_LIBS})
ENDIF(WITH_SHOW)
IF(WITH_WXSHOW)
add_executable(wxshow wxshow.cc selectionframe.cc ${SHOW_SRCS})
target_link_libraries(wxshow ${wxWidgets_LIBRARIES} wxthings ${SHOW_LIBS})
ENDIF(WITH_WXSHOW)
### EXPORT SHARED LIBS
IF(EXPORT_SHARED_LIBS)
add_library(show_s SHARED ${SHOW_SRCS})
target_link_libraries(show_s newmat_s)
ENDIF(EXPORT_SHARED_LIBS)

View file

@@ -0,0 +1,128 @@
/*
* grabVideoAnd3D implementation
*
* Copyright (C) Stanislav Serebryakov
*
* Released under the GPL version 3.
*
*/
#include <stdio.h>
#include <stdlib.h>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "pmdsdk2.h"
#include "cvpmd.h"
int main(int argc, char **argv)
{
int totalFrames = 0;
int ui = 1;
printf("%i\n", argc);
if(argc > 1) {
totalFrames = atoi(argv[1]);
ui = 0;
}
if(argc > 2) ui = 1;
//FIXME: here is lots of old code, should be removed
PMD *pmd = initPMD("../o3d.L32.pcp", "192.168.0.69");
CvCapture *capture = cvCaptureFromCAM(1); //FIXME: should be passed via argc
CvSize pmdSz = pmdGetSize(pmd);
printf("pmd sz: %i x %i\n", pmdSz.width, pmdSz.height);
IplImage *imgCamColor = cvQueryFrame(capture);
IplImage *imgPMD = cvCreateImage(pmdGetSize(pmd), 8, 1);
IplImage *imgPMDA = cvCreateImage(cvGetSize(imgPMD), 8, 1);
IplImage *imgCam = cvCreateImage(cvGetSize(imgCamColor), 8, 1);
CvPoint3D32f **pmdPts = (CvPoint3D32f**) cvAlloc(pmdSz.height * sizeof(CvPoint3D32f*));
for(int i = 0; i < pmdSz.height; i++) pmdPts[i] = (CvPoint3D32f*) cvAlloc(pmdSz.width * sizeof(CvPoint3D32f));
CvMat *intrinsicCam = (CvMat*)cvLoad("../intrinsic-cam-6x4.xml");
CvMat *distortionCam = (CvMat*)cvLoad("../distortion-cam-6x4.xml");
CvMat *intrinsicPMD = (CvMat*)cvLoad("../intrinsic-pmd-6x4.xml");
CvMat *distortionPMD = (CvMat*)cvLoad("../distortion-pmd-6x4.xml");
if(!intrinsicCam || !distortionCam|| !intrinsicPMD || !distortionPMD) {
fprintf(stderr, "ERROR: can't load intrinsic and/or distortion xml files!\n");
return 1;
}
// FILE *vPMDA = fopen("./s001.arv", "wb"); // ala dot Amplitudes Raw Video NOTE: i'm not sure it is amplitude :P
// FILE *vPMDAf = fopen("./s001.farv", "wb"); // same but Floating
FILE *vPMDI = fopen("./s001.irv", "wb"); // same but Intensities
FILE *vPMDIf = fopen("./s001.firv", "wb");
FILE *headers = fopen("./s001.head", "wb");
CvVideoWriter *vCam = cvCreateVideoWriter( "./s001.avi"
, CV_FOURCC('D', 'I', 'V', 'X')
, 25, cvGetSize(imgCam), 1);
FILE *pmdPtsFile = fopen("./s001.3dp", "w");
if(ui) {
cvNamedWindow("Cam", 0);
cvNamedWindow("PMD", 1);
}
printf("DEBUG: init done, going to grab %i frames.\n", totalFrames);
int frames = 0;
while(1) {
frames++;
if(0 == frames % 100) printf("%i frames grabbed...\n", frames);
// Image retriving
pmdUpdate(pmd->hnd);
imgCamColor = cvQueryFrame(capture);
pmdQueryImageAsync(pmd, imgPMD);
fwrite(pmdDataPtr(pmd), sizeof(float), pmdSz.width*pmdSz.height, vPMDIf);
//pmdQueryAmplitudesAsync(pmd, imgPMDA);
//fwrite(pmdDataPtr(pmd), sizeof(float), pmdSz.width*pmdSz.height, vPMDAf);
pmdRetriveDistancesAsync(pmd);
pmdProjectArrayToCartesian(pmd, intrinsicPMD, pmdPts);
ImageHeaderInformation *header = retriveHeader();
if(ui) {
cvShowImage("Cam", imgCamColor);
cvShowImage("PMD", imgPMD);
}
//FIXME: order col/str
for(int i = 0; i < pmdSz.height; i++)
fwrite(pmdPts[i], sizeof(CvPoint3D32f), pmdSz.width, pmdPtsFile);
fwrite(imgPMD->imageData, sizeof(char), pmdSz.width*pmdSz.height, vPMDI);
//fwrite(imgPMDA->imageData, sizeof(char), pmdSz.width*pmdSz.height, vPMDA);
fwrite(header, sizeof(ImageHeaderInformation), 1, headers);
cvWriteFrame(vCam, imgCamColor);
if(totalFrames && (frames >= totalFrames)) break;
if(ui) if(27 == cvWaitKey(5)) break;
}
// fclose(vPMDA);
// fclose(vPMDAf);
fclose(vPMDI);
fclose(vPMDIf);
fclose(pmdPtsFile);
fclose(headers);
printf("grabbed %i frames.\n", frames);
printf("See s001* files (you want to rename them).\n");
}
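/*
 * Invocation sketch (binary name is a placeholder): with no arguments the
 * program grabs with a UI until ESC is pressed; a frame count as the first
 * argument disables the UI; any second argument re-enables it.
 *
 *   ./grabVideoAnd3D          # interactive, grab until ESC
 *   ./grabVideoAnd3D 500      # grab 500 frames, no UI
 *   ./grabVideoAnd3D 500 ui   # grab 500 frames with UI
 */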

View file

@@ -0,0 +1,118 @@
### TOOLS
IF(WITH_TOOLS)
FIND_PACKAGE(OpenCV REQUIRED)
### SCAN_RED
add_executable(scan_red scan_red.cc fbr/fbr_global.cc fbr/panorama.cc fbr/scan_cv.cc)
IF(UNIX)
target_link_libraries(scan_red scan dl ANN fbr_cv_io fbr_panorama ${OpenCV_LIBS} ${Boost_LIBRARIES})
ENDIF(UNIX)
IF (WIN32)
target_link_libraries(scan_red scan ANN XGetopt)
ENDIF(WIN32)
### SCAN_DIFF
add_executable(scan_diff scan_diff.cc)
# add_executable(scan_diff2d scan_diff2d.cc ../show/colormanager.cc)
IF(UNIX)
target_link_libraries(scan_diff scan dl ANN)
# target_link_libraries(scan_diff2d scan dl ANN)
ENDIF(UNIX)
IF (WIN32)
target_link_libraries(scan_diff scan ANN XGetopt)
# target_link_libraries(scan_diff2d scan ANN XGetopt)
ENDIF(WIN32)
add_executable(frame_to_graph frame_to_graph.cc)
add_executable(convergence convergence.cc)
add_executable(graph_balancer graph_balancer.cc)
add_executable(exportPoints exportPoints.cc)
add_executable(frames2riegl frames2riegl.cc)
add_executable(frames2pose frames2pose.cc)
add_executable(pose2frames pose2frames.cc)
add_executable(riegl2frames riegl2frames.cc)
add_executable(toGlobal toGlobal.cc)
IF(UNIX)
target_link_libraries(graph_balancer scan ${Boost_GRAPH_LIBRARY} ${Boost_SERIALIZATION_LIBRARY} ${Boost_REGEX_LIBRARY})
target_link_libraries(exportPoints scan dl ANN)
target_link_libraries(toGlobal scan)
ENDIF(UNIX)
IF (WIN32)
target_link_libraries(frame_to_graph XGetopt ${Boost_LIBRARIES})
target_link_libraries(convergence XGetopt ${Boost_LIBRARIES})
target_link_libraries(graph_balancer scan XGetopt ${Boost_LIBRARIES})
target_link_libraries(exportPoints scan ANN XGetopt ${Boost_LIBRARIES})
target_link_libraries(frames2pose XGetopt ${Boost_LIBRARIES})
target_link_libraries(pose2frames XGetopt ${Boost_LIBRARIES})
target_link_libraries(frames2riegl XGetopt ${Boost_LIBRARIES})
target_link_libraries(riegl2frames XGetopt ${Boost_LIBRARIES})
target_link_libraries(toGlobal XGetopt ${Boost_LIBRARIES})
ENDIF(WIN32)
ENDIF(WITH_TOOLS)
### SCANLIB
SET(SCANLIB_SRCS
kd.cc kdManaged.cc kdMeta.cc graphSlam6D.cc
graph.cc icp6Dapx.cc icp6D.cc icp6Dsvd.cc
icp6Dortho.cc icp6Dquat.cc icp6Dhelix.cc icp6Dlumeuler.cc
icp6Dlumquat.cc icp6Ddual.cc lum6Deuler.cc lum6Dquat.cc
ghelix6DQ2.cc gapx6D.cc graphToro.cc ann_kd.cc
graphHOG-Man.cc elch6D.cc elch6Dquat.cc elch6DunitQuat.cc
elch6Dslerp.cc elch6Deuler.cc loopToro.cc loopHOG-Man.cc
point_type.cc icp6Dquatscale.cc searchTree.cc Boctree.cc
allocator.cc
scan.cc basicScan.cc managedScan.cc metaScan.cc
io_types.cc io_utils.cc pointfilter.cc
)
if(WITH_METRICS)
set(SCANLIB_SRCS ${SCANLIB_SRCS} metrics.cc)
endif(WITH_METRICS)
add_library(scan STATIC ${SCANLIB_SRCS})
target_link_libraries(scan scanclient scanio)
IF(UNIX)
target_link_libraries(scan dl)
ENDIF(UNIX)
### EXPORT SHARED LIBS
IF(EXPORT_SHARED_LIBS)
add_library(scan_s SHARED ${SCANLIB_SRCS})
#target_link_libraries(scan_s ${Boost_LIBRARIES} newmat)
target_link_libraries(scan_s newmat_s sparse_s ANN_s )
ENDIF(EXPORT_SHARED_LIBS)
### SLAM6D
IF(WITH_CUDA)
CUDA_COMPILE(CUDA_FILES cuda/CIcpGpuCuda.cu )
add_executable(slam6D slam6D.cc cuda/icp6Dcuda.cc ${CUDA_FILES})
target_link_libraries(slam6D ${CUDA_LIBRARIES} ANN cudpp64)
CUDA_ADD_CUBLAS_TO_TARGET(slam6D)
CUDA_ADD_CUTIL_TO_TARGET(slam6D)
ELSE(WITH_CUDA)
add_executable(slam6D slam6D.cc)
ENDIF(WITH_CUDA)
IF(UNIX)
target_link_libraries(slam6D scan newmat sparse ANN)
ENDIF(UNIX)
IF(WIN32)
target_link_libraries(slam6D scan newmat sparse ANN XGetopt ${Boost_LIBRARIES})
ENDIF(WIN32)
#IF(MSVC)
# INSTALL(TARGETS slam6D RUNTIME DESTINATION ${CMAKE_SOURCE_DIR}/windows)
#ENDIF(MSVC)

File diff suppressed because it is too large

View file

@@ -0,0 +1,587 @@
// Copyright (C) 2007 by Cristóbal Carnero Liñán
// grendel.ccl@gmail.com
//
// This file is part of cvBlob.
//
// cvBlob is free software: you can redistribute it and/or modify
// it under the terms of the Lesser GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// cvBlob is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Lesser GNU General Public License for more details.
//
// You should have received a copy of the Lesser GNU General Public License
// along with cvBlob. If not, see <http://www.gnu.org/licenses/>.
//
/// \file cvblob.h
/// \brief OpenCV Blob header file.
#ifdef SWIG
%module cvblob
%{
#include "cvblob.h"
%}
#endif
#ifndef CVBLOB_H
#define CVBLOB_H
#include <iostream>
#include <map>
#include <list>
#include <vector>
#include <limits>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#ifndef __CV_BEGIN__
#define __CV_BEGIN__ __BEGIN__
#endif
#ifndef __CV_END__
#define __CV_END__ __END__
#endif
#ifdef __cplusplus
extern "C" {
#endif
namespace cvb
{
////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Contours
// Chain code:
// 7 0 1
// 6 2
// 5 4 3
#define CV_CHAINCODE_UP 0 ///< Up.
#define CV_CHAINCODE_UP_RIGHT 1 ///< Up and right.
#define CV_CHAINCODE_RIGHT 2 ///< Right.
#define CV_CHAINCODE_DOWN_RIGHT 3 ///< Down and right.
#define CV_CHAINCODE_DOWN 4 ///< Down.
#define CV_CHAINCODE_DOWN_LEFT 5 ///< Down and left.
#define CV_CHAINCODE_LEFT 6 ///< Left.
#define CV_CHAINCODE_UP_LEFT 7 ///< Up and left.
/// \brief Move vectors of chain codes.
/// \see CV_CHAINCODE_UP
/// \see CV_CHAINCODE_UP_LEFT
/// \see CV_CHAINCODE_LEFT
/// \see CV_CHAINCODE_DOWN_LEFT
/// \see CV_CHAINCODE_DOWN
/// \see CV_CHAINCODE_DOWN_RIGHT
/// \see CV_CHAINCODE_RIGHT
/// \see CV_CHAINCODE_UP_RIGHT
const char cvChainCodeMoves[8][2] = { { 0, -1},
{ 1, -1},
{ 1, 0},
{ 1, 1},
{ 0, 1},
{-1, 1},
{-1, 0},
{-1, -1}
};
/// \brief Direction.
/// \see CV_CHAINCODE_UP
/// \see CV_CHAINCODE_UP_LEFT
/// \see CV_CHAINCODE_LEFT
/// \see CV_CHAINCODE_DOWN_LEFT
/// \see CV_CHAINCODE_DOWN
/// \see CV_CHAINCODE_DOWN_RIGHT
/// \see CV_CHAINCODE_RIGHT
/// \see CV_CHAINCODE_UP_RIGHT
typedef unsigned char CvChainCode;
/// \brief Chain code.
/// \see CvChainCode
typedef std::list<CvChainCode> CvChainCodes;
/// \brief Chain code contour.
/// \see CvChainCodes
struct CvContourChainCode
{
CvPoint startingPoint; ///< Point where contour begin.
CvChainCodes chainCode; ///< Polygon description based on chain codes.
};
typedef std::list<CvContourChainCode *> CvContoursChainCode; ///< List of contours (chain codes type).
/// \brief Polygon based contour.
typedef std::vector<CvPoint> CvContourPolygon;
/// \fn void cvRenderContourChainCode(CvContourChainCode const *contour, IplImage const *img, CvScalar const &color=CV_RGB(255, 255, 255))
/// \brief Draw a contour.
/// \param contour Chain code contour.
/// \param img Image to draw on.
/// \param color Color to draw (default, white).
/// \see CvContourChainCode
void cvRenderContourChainCode(CvContourChainCode const *contour, IplImage const *img, CvScalar const &color=CV_RGB(255, 255, 255));
/// \fn CvContourPolygon *cvConvertChainCodesToPolygon(CvContourChainCode const *cc)
/// \brief Convert a chain code contour to a polygon.
/// \param cc Chain code contour.
/// \return A polygon.
/// \see CvContourChainCode
/// \see CvContourPolygon
CvContourPolygon *cvConvertChainCodesToPolygon(CvContourChainCode const *cc);
/// \fn void cvRenderContourPolygon(CvContourPolygon const *contour, IplImage *img, CvScalar const &color=CV_RGB(255, 255, 255))
/// \brief Draw a polygon.
/// \param contour Polygon contour.
/// \param img Image to draw on.
/// \param color Color to draw (default, white).
/// \see CvContourPolygon
void cvRenderContourPolygon(CvContourPolygon const *contour, IplImage *img, CvScalar const &color=CV_RGB(255, 255, 255));
/// \fn double cvContourPolygonArea(CvContourPolygon const *p)
/// \brief Calculates area of a polygonal contour.
/// \param p Contour (polygon type).
/// \return Area of the contour.
double cvContourPolygonArea(CvContourPolygon const *p);
/// \fn double cvContourChainCodePerimeter(CvContourChainCode const *c)
/// \brief Calculates perimeter of a chain code contour.
/// \param c Contour (chain code type).
/// \return Perimeter of the contour.
double cvContourChainCodePerimeter(CvContourChainCode const *c);
/// \fn double cvContourPolygonPerimeter(CvContourPolygon const *p)
/// \brief Calculates perimeter of a polygonal contour.
/// \param p Contour (polygon type).
/// \return Perimeter of the contour.
double cvContourPolygonPerimeter(CvContourPolygon const *p);
/// \fn double cvContourPolygonCircularity(const CvContourPolygon *p)
/// \brief Calculates the circularity of a polygon (compactness measure).
/// \param p Contour (polygon type).
/// \return Circularity: a non-negative value, where 0 corresponds to a perfect circle.
double cvContourPolygonCircularity(const CvContourPolygon *p);
/// \fn CvContourPolygon *cvSimplifyPolygon(CvContourPolygon const *p, double const delta=1.)
/// \brief Simplify a polygon, reducing the number of vertices according to the distance "delta".
/// Uses a version of the Ramer-Douglas-Peucker algorithm (http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm).
/// \param p Contour (polygon type).
/// \param delta Minimum distance.
/// \return A simplified version of the original polygon.
CvContourPolygon *cvSimplifyPolygon(CvContourPolygon const *p, double const delta=1.);
/// \fn CvContourPolygon *cvPolygonContourConvexHull(CvContourPolygon const *p)
/// \brief Calculates convex hull of a contour.
/// Uses the Melkman Algorithm. Code based on the version in http://w3.impa.br/~rdcastan/Cgeometry/.
/// \param p Contour (polygon type).
/// \return Convex hull.
CvContourPolygon *cvPolygonContourConvexHull(CvContourPolygon const *p);
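// Illustrative contour post-processing sketch (comment only; `blob` stands for a
// labelled cvb::CvBlob* and the simplification distance 2.0 is an arbitrary value):
//
//   cvb::CvContourPolygon *poly   = cvb::cvConvertChainCodesToPolygon(&blob->contour);
//   cvb::CvContourPolygon *simple = cvb::cvSimplifyPolygon(poly, 2.);
//   cvb::CvContourPolygon *hull   = cvb::cvPolygonContourConvexHull(simple);
//   double area = cvb::cvContourPolygonArea(simple);
//   delete poly; delete simple; delete hull;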
/// \fn void cvWriteContourPolygonCSV(const CvContourPolygon& p, const std::string& filename)
/// \brief Write a contour to a CSV (Comma-separated values) file.
/// \param p Polygon contour.
/// \param filename File name.
void cvWriteContourPolygonCSV(const CvContourPolygon& p, const std::string& filename);
/// \fn void cvWriteContourPolygonSVG(const CvContourPolygon& p, const std::string& filename, const CvScalar& stroke=cvScalar(0,0,0), const CvScalar& fill=cvScalar(255,255,255))
/// \brief Write a contour to a SVG file (http://en.wikipedia.org/wiki/Scalable_Vector_Graphics).
/// \param p Polygon contour.
/// \param filename File name.
/// \param stroke Stroke color (black by default).
/// \param fill Fill color (white by default).
void cvWriteContourPolygonSVG(const CvContourPolygon& p, const std::string& filename, const CvScalar& stroke=cvScalar(0,0,0), const CvScalar& fill=cvScalar(255,255,255));
////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Blobs
/// \brief Type of label.
/// \see IPL_DEPTH_LABEL
typedef unsigned int CvLabel;
//typedef unsigned char CvLabel;
/// \def IPL_DEPTH_LABEL
/// \brief Size of a label in bits.
/// \see CvLabel
#define IPL_DEPTH_LABEL (sizeof(cvb::CvLabel)*8)
/// \def CV_BLOB_MAX_LABEL
/// \brief Max label number.
/// \see CvLabel.
#define CV_BLOB_MAX_LABEL std::numeric_limits<CvLabel>::max()
/// \brief Type of identification numbers.
typedef unsigned int CvID;
/// \brief Struct that contain information about one blob.
struct CvBlob
{
CvLabel label; ///< Label assigned to the blob.
union
{
unsigned int area; ///< Area (moment 00).
unsigned int m00; ///< Moment 00 (area).
};
unsigned int minx; ///< X min.
unsigned int maxx; ///< X max.
unsigned int miny; ///< Y min.
unsigned int maxy; ///< Y max.
CvPoint2D64f centroid; ///< Centroid.
double m10; ///< Moment 10.
double m01; ///< Moment 01.
double m11; ///< Moment 11.
double m20; ///< Moment 20.
double m02; ///< Moment 02.
double u11; ///< Central moment 11.
double u20; ///< Central moment 20.
double u02; ///< Central moment 02.
double n11; ///< Normalized central moment 11.
double n20; ///< Normalized central moment 20.
double n02; ///< Normalized central moment 02.
double p1; ///< Hu moment 1.
double p2; ///< Hu moment 2.
CvContourChainCode contour; ///< Contour.
CvContoursChainCode internalContours; ///< Internal contours.
};
/// \var typedef std::map<CvLabel,CvBlob *> CvBlobs
/// \brief List of blobs.
/// A map is used to access each blob from its label number.
/// \see CvLabel
/// \see CvBlob
typedef std::map<CvLabel,CvBlob *> CvBlobs;
/// \var typedef std::pair<CvLabel,CvBlob *> CvLabelBlob
/// \brief Pair (label, blob).
/// \see CvLabel
/// \see CvBlob
typedef std::pair<CvLabel,CvBlob *> CvLabelBlob;
/// \fn unsigned int cvLabel (IplImage const *img, IplImage *imgOut, CvBlobs &blobs);
/// \brief Label the connected parts of a binary image.
/// Algorithm based on the paper "A linear-time component-labeling algorithm using contour tracing technique" by Fu Chang, Chun-Jen Chen and Chi-Jen Lu.
/// \param img Input binary image (depth=IPL_DEPTH_8U and num. channels=1).
/// \param imgOut Output image (depth=IPL_DEPTH_LABEL and num. channels=1).
/// \param blobs List of blobs.
/// \return Number of pixels that have been labeled.
unsigned int cvLabel (IplImage const *img, IplImage *imgOut, CvBlobs &blobs);
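// Illustrative usage sketch (comment only; `imgBin` stands for any binary
// IPL_DEPTH_8U single-channel image, `imgOrig`/`imgDest` for 3-channel source and
// destination images, and the area thresholds are arbitrary example values):
//
//   IplImage *imgLabel = cvCreateImage(cvGetSize(imgBin), IPL_DEPTH_LABEL, 1);
//   cvb::CvBlobs blobs;
//   unsigned int labelledPixels = cvb::cvLabel(imgBin, imgLabel, blobs);
//   cvb::cvFilterByArea(blobs, 100, 1000000);
//   cvb::cvRenderBlobs(imgLabel, blobs, imgOrig, imgDest);
//   cvb::cvReleaseBlobs(blobs);
//   cvReleaseImage(&imgLabel);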
//IplImage *cvFilterLabel(IplImage *imgIn, CvLabel label);
/// \fn void cvFilterLabels(IplImage *imgIn, IplImage *imgOut, const CvBlobs &blobs)
/// \brief Draw a binary image with the blobs that have been given.
/// \param imgIn Input image (depth=IPL_DEPTH_LABEL and num. channels=1).
/// \param imgOut Output binary image (depth=IPL_DEPTH_8U and num. channels=1).
/// \param blobs List of blobs to be drawn.
/// \see cvLabel
void cvFilterLabels(IplImage *imgIn, IplImage *imgOut, const CvBlobs &blobs);
/// \fn CvLabel cvGetLabel(IplImage const *img, unsigned int x, unsigned int y)
/// \brief Get the label value from a labeled image.
/// \param img Label image.
/// \param x X coordinate.
/// \param y Y coordinate.
/// \return Label value.
/// \see CvLabel
CvLabel cvGetLabel(IplImage const *img, unsigned int x, unsigned int y);
/// \fn inline void cvReleaseBlob(CvBlob *blob)
/// \brief Clear a blob structure.
/// \param blob Blob.
/// \see CvBlob
inline void cvReleaseBlob(CvBlob *blob)
{
if (blob)
{
for (CvContoursChainCode::iterator jt=blob->internalContours.begin(); jt!=blob->internalContours.end(); ++jt)
{
CvContourChainCode *contour = *jt;
if (contour)
delete contour;
}
blob->internalContours.clear();
delete blob;
}
}
/// \fn inline void cvReleaseBlobs(CvBlobs &blobs)
/// \brief Clear blobs structure.
/// \param blobs List of blobs.
/// \see CvBlobs
inline void cvReleaseBlobs(CvBlobs &blobs)
{
for (CvBlobs::iterator it=blobs.begin(); it!=blobs.end(); ++it)
{
cvReleaseBlob((*it).second);
}
blobs.clear();
}
/// \fn CvLabel cvGreaterBlob(const CvBlobs &blobs)
/// \brief Find greater blob.
/// \param blobs List of blobs.
/// \return Label of the greater blob or 0 if there are no blobs.
/// \see cvLabel
CvLabel cvGreaterBlob(const CvBlobs &blobs);
/// \fn void cvFilterByArea(CvBlobs &blobs, unsigned int minArea, unsigned int maxArea)
/// \brief Filter blobs by area.
/// Those blobs whose areas are not in range will be erased from the input list of blobs.
/// \param blobs List of blobs.
/// \param minArea Minimum area.
/// \param maxArea Maximum area.
void cvFilterByArea(CvBlobs &blobs, unsigned int minArea, unsigned int maxArea);
/// \fn void cvFilterByLabel(CvBlobs &blobs, CvLabel label)
/// \brief Filter blobs by label.
/// Delete all blobs except the one with the given label.
/// \param blobs List of blobs.
/// \param label Label to leave.
void cvFilterByLabel(CvBlobs &blobs, CvLabel label);
/// \fn inline CvPoint2D64f cvCentroid(CvBlob *blob)
/// \brief Calculates centroid.
/// Centroid will be returned and stored in the blob structure.
/// \param blob Blob whose centroid will be calculated.
/// \return Centroid.
/// \see CvBlob
inline CvPoint2D64f cvCentroid(CvBlob *blob)
{
return blob->centroid=cvPoint2D64f(blob->m10/blob->area, blob->m01/blob->area);
}
/// \fn double cvAngle(CvBlob *blob)
/// \brief Calculates angle orientation of a blob.
/// This function uses central moments so cvCentralMoments should have been called before for this blob.
/// \param blob Blob.
/// \return Angle orientation in radians.
/// \see cvCentralMoments
/// \see CvBlob
double cvAngle(CvBlob *blob);
/// \fn cvSaveImageBlob(const char *filename, IplImage *img, CvBlob const *blob)
/// \brief Save the image of a blob to a file.
/// The function uses an image (that can be the original pre-processed image or a processed one, or even the result of cvRenderBlobs, for example) and a blob structure.
/// Then the function saves a copy of the part of the image where the blob is.
/// \param filename Name of the file.
/// \param img Image.
/// \param blob Blob.
/// \see CvBlob
/// \see cvRenderBlob
void cvSaveImageBlob(const char *filename, IplImage *img, CvBlob const *blob);
#define CV_BLOB_RENDER_COLOR 0x0001 ///< Render each blob with a different color. \see cvRenderBlobs
#define CV_BLOB_RENDER_CENTROID 0x0002 ///< Render centroid. \see cvRenderBlobs
#define CV_BLOB_RENDER_BOUNDING_BOX 0x0004 ///< Render bounding box. \see cvRenderBlobs
#define CV_BLOB_RENDER_ANGLE 0x0008 ///< Render angle. \see cvRenderBlobs
#define CV_BLOB_RENDER_TO_LOG 0x0010 ///< Print blob data to log out. \see cvRenderBlobs
#define CV_BLOB_RENDER_TO_STD 0x0020 ///< Print blob data to std out. \see cvRenderBlobs
/// \fn void cvRenderBlob(const IplImage *imgLabel, CvBlob *blob, IplImage *imgSource, IplImage *imgDest, unsigned short mode=0x000f, CvScalar const &color=CV_RGB(255, 255, 255), double alpha=1.)
/// \brief Draws or prints information about a blob.
/// \param imgLabel Label image (depth=IPL_DEPTH_LABEL and num. channels=1).
/// \param blob Blob.
/// \param imgSource Input image (depth=IPL_DEPTH_8U and num. channels=3).
/// \param imgDest Output image (depth=IPL_DEPTH_8U and num. channels=3).
/// \param mode Render mode. By default it is CV_BLOB_RENDER_COLOR|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_ANGLE.
/// \param color Color to render (if CV_BLOB_RENDER_COLOR is used).
/// \param alpha Alpha value used when CV_BLOB_RENDER_COLOR is set: 1.0 indicates opaque, 0.0 translucent (1.0 by default).
/// \see CV_BLOB_RENDER_COLOR
/// \see CV_BLOB_RENDER_CENTROID
/// \see CV_BLOB_RENDER_BOUNDING_BOX
/// \see CV_BLOB_RENDER_ANGLE
/// \see CV_BLOB_RENDER_TO_LOG
/// \see CV_BLOB_RENDER_TO_STD
void cvRenderBlob(const IplImage *imgLabel, CvBlob *blob, IplImage *imgSource, IplImage *imgDest, unsigned short mode=0x000f, CvScalar const &color=CV_RGB(255, 255, 255), double alpha=1.);
/// \fn void cvRenderBlobs(const IplImage *imgLabel, CvBlobs &blobs, IplImage *imgSource, IplImage *imgDest, unsigned short mode=0x000f, double alpha=1.)
/// \brief Draws or prints information about blobs.
/// \param imgLabel Label image (depth=IPL_DEPTH_LABEL and num. channels=1).
/// \param blobs List of blobs.
/// \param imgSource Input image (depth=IPL_DEPTH_8U and num. channels=3).
/// \param imgDest Output image (depth=IPL_DEPTH_8U and num. channels=3).
/// \param mode Render mode. By default it is CV_BLOB_RENDER_COLOR|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_ANGLE.
/// \param alpha Alpha value used when CV_BLOB_RENDER_COLOR is set: 1.0 indicates opaque, 0.0 translucent (1.0 by default).
/// \see CV_BLOB_RENDER_COLOR
/// \see CV_BLOB_RENDER_CENTROID
/// \see CV_BLOB_RENDER_BOUNDING_BOX
/// \see CV_BLOB_RENDER_ANGLE
/// \see CV_BLOB_RENDER_TO_LOG
/// \see CV_BLOB_RENDER_TO_STD
void cvRenderBlobs(const IplImage *imgLabel, CvBlobs &blobs, IplImage *imgSource, IplImage *imgDest, unsigned short mode=0x000f, double alpha=1.);
/// \fn void cvSetImageROItoBlob(IplImage *img, CvBlob const *blob)
/// \brief Set the ROI of an image to the bounding box of a blob.
/// \param img Image.
/// \param blob Blob.
/// \see CvBlob
inline void cvSetImageROItoBlob(IplImage *img, CvBlob const *blob)
{
cvSetImageROI(img, cvRect(blob->minx, blob->miny, blob->maxx-blob->minx, blob->maxy-blob->miny));
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Color
/// \fn CvScalar cvBlobMeanColor(CvBlob const *blob, IplImage const *imgLabel, IplImage const *img)
/// \brief Calculates mean color of a blob in an image.
/// \param blob Blob.
/// \param imgLabel Image of labels.
/// \param img Original image.
/// \return Average color.
CvScalar cvBlobMeanColor(CvBlob const *blob, IplImage const *imgLabel, IplImage const *img);
////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Aux
/// \fn double cvDotProductPoints(CvPoint const &a, CvPoint const &b, CvPoint const &c)
/// \brief Dot product of the vectors ab and bc.
/// \param a First point.
/// \param b Middle point.
/// \param c Last point.
/// \return Dot product of ab and bc.
double cvDotProductPoints(CvPoint const &a, CvPoint const &b, CvPoint const &c);
/// \fn double cvCrossProductPoints(CvPoint const &a, CvPoint const &b, CvPoint const &c)
/// \brief Cross product of the vectors ab and bc.
/// \param a Point.
/// \param b Point.
/// \param c Point.
/// \return Cross product of ab and bc.
double cvCrossProductPoints(CvPoint const &a, CvPoint const &b, CvPoint const &c);
/// \fn double cvDistancePointPoint(CvPoint const &a, CvPoint const &b)
/// \brief Distance between two points.
/// \param a Point.
/// \param b Point.
/// \return Distance.
double cvDistancePointPoint(CvPoint const &a, CvPoint const &b);
/// \fn double cvDistanceLinePoint(CvPoint const &a, CvPoint const &b, CvPoint const &c, bool isSegment=true)
/// \brief Distance between line ab and point c.
/// \param a First point of the segment.
/// \param b Second point of the segment.
/// \param c Point.
/// \param isSegment If false then the distance will be calculated from the line defined by the points a and b, to the point c.
/// \return Distance between ab and c.
double cvDistanceLinePoint(CvPoint const &a, CvPoint const &b, CvPoint const &c, bool isSegment=true);
////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tracking
/// \brief Struct that contain information about one track.
/// \see CvID
/// \see CvLabel
struct CvTrack
{
CvID id; ///< Track identification number.
CvLabel label; ///< Label assigned to the blob related to this track.
unsigned int minx; ///< X min.
unsigned int maxx; ///< X max.
unsigned int miny; ///< Y min.
unsigned int maxy; ///< Y max.
CvPoint2D64f centroid; ///< Centroid.
unsigned int lifetime; ///< Indicates how many frames the object has been in the scene.
unsigned int active; ///< Number of frames the track has been active since the last inactive period.
unsigned int inactive; ///< Number of frames the track has been missing.
};
/// \var typedef std::map<CvID, CvTrack *> CvTracks
/// \brief List of tracks.
/// \see CvID
/// \see CvTrack
typedef std::map<CvID, CvTrack *> CvTracks;
/// \var typedef std::pair<CvID, CvTrack *> CvIDTrack
/// \brief Pair (identification number, track).
/// \see CvID
/// \see CvTrack
typedef std::pair<CvID, CvTrack *> CvIDTrack;
/// \fn inline void cvReleaseTracks(CvTracks &tracks)
/// \brief Clear tracks structure.
/// \param tracks List of tracks.
/// \see CvTracks
inline void cvReleaseTracks(CvTracks &tracks)
{
for (CvTracks::iterator it=tracks.begin(); it!=tracks.end(); it++)
{
CvTrack *track = (*it).second;
if (track) delete track;
}
tracks.clear();
}
/// \fn cvUpdateTracks(CvBlobs const &b, CvTracks &t, const double thDistance, const unsigned int thInactive, const unsigned int thActive=0)
/// \brief Updates list of tracks based on current blobs.
/// Tracking based on:
/// A. Senior, A. Hampapur, Y-L Tian, L. Brown, S. Pankanti, R. Bolle. Appearance Models for
/// Occlusion Handling. Second International workshop on Performance Evaluation of Tracking and
/// Surveillance Systems & CVPR'01. December, 2001.
/// (http://www.research.ibm.com/peoplevision/PETS2001.pdf)
/// \param b List of blobs.
/// \param t List of tracks.
/// \param thDistance Max distance to determine when a track and a blob match.
/// \param thInactive Max number of frames a track can be inactive.
/// \param thActive If a track becomes inactive but it has been active less than thActive frames, the track will be deleted.
/// \see CvBlobs
/// \see CvTracks
void cvUpdateTracks(CvBlobs const &b, CvTracks &t, const double thDistance, const unsigned int thInactive, const unsigned int thActive=0);
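// Illustrative per-frame tracking sketch (comment only): `blobs` is the result of
// cvLabel for the current frame, `tracks` persists across frames, and the thresholds
// (distance 5.0, 10 inactive frames, 5 active frames) are arbitrary example values.
//
//   static cvb::CvTracks tracks;
//   cvb::cvUpdateTracks(blobs, tracks, 5., 10, 5);
//   cvb::cvRenderTracks(tracks, imgSource, imgDest);
//   ...
//   cvb::cvReleaseTracks(tracks);   // when done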
#define CV_TRACK_RENDER_ID 0x0001 ///< Print the ID of each track in the image. \see cvRenderTracks
#define CV_TRACK_RENDER_BOUNDING_BOX 0x0002 ///< Draw bounding box of each track in the image. \see cvRenderTracks
#define CV_TRACK_RENDER_TO_LOG 0x0010 ///< Print track info to log out. \see cvRenderTracks
#define CV_TRACK_RENDER_TO_STD 0x0020 ///< Print track info to std out. \see cvRenderTracks
/// \fn void cvRenderTracks(CvTracks const tracks, IplImage *imgSource, IplImage *imgDest, unsigned short mode=0x00ff, CvFont *font=NULL)
/// \brief Prints tracks information.
/// \param tracks List of tracks.
/// \param imgSource Input image (depth=IPL_DEPTH_8U and num. channels=3).
/// \param imgDest Output image (depth=IPL_DEPTH_8U and num. channels=3).
/// \param mode Render mode. By default it is CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX.
/// \param font OpenCV font used to print on the image.
/// \see CV_TRACK_RENDER_ID
/// \see CV_TRACK_RENDER_BOUNDING_BOX
/// \see CV_TRACK_RENDER_TO_LOG
/// \see CV_TRACK_RENDER_TO_STD
void cvRenderTracks(CvTracks const tracks, IplImage *imgSource, IplImage *imgDest, unsigned short mode=0x000f, CvFont *font=NULL);
}
#ifdef __cplusplus
}
#endif
/// \fn std::ostream& operator<< (std::ostream& output, const cvb::CvBlob& b)
/// \brief Overload operator "<<" for printing blob structure.
/// \return Stream.
std::ostream& operator<< (std::ostream& output, const cvb::CvBlob& b);
/// \fn std::ostream& operator<< (std::ostream& output, const cvb::CvContourPolygon& p)
/// \brief Overload operator "<<" for printing polygons in CSV format.
/// \return Stream.
std::ostream& operator<< (std::ostream& output, const cvb::CvContourPolygon& p);
/// \fn std::ostream& operator<< (std::ostream& output, const cvb::CvTrack& t)
/// \brief Overload operator "<<" for printing track structure.
/// \return Stream.
std::ostream& operator<< (std::ostream& output, const cvb::CvTrack& t);
#endif

View file

@@ -0,0 +1,103 @@
/**
* @file cvpmd.h
* @brief pmdaccess wrappers, PMD -> OpenCV marshalling and a bit of math.
* @author Stanislav `Cfr` Serebryakov
*/
#pragma once
#include "pmdsdk2.h"
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cxcore.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
/**
* @brief Structure for PMD IO, see initPMD.
*/
typedef struct PMD {
PMDHandle hnd; /**< handle for pmd IO */
float *data; /**< image querying buffer */
PMDDataDescription dd; /**< contains image size, integration time, etc */
} PMD;
/**
* PMD Constructor
* @param *plugin IO plugin name i.e. o3d.L32.pcp
* @param *ip PMD camera IP address
* @return initialized PMD struct
*/
PMD *initPMD(const char* plugin, const char *ip);
/**
* Destructor
*/
void releasePMD(PMD **pmd);
/**
* Returns pointer to the data (used to save float data for offline mode)
*/
float *pmdDataPtr(PMD *p);
/**
* Converts PMD data (stored in PMD struct) to provided IplImage (new will be created if NULL passed)
* @param *p initialized PMD struct with grabbed data (i.e. pmdGetIntensities(p->hnd, p->data, p->dd.std.numColumns * p->dd.std.numRows * sizeof(float)))
* @param *img destination IplImage (default: 0, new one will be created)
* @return generated IplImage
*/
IplImage *toIplImage(const PMD *p, IplImage *img);
/**
* Converts PMD distance point to cartesian coords with intrinsic matrix
* @param uv PMD depth point coords (row, col) //FIXME: order!
* @param dist PMD distance for given point
* @param intrinsicMatrix PMD camera' intrinsic matrix (from calibration)
* @return 3D Point in OpenCV format (CvPoint3D32f)
*/
CvPoint3D32f *cvProjectArrayToCartesian( const CvMat *intrinsicMatrix
, const CvPoint2D32f *pts, const int ptsCnt
, CvPoint3D32f *unitVs);
CvPoint3D32f **pmdProjectArrayToCartesian(const PMD *p, const CvMat *intrinsicMatrix, CvPoint3D32f **pts);
/**
* Get pmd camera' image size
* @param *p initialized PMD struct
* @return image size in OpenCV format (CvSize)
*/
CvSize pmdGetSize(const PMD *p);
/**
* Query PMD intensities image. Don't forget to call pmdUpdate before querying!
* @param *p initialized PMD struct
* @param *img destination IplImage (default: 0, a new one will be created if NULL is passed)
* @return image in OpenCV format (IplImage)
*/
IplImage *pmdQueryImage(PMD *p, IplImage *img);
IplImage *pmdQueryImageAsync(PMD *p, IplImage *img);
/**
* Store PMD distances (inside p->data)
*/
void pmdRetriveDistances(PMD *p);
void pmdRetriveDistancesAsync(PMD *p);
/**
* Query PMD distance image in OpenCV format
*/
IplImage *pmdQueryDistances(PMD *p, IplImage *img);
IplImage *pmdQueryDistancesAsync(PMD *p, IplImage *img);
/**
* Query PMD amplitudes image in OpenCV format (might be used as a belief measure)
*/
IplImage *pmdQueryAmplitudes(PMD *p, IplImage *img);
IplImage *pmdQueryAmplitudesAsync(PMD *p, IplImage *img);
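/*
 * Illustrative grab cycle (comment only): plugin name, IP address and the
 * intrinsic matrix file are placeholders borrowed from the example programs in
 * this commit.
 *
 *   PMD *pmd = initPMD("../o3d.L32.pcp", "192.168.0.69");
 *   CvSize sz = pmdGetSize(pmd);
 *   IplImage *imgPMD = cvCreateImage(sz, IPL_DEPTH_8U, 1);
 *   CvPoint3D32f **pts = (CvPoint3D32f**)cvAlloc(sz.height * sizeof(CvPoint3D32f*));
 *   for(int i = 0; i < sz.height; i++)
 *       pts[i] = (CvPoint3D32f*)cvAlloc(sz.width * sizeof(CvPoint3D32f));
 *   CvMat *intrinsicPMD = (CvMat*)cvLoad("../intrinsic-pmd-6x4.xml");
 *
 *   pmdUpdate(pmd->hnd);                                 // fetch a new frame
 *   pmdQueryImage(pmd, imgPMD);                          // 8-bit intensity image
 *   pmdRetriveDistances(pmd);                            // distances into pmd->data
 *   pmdProjectArrayToCartesian(pmd, intrinsicPMD, pts);  // 3D point per pixel
 *
 *   releasePMD(&pmd);
 */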

View file

@@ -0,0 +1,862 @@
/*
* scan implementation
*
* Copyright (C) Andreas Nuechter, Kai Lingemann, Dorit Borrmann, Jan Elseberg, Thomas Escher
*
* Released under the GPL version 3.
*
*/
#include "slam6d/scan.h"
#include "slam6d/basicScan.h"
#include "slam6d/managedScan.h"
#include "slam6d/metaScan.h"
#include "slam6d/searchTree.h"
#include "slam6d/kd.h"
#include "slam6d/Boctree.h"
#include "slam6d/globals.icc"
#ifdef WITH_METRICS
#include "slam6d/metrics.h"
#endif
#ifdef _MSC_VER
#define _NO_PARALLEL_READ
#endif
#ifdef __APPLE__
#define _NO_PARALLEL_READ
#endif
using std::vector;
vector<Scan*> Scan::allScans;
bool Scan::scanserver = false;
void Scan::openDirectory(bool scanserver, const std::string& path, IOType type,
int start, int end)
{
Scan::scanserver = scanserver;
if(scanserver)
ManagedScan::openDirectory(path, type, start, end);
else
BasicScan::openDirectory(path, type, start, end);
}
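/*
 * Illustrative call sequence (comment only): how a client such as slam6D
 * typically drives this interface. Directory, IO type, indices and parameter
 * values below are placeholder assumptions, not values taken from this file.
 *
 *   Scan::openDirectory(false, "dat/", UOS, 0, 10);
 *   for(size_t i = 0; i < Scan::allScans.size(); ++i) {
 *     Scan *scan = Scan::allScans[i];
 *     scan->setReductionParameter(10.0, 0);        // voxel size 10, octree centers
 *     scan->setSearchTreeParameter(simpleKD, false);
 *   }
 *   // ... matching / transform calls ...
 *   Scan::closeDirectory();
 */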
void Scan::closeDirectory()
{
if(scanserver)
ManagedScan::closeDirectory();
else
BasicScan::closeDirectory();
}
Scan::Scan()
{
unsigned int i;
// pose and transformations
for(i = 0; i < 3; ++i) rPos[i] = 0;
for(i = 0; i < 3; ++i) rPosTheta[i] = 0;
for(i = 0; i < 4; ++i) rQuat[i] = 0;
M4identity(transMat);
M4identity(transMatOrg);
M4identity(dalignxf);
// trees and reduction methods
cuda_enabled = false;
nns_method = -1;
kd = 0;
ann_kd_tree = 0;
// reduction on-demand
reduction_voxelSize = 0.0;
reduction_nrpts = 0;
reduction_pointtype = PointType();
// flags
m_has_reduced = false;
// octtree
octtree_reduction_voxelSize = 0.0;
octtree_voxelSize = 0.0;
octtree_pointtype = PointType();
octtree_loadOct = false;
octtree_saveOct = false;
}
Scan::~Scan()
{
if(kd) delete kd;
}
void Scan::setReductionParameter(double voxelSize, int nrpts, PointType pointtype)
{
reduction_voxelSize = voxelSize;
reduction_nrpts = nrpts;
reduction_pointtype = pointtype;
}
void Scan::setSearchTreeParameter(int nns_method, bool cuda_enabled)
{
searchtree_nnstype = nns_method;
searchtree_cuda_enabled = cuda_enabled;
}
void Scan::setOcttreeParameter(double reduction_voxelSize, double voxelSize, PointType pointtype, bool loadOct, bool saveOct)
{
octtree_reduction_voxelSize = reduction_voxelSize;
octtree_voxelSize = voxelSize;
octtree_pointtype = pointtype;
octtree_loadOct = loadOct;
octtree_saveOct = saveOct;
}
void Scan::clear(unsigned int types)
{
if(types & DATA_XYZ) clear("xyz");
if(types & DATA_RGB) clear("rgb");
if(types & DATA_REFLECTANCE) clear("reflectance");
if(types & DATA_TEMPERATURE) clear("temperature");
if(types & DATA_AMPLITUDE) clear("amplitude");
if(types & DATA_TYPE) clear("type");
if(types & DATA_DEVIATION) clear("deviation");
}
SearchTree* Scan::getSearchTree()
{
// if the search tree hasn't been created yet, calculate everything
if(kd == 0) {
createSearchTree();
}
return kd;
}
void Scan::toGlobal() {
calcReducedPoints();
transform(transMatOrg, INVALID);
}
/**
* Computes a search tree depending on the type.
*/
void Scan::createSearchTree()
{
// multiple threads will call this function at the same time because they all work on one pair of Scans, just let the first one (who sees a nullpointer) do the creation
boost::lock_guard<boost::mutex> lock(m_mutex_create_tree);
if(kd != 0) return;
// make sure the original points are created before starting the measurement
DataXYZ xyz_orig(get("xyz reduced original"));
#ifdef WITH_METRICS
Timer tc = ClientMetric::create_tree_time.start();
#endif //WITH_METRICS
createSearchTreePrivate();
#ifdef WITH_METRICS
ClientMetric::create_tree_time.end(tc);
#endif //WITH_METRICS
}
void Scan::calcReducedOnDemand()
{
// multiple threads will call this function at the same time because they all work on one pair of Scans, just let the first one (who sees count as zero) do the reduction
boost::lock_guard<boost::mutex> lock(m_mutex_reduction);
if(m_has_reduced) return;
#ifdef WITH_METRICS
Timer t = ClientMetric::on_demand_reduction_time.start();
#endif //WITH_METRICS
calcReducedOnDemandPrivate();
m_has_reduced = true;
#ifdef WITH_METRICS
ClientMetric::on_demand_reduction_time.end(t);
#endif //WITH_METRICS
}
void Scan::copyReducedToOriginal()
{
#ifdef WITH_METRICS
Timer t = ClientMetric::copy_original_time.start();
#endif //WITH_METRICS
DataXYZ xyz_reduced(get("xyz reduced"));
unsigned int size = xyz_reduced.size();
DataXYZ xyz_reduced_orig(create("xyz reduced original", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_reduced_orig[i][j] = xyz_reduced[i][j];
}
}
#ifdef WITH_METRICS
ClientMetric::copy_original_time.end(t);
#endif //WITH_METRICS
}
void Scan::copyOriginalToReduced()
{
#ifdef WITH_METRICS
Timer t = ClientMetric::copy_original_time.start();
#endif //WITH_METRICS
DataXYZ xyz_reduced_orig(get("xyz reduced original"));
unsigned int size = xyz_reduced_orig.size();
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_reduced[i][j] = xyz_reduced_orig[i][j];
}
}
#ifdef WITH_METRICS
ClientMetric::copy_original_time.end(t);
#endif //WITH_METRICS
}
/**
 * Computes an octtree of the current scan and then takes the
 * reduced points as the centers of the octree voxels.
*/
void Scan::calcReducedPoints()
{
#ifdef WITH_METRICS
Timer t = ClientMetric::scan_load_time.start();
#endif //WITH_METRICS
// get xyz to start the scan load, separated here for time measurement
DataXYZ xyz(get("xyz"));
DataReflectance reflectance(get("reflectance"));
if(xyz.size() == 0)
throw runtime_error("Could not calculate reduced points, XYZ data is empty");
if (reflectance.size()==0) {
// no reduction needed
// copy vector of points to array of points to avoid
// further copying
if(reduction_voxelSize <= 0.0) {
// copy the points
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*xyz.size()));
for(unsigned int i = 0; i < xyz.size(); ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_reduced[i][j] = xyz[i][j];
}
}
} else {
// start reduction
// build octree from CurrentScan
// put full data into the octtree
BOctTree<double> *oct = new BOctTree<double>(PointerArray<double>(xyz).get(),
xyz.size(), reduction_voxelSize, reduction_pointtype);
vector<double*> center;
center.clear();
if (reduction_nrpts > 0) {
if (reduction_nrpts == 1) {
oct->GetOctTreeRandom(center);
} else {
oct->GetOctTreeRandom(center, reduction_nrpts);
}
} else {
oct->GetOctTreeCenter(center);
}
// storing it as reduced scan
unsigned int size = center.size();
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_reduced[i][j] = center[i][j];
}
}
delete oct;
}
} else {
if(xyz.size() != reflectance.size())
throw runtime_error("Could not calculate reduced reflectance, reflectance size is different from points size");
double **xyz_reflectance = new double*[xyz.size()];
for (unsigned int i = 0; i < xyz.size(); ++i) {
xyz_reflectance[i] = new double[4];
for (unsigned int j = 0; j < 3; ++j)
xyz_reflectance[i][j] = xyz[i][j];
xyz_reflectance[i][3] = reflectance[i];
}
#ifdef WITH_METRICS
ClientMetric::scan_load_time.end(t);
Timer tl = ClientMetric::calc_reduced_points_time.start();
#endif //WITH_METRICS
// no reduction needed
// copy vector of points to array of points to avoid
// further copying
if(reduction_voxelSize <= 0.0) {
// copy the points
if (reduction_pointtype.hasReflectance()) {
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*xyz.size()));
DataReflectance reflectance_reduced(create("reflectance reduced", sizeof(double)*reflectance.size()));
for(unsigned int i = 0; i < xyz.size(); ++i) {
for(unsigned int j = 0; j < 3; ++j)
xyz_reduced[i][j] = xyz[i][j];
reflectance_reduced[i] = reflectance[i];
}
} else {
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*xyz.size()));
for(unsigned int i = 0; i < xyz.size(); ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_reduced[i][j] = xyz[i][j];
}
}
}
} else {
// start reduction
// build octree from CurrentScan
// put full data into the octtree
BOctTree<double> *oct = new BOctTree<double>(xyz_reflectance, xyz.size(), reduction_voxelSize, reduction_pointtype);
vector<double*> reduced;
reduced.clear();
if (reduction_nrpts > 0) {
if (reduction_nrpts == 1) {
oct->GetOctTreeRandom(reduced);
} else {
oct->GetOctTreeRandom(reduced, reduction_nrpts);
}
} else {
oct->GetOctTreeCenter(reduced);
}
// storing it as reduced scan
unsigned int size = reduced.size();
if (reduction_pointtype.hasReflectance()) {
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_reduced[i][j] = reduced[i][j];
}
}
DataReflectance reflectance_reduced(create("reflectance reduced", sizeof(float)*size));
for(unsigned int i = 0; i < size; ++i)
reflectance_reduced[i] = reduced[i][3];
} else {
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i)
for(unsigned int j = 0; j < 3; ++j)
xyz_reduced[i][j] = reduced[i][j];
}
delete oct;
}
for (unsigned int i = 0; i < xyz.size(); ++i) {
delete[] xyz_reflectance[i];
}
delete[] xyz_reflectance;
#ifdef WITH_METRICS
ClientMetric::calc_reduced_points_time.end(tl);
#endif //WITH_METRICS
}
}
/**
* Merges the scan's intrinsic coordinates with the robot position.
 * @param prevScan The scan whose transformation is extrapolated,
* i.e., odometry extrapolation
*
* For additional information see the following paper (jfr2007.pdf):
*
* Andreas Nüchter, Kai Lingemann, Joachim Hertzberg, and Hartmut Surmann,
* 6D SLAM - 3D Mapping Outdoor Environments Journal of Field Robotics (JFR),
* Special Issue on Quantitative Performance Evaluation of Robotic and Intelligent
* Systems, Wiley & Son, ISSN 1556-4959, Volume 24, Issue 8-9, pages 699 - 722,
* August/September, 2007
*
*/
void Scan::mergeCoordinatesWithRoboterPosition(Scan* prevScan)
{
double tempMat[16], deltaMat[16];
M4inv(prevScan->get_transMatOrg(), tempMat);
MMult(prevScan->get_transMat(), tempMat, deltaMat);
transform(deltaMat, INVALID); //apply delta transformation of the previous scan
}
/**
* The method transforms all points with the given transformation matrix.
*/
void Scan::transformAll(const double alignxf[16])
{
DataXYZ xyz(get("xyz"));
unsigned int i=0 ;
// #pragma omp parallel for
for(; i < xyz.size(); ++i) {
transform3(alignxf, xyz[i]);
}
// TODO: test for ManagedScan compatibility, may need a touch("xyz") to mark saving the new values
}
//! Internal function of transform which alters the reduced points
void Scan::transformReduced(const double alignxf[16])
{
#ifdef WITH_METRICS
Timer t = ClientMetric::transform_time.start();
#endif //WITH_METRICS
DataXYZ xyz_reduced(get("xyz reduced"));
unsigned int i=0;
// #pragma omp parallel for
for( ; i < xyz_reduced.size(); ++i) {
transform3(alignxf, xyz_reduced[i]);
}
#ifdef WITH_METRICS
ClientMetric::transform_time.end(t);
#endif //WITH_METRICS
}
//! Internal function of transform which handles the matrices
void Scan::transformMatrix(const double alignxf[16])
{
double tempxf[16];
// apply alignxf to transMat and update pose vectors
MMult(alignxf, transMat, tempxf);
memcpy(transMat, tempxf, sizeof(transMat));
Matrix4ToEuler(transMat, rPosTheta, rPos);
Matrix4ToQuat(transMat, rQuat);
#ifdef DEBUG
cerr << "(" << rPos[0] << ", " << rPos[1] << ", " << rPos[2] << ", "
<< rPosTheta[0] << ", " << rPosTheta[1] << ", " << rPosTheta[2] << ")" << endl;
cerr << transMat << endl;
#endif
// apply alignxf to dalignxf
MMult(alignxf, dalignxf, tempxf);
memcpy(dalignxf, tempxf, sizeof(transMat));
}
/**
* Transforms the scan by a given transformation and writes a new frame. The idea
* is to write an entry for every transformation to all frame files, so that the show
* program is able to determine which scans have to be drawn in which color. Hidden scans
* (or later processed scans) are written with INVALID.
*
* @param alignxf Transformation matrix
* @param colour Specifies which colour should be written to the frames file
* @param islum Is the transformation part of LUM, i.e., are all scans transformed?
* In this case only LUM transformation is stored, otherwise all scans are processed
* -1 no transformation is stored
* 0 ICP transformation
* 1 LUM transformation, all scans except last scan
* 2 LUM transformation, last scan only
*/
void Scan::transform(const double alignxf[16], const AlgoType type, int islum)
{
MetaScan* meta = dynamic_cast<MetaScan*>(this);
if(meta) {
for(unsigned int i = 0; i < meta->size(); ++i) {
meta->getScan(i)->transform(alignxf, type, -1);
}
}
#ifdef TRANSFORM_ALL_POINTS
transformAll(alignxf);
#endif //TRANSFORM_ALL_POINTS
#ifdef DEBUG
cerr << alignxf << endl;
cerr << "(" << rPos[0] << ", " << rPos[1] << ", " << rPos[2] << ", "
<< rPosTheta[0] << ", " << rPosTheta[1] << ", " << rPosTheta[2] << ") ---> ";
#endif
// transform points
transformReduced(alignxf);
// update matrices
transformMatrix(alignxf);
// store transformation in frames
if(type != INVALID) {
#ifdef WITH_METRICS
Timer t = ClientMetric::add_frames_time.start();
#endif //WITH_METRICS
bool in_meta;
MetaScan* meta = dynamic_cast<MetaScan*>(this);
int found = 0;
unsigned int scans_size = allScans.size();
switch (islum) {
case -1:
// write no transformation
break;
case 0:
for(unsigned int i = 0; i < scans_size; ++i) {
Scan* scan = allScans[i];
in_meta = false;
if(meta) {
for(unsigned int j = 0; j < meta->size(); ++j) {
if(meta->getScan(j) == scan) {
found = i;
in_meta = true;
}
}
}
if(scan == this || in_meta) {
found = i;
scan->addFrame(type);
} else {
if(found == 0) {
scan->addFrame(ICPINACTIVE);
} else {
scan->addFrame(INVALID);
}
}
}
break;
case 1:
addFrame(type);
break;
case 2:
for(unsigned int i = 0; i < scans_size; ++i) {
Scan* scan = allScans[i];
if(scan == this) {
found = i;
addFrame(type);
allScans[0]->addFrame(type);
continue;
}
if (found != 0) {
scan->addFrame(INVALID);
}
}
break;
default:
cerr << "invalid point transformation mode" << endl;
}
#ifdef WITH_METRICS
ClientMetric::add_frames_time.end(t);
#endif //WITH_METRICS
}
}
/**
* Transforms the scan by a given transformation and writes a new frame. The idea
* is to write an entry for every transformation to all frame files, so that the show
* program is able to determine which scans have to be drawn in which color. Hidden scans
* (or later processed scans) are written with INVALID.
*
* @param alignQuat Quaternion for the rotation
* @param alignt Translation vector
* @param colour Specifies which colour should be written to the frames file
* @param islum Is the transformation part of LUM, i.e., are all scans transformed?
* In this case only LUM transformation is stored, otherwise all scans are processed
* -1 no transformation is stored
* 0 ICP transformation
* 1 LUM transformation, all scans except last scan
* 2 LUM transformation, last scan only
*/
void Scan::transform(const double alignQuat[4], const double alignt[3],
const AlgoType type, int islum)
{
double alignxf[16];
QuatToMatrix4(alignQuat, alignt, alignxf);
transform(alignxf, type, islum);
}
/**
* Transforms the scan so that the given matrix
* represents the next pose.
*
* @param alignxf Transformation matrix to which this scan will be set
* @param islum Is the transformation part of LUM?
*/
void Scan::transformToMatrix(double alignxf[16], const AlgoType type, int islum)
{
double tinv[16];
M4inv(transMat, tinv);
transform(tinv, INVALID);
transform(alignxf, type, islum);
}
/**
* Transforms the scan so that the given Euler angles
* represent the next pose.
*
* @param rP Translation to which this scan will be set
* @param rPT Orientation as Euler angle to which this scan will be set
* @param islum Is the transformation part of LUM?
*/
void Scan::transformToEuler(double rP[3], double rPT[3], const AlgoType type, int islum)
{
#ifdef WITH_METRICS
// called in openmp context in lum6Deuler.cc:422
ClientMetric::transform_time.set_threadsafety(true);
ClientMetric::add_frames_time.set_threadsafety(true);
#endif //WITH_METRICS
double tinv[16];
double alignxf[16];
M4inv(transMat, tinv);
transform(tinv, INVALID);
EulerToMatrix4(rP, rPT, alignxf);
transform(alignxf, type, islum);
#ifdef WITH_METRICS
ClientMetric::transform_time.set_threadsafety(false);
ClientMetric::add_frames_time.set_threadsafety(false);
#endif //WITH_METRICS
}
/**
* Transforms the scan so that the given quaternion
* represents the next pose.
*
* @param rP Translation to which this scan will be set
* @param rPQ Orientation as Quaternion to which this scan will be set
* @param islum Is the transformation part of LUM?
*/
void Scan::transformToQuat(double rP[3], double rPQ[4], const AlgoType type, int islum)
{
double tinv[16];
double alignxf[16];
M4inv(transMat, tinv);
transform(tinv, INVALID);
QuatToMatrix4(rPQ, rP, alignxf);
transform(alignxf, type, islum);
}
/**
* Calculates Source \ Target, i.e., the points of Source without a
* corresponding point in Target. It computes the k-d tree of Target and
* deletes it after the search. This slow function should be used only for testing.
*
* @param diff The resulting points without correspondence (vector will be filled)
* @param Source The scan whose points are checked for correspondences
* @param Target The scan to which the points are matched
* @param thread_num number of the thread (for parallelization)
* @param rnd randomized point selection
* @param max_dist_match2 maximal allowed distance for matching
*/
void Scan::getNoPairsSimple(vector <double*> &diff,
Scan* Source, Scan* Target,
int thread_num,
double max_dist_match2)
{
DataXYZ xyz_reduced(Source->get("xyz reduced"));
KDtree* kd = new KDtree(PointerArray<double>(Target->get("xyz reduced")).get(), Target->size<DataXYZ>("xyz reduced"));
cout << "Max: " << max_dist_match2 << endl;
for (unsigned int i = 0; i < xyz_reduced.size(); i++) {
double p[3];
p[0] = xyz_reduced[i][0];
p[1] = xyz_reduced[i][1];
p[2] = xyz_reduced[i][2];
double *closest = kd->FindClosest(p, max_dist_match2, thread_num);
if (!closest) {
diff.push_back(xyz_reduced[i]);
//diff.push_back(closest);
}
}
delete kd;
}
/**
* Calculates a set of corresponding point pairs and returns them. It
* computes the k-d trees and deletes them after the pairs have been
* found. This slow function should be used only for testing.
*
* @param pairs The resulting point pairs (vector will be filled)
* @param Source The scan whose points are matched to Target's points
* @param Target The scan to which the points are matched
* @param thread_num number of the thread (for parallelization)
* @param rnd randomized point selection
* @param max_dist_match2 maximal allowed distance for matching
*/
void Scan::getPtPairsSimple(vector <PtPair> *pairs,
Scan* Source, Scan* Target,
int thread_num,
int rnd, double max_dist_match2,
double *centroid_m, double *centroid_d)
{
KDtree* kd = new KDtree(PointerArray<double>(Source->get("xyz reduced")).get(), Source->size<DataXYZ>("xyz reduced"));
DataXYZ xyz_reduced(Target->get("xyz reduced"));
for (unsigned int i = 0; i < xyz_reduced.size(); i++) {
if (rnd > 1 && rand(rnd) != 0) continue; // take about 1/rnd-th of the numbers only
double p[3];
p[0] = xyz_reduced[i][0];
p[1] = xyz_reduced[i][1];
p[2] = xyz_reduced[i][2];
double *closest = kd->FindClosest(p, max_dist_match2, thread_num);
if (closest) {
centroid_m[0] += closest[0];
centroid_m[1] += closest[1];
centroid_m[2] += closest[2];
centroid_d[0] += p[0];
centroid_d[1] += p[1];
centroid_d[2] += p[2];
PtPair myPair(closest, p);
pairs->push_back(myPair);
}
}
centroid_m[0] /= pairs[thread_num].size();
centroid_m[1] /= pairs[thread_num].size();
centroid_m[2] /= pairs[thread_num].size();
centroid_d[0] /= pairs[thread_num].size();
centroid_d[1] /= pairs[thread_num].size();
centroid_d[2] /= pairs[thread_num].size();
delete kd;
}
/**
* Calculates a set of corresponding point pairs and returns them.
* The function uses the k-d trees stored in the scan class, thus
* the functions createTrees and deleteTrees have to be called before
* resp. afterwards.
* Here we implement the so-called "fast corresponding points": the k-d
* trees are not recomputed; instead, the inverse transformation is applied
* to the given point set.
*
* @param pairs The resulting point pairs (vector will be filled)
* @param Source The scan whose points are matched to Target's points
* @param Target The scan to which the points are matched
* @param thread_num number of the thread (for parallelization)
* @param rnd randomized point selection
* @param max_dist_match2 maximal allowed distance for matching
* @return a set of corresponding point pairs
*/
void Scan::getPtPairs(vector <PtPair> *pairs,
Scan* Source, Scan* Target,
int thread_num,
int rnd, double max_dist_match2, double &sum,
double *centroid_m, double *centroid_d)
{
// initialize centroids
for(unsigned int i = 0; i < 3; ++i) {
centroid_m[i] = 0;
centroid_d[i] = 0;
}
// get point pairs
DataXYZ xyz_reduced(Target->get("xyz reduced"));
Source->getSearchTree()->getPtPairs(pairs, Source->dalignxf,
xyz_reduced, 0, xyz_reduced.size(),
thread_num,
rnd, max_dist_match2, sum, centroid_m, centroid_d);
// normalize centroids
unsigned int size = pairs->size();
if(size != 0) {
for(unsigned int i = 0; i < 3; ++i) {
centroid_m[i] /= size;
centroid_d[i] /= size;
}
}
}
/**
* Calculates a set of corresponding point pairs and returns them.
* The function uses the k-d trees stored in the scan class, thus
* the functions createTrees and deleteTrees have to be called before
* resp. afterwards.
*
* @param pairs The resulting point pairs (vector will be filled)
* @param Source The scan whose points are matched to Target's points
* @param Target The scan to which the points are matched
* @param thread_num The number of the thread that is computing ptPairs in parallel
* @param step The number of steps for parallelization
* @param rnd randomized point selection
* @param max_dist_match2 maximal allowed distance for matching
* @param sum The sum of distances of the points
*
* These intermediate values are for the parallel ICP algorithm
* introduced in the paper
* "The Parallel Iterative Closest Point Algorithm"
* by Langis / Greenspan / Godin, IEEE 3DIM 2001
*
*/
void Scan::getPtPairsParallel(vector <PtPair> *pairs, Scan* Source, Scan* Target,
int thread_num, int step,
int rnd, double max_dist_match2,
double *sum,
double centroid_m[OPENMP_NUM_THREADS][3], double centroid_d[OPENMP_NUM_THREADS][3])
{
// initialize centroids
for(unsigned int i = 0; i < 3; ++i) {
centroid_m[thread_num][i] = 0;
centroid_d[thread_num][i] = 0;
}
// get point pairs
SearchTree* search = Source->getSearchTree();
// differentiate between a meta scan (which has no reduced points) and a normal scan
// if Source is also a meta scan it already has a special meta-kd-tree
MetaScan* meta = dynamic_cast<MetaScan*>(Target);
if(meta) {
for(unsigned int i = 0; i < meta->size(); ++i) {
// determine step for each scan individually
DataXYZ xyz_reduced(meta->getScan(i)->get("xyz reduced"));
unsigned int max = xyz_reduced.size();
unsigned int step = max / OPENMP_NUM_THREADS;
// call ptpairs for each scan and accumulate ptpairs, centroids and sum
search->getPtPairs(&pairs[thread_num], Source->dalignxf,
xyz_reduced, step * thread_num, step * thread_num + step,
thread_num,
rnd, max_dist_match2, sum[thread_num],
centroid_m[thread_num], centroid_d[thread_num]);
}
} else {
DataXYZ xyz_reduced(Target->get("xyz reduced"));
search->getPtPairs(&pairs[thread_num], Source->dalignxf,
xyz_reduced, thread_num * step, thread_num * step + step,
thread_num,
rnd, max_dist_match2, sum[thread_num],
centroid_m[thread_num], centroid_d[thread_num]);
}
// normalize centroids
unsigned int size = pairs[thread_num].size();
if(size != 0) {
for(unsigned int i = 0; i < 3; ++i) {
centroid_m[thread_num][i] /= size;
centroid_d[thread_num][i] /= size;
}
}
}
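A minimal caller sketch (not part of this commit): it assumes Source, Target, rnd and max_dist_match2 come from the surrounding ICP code, that <omp.h> is included, and that the function is callable as a static member, as its Source/Target parameters suggest. Each thread fills its own slot of the buffers, which are merged into the global error function afterwards.

vector<PtPair> pairs[OPENMP_NUM_THREADS];
double sum[OPENMP_NUM_THREADS];
double centroid_m[OPENMP_NUM_THREADS][3], centroid_d[OPENMP_NUM_THREADS][3];
// each thread handles one contiguous block of Target's reduced points
int step = Target->size<DataXYZ>("xyz reduced") / OPENMP_NUM_THREADS;
#pragma omp parallel num_threads(OPENMP_NUM_THREADS)
{
  int thread_num = omp_get_thread_num();
  sum[thread_num] = 0.0;
  Scan::getPtPairsParallel(pairs, Source, Target, thread_num, step,
                           rnd, max_dist_match2, sum,
                           centroid_m, centroid_d);
}
// afterwards accumulate pairs[t], centroid_m[t], centroid_d[t] and sum[t] over all threads t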
unsigned int Scan::getMaxCountReduced(ScanVector& scans)
{
unsigned int max = 0;
for(std::vector<Scan*>::iterator it = scans.begin(); it != scans.end(); ++it) {
unsigned int count = (*it)->size<DataXYZ>("xyz reduced");
if(count > max)
max = count;
}
return max;
}

View file

@@ -0,0 +1,443 @@
// Copyright (C) 2007 by Cristóbal Carnero Liñán
// grendel.ccl@gmail.com
//
// This file is part of cvBlob.
//
// cvBlob is free software: you can redistribute it and/or modify
// it under the terms of the Lesser GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// cvBlob is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Lesser GNU General Public License for more details.
//
// You should have received a copy of the Lesser GNU General Public License
// along with cvBlob. If not, see <http://www.gnu.org/licenses/>.
//
#include <stdexcept>
#include <iostream>
using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"
namespace cvb
{
const char movesE[4][3][4] = { { {-1, -1, 3, CV_CHAINCODE_UP_LEFT }, { 0, -1, 0, CV_CHAINCODE_UP }, { 1, -1, 0, CV_CHAINCODE_UP_RIGHT } },
{ { 1, -1, 0, CV_CHAINCODE_UP_RIGHT }, { 1, 0, 1, CV_CHAINCODE_RIGHT}, { 1, 1, 1, CV_CHAINCODE_DOWN_RIGHT } },
{ { 1, 1, 1, CV_CHAINCODE_DOWN_RIGHT}, { 0, 1, 2, CV_CHAINCODE_DOWN }, {-1, 1, 2, CV_CHAINCODE_DOWN_LEFT } },
{ {-1, 1, 2, CV_CHAINCODE_DOWN_LEFT }, {-1, 0, 3, CV_CHAINCODE_LEFT }, {-1, -1, 3, CV_CHAINCODE_UP_LEFT } }
};
const char movesI[4][3][4] = { { { 1, -1, 3, CV_CHAINCODE_UP_RIGHT }, { 0, -1, 0, CV_CHAINCODE_UP }, {-1, -1, 0, CV_CHAINCODE_UP_LEFT } },
{ {-1, -1, 0, CV_CHAINCODE_UP_LEFT }, {-1, 0, 1, CV_CHAINCODE_LEFT }, {-1, 1, 1, CV_CHAINCODE_DOWN_LEFT } },
{ {-1, 1, 1, CV_CHAINCODE_DOWN_LEFT }, { 0, 1, 2, CV_CHAINCODE_DOWN }, { 1, 1, 2, CV_CHAINCODE_DOWN_RIGHT } },
{ { 1, 1, 2, CV_CHAINCODE_DOWN_RIGHT }, { 1, 0, 3, CV_CHAINCODE_RIGHT}, { 1, -1, 3, CV_CHAINCODE_UP_RIGHT } }
};
unsigned int cvLabel (IplImage const *img, IplImage *imgOut, CvBlobs &blobs)
{
CV_FUNCNAME("cvLabel");
__CV_BEGIN__;
{
CV_ASSERT(img&&(img->depth==IPL_DEPTH_8U)&&(img->nChannels==1));
CV_ASSERT(imgOut&&(imgOut->depth==IPL_DEPTH_LABEL)&&(img->nChannels==1));
unsigned int numPixels=0;
cvSetZero(imgOut);
CvLabel label=0;
cvReleaseBlobs(blobs);
unsigned int stepIn = img->widthStep / (img->depth / 8);
unsigned int stepOut = imgOut->widthStep / (imgOut->depth / 8);
unsigned int imgIn_width = img->width;
unsigned int imgIn_height = img->height;
unsigned int imgIn_offset = 0;
unsigned int imgOut_width = imgOut->width;
unsigned int imgOut_height = imgOut->height;
unsigned int imgOut_offset = 0;
if(img->roi)
{
imgIn_width = img->roi->width;
imgIn_height = img->roi->height;
imgIn_offset = img->roi->xOffset + (img->roi->yOffset * stepIn);
}
if(imgOut->roi)
{
imgOut_width = imgOut->roi->width;
imgOut_height = imgOut->roi->height;
imgOut_offset = imgOut->roi->xOffset + (imgOut->roi->yOffset * stepOut);
}
unsigned char *imgDataIn = (unsigned char *)img->imageData + imgIn_offset;
CvLabel *imgDataOut = (CvLabel *)imgOut->imageData + imgOut_offset;
#define imageIn(X, Y) imgDataIn[(X) + (Y)*stepIn]
#define imageOut(X, Y) imgDataOut[(X) + (Y)*stepOut]
CvLabel lastLabel = 0;
CvBlob *lastBlob = NULL;
for (unsigned int y=0; y<imgIn_height; y++)
{
for (unsigned int x=0; x<imgIn_width; x++)
{
if (imageIn(x, y))
{
bool labeled = imageOut(x, y);
if ((!imageOut(x, y))&&((y==0)||(!imageIn(x, y-1))))
{
labeled = true;
// Label contour.
label++;
CV_ASSERT(label!=CV_BLOB_MAX_LABEL);
imageOut(x, y) = label;
numPixels++;
// XXX This is not necessary at all. I only do this for consistency.
if (y>0)
imageOut(x, y-1) = CV_BLOB_MAX_LABEL;
CvBlob *blob = new CvBlob;
blob->label = label;
blob->area = 1;
blob->minx = x; blob->maxx = x;
blob->miny = y; blob->maxy = y;
blob->m10=x; blob->m01=y;
blob->m11=x*y;
blob->m20=x*x; blob->m02=y*y;
blob->internalContours.clear();
blobs.insert(CvLabelBlob(label,blob));
lastLabel = label;
lastBlob = blob;
blob->contour.startingPoint = cvPoint(x, y);
unsigned char direction=1;
unsigned int xx = x;
unsigned int yy = y;
bool contourEnd = false;
do
{
for (unsigned int numAttempts=0; numAttempts<3; numAttempts++)
{
bool found = false;
for (unsigned char i=0; i<3; i++)
{
int nx = xx+movesE[direction][i][0];
int ny = yy+movesE[direction][i][1];
if ((nx<imgIn_width)&&(nx>=0)&&(ny<imgIn_height)&&(ny>=0))
{
if (imageIn(nx, ny))
{
found = true;
blob->contour.chainCode.push_back(movesE[direction][i][3]);
xx=nx;
yy=ny;
direction=movesE[direction][i][2];
break;
}
else
{
imageOut(nx, ny) = CV_BLOB_MAX_LABEL;
}
}
}
if (!found)
direction=(direction+1)%4;
else
{
imageOut(xx, yy) = label;
numPixels++;
if (xx<blob->minx) blob->minx = xx;
else if (xx>blob->maxx) blob->maxx = xx;
if (yy<blob->miny) blob->miny = yy;
else if (yy>blob->maxy) blob->maxy = yy;
blob->area++;
blob->m10+=xx; blob->m01+=yy;
blob->m11+=xx*yy;
blob->m20+=xx*xx; blob->m02+=yy*yy;
break;
}
if (contourEnd = ((xx==x) && (yy==y) && (direction==1)))
break;
}
}
while (!contourEnd);
}
if ((y+1<imgIn_height)&&(!imageIn(x, y+1))&&(!imageOut(x, y+1)))
{
labeled = true;
// Label internal contour
CvLabel l;
CvBlob *blob = NULL;
if (!imageOut(x, y))
{
/*if (!imageOut(x-1, y))
{
cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl;
continue;
}*/
l = imageOut(x-1, y);
imageOut(x, y) = l;
numPixels++;
if (l==lastLabel)
blob = lastBlob;
else
{
blob = blobs.find(l)->second;
lastLabel = l;
lastBlob = blob;
}
blob->area++;
blob->m10+=x; blob->m01+=y;
blob->m11+=x*y;
blob->m20+=x*x; blob->m02+=y*y;
}
else
{
l = imageOut(x, y);
if (l==lastLabel)
blob = lastBlob;
else
{
blob = blobs.find(l)->second;
lastLabel = l;
lastBlob = blob;
}
}
// XXX This is not necessary (I believe). I only do this for consistency.
imageOut(x, y+1) = CV_BLOB_MAX_LABEL;
CvContourChainCode *contour = new CvContourChainCode;
contour->startingPoint = cvPoint(x, y);
unsigned char direction = 3;
unsigned int xx = x;
unsigned int yy = y;
do
{
for (unsigned int numAttempts=0; numAttempts<3; numAttempts++)
{
bool found = false;
for (unsigned char i=0; i<3; i++)
{
int nx = xx+movesI[direction][i][0];
int ny = yy+movesI[direction][i][1];
if (imageIn(nx, ny))
{
found = true;
contour->chainCode.push_back(movesI[direction][i][3]);
xx=nx;
yy=ny;
direction=movesI[direction][i][2];
break;
}
else
{
imageOut(nx, ny) = CV_BLOB_MAX_LABEL;
}
}
if (!found)
direction=(direction+1)%4;
else
{
if (!imageOut(xx, yy))
{
imageOut(xx, yy) = l;
numPixels++;
blob->area++;
blob->m10+=xx; blob->m01+=yy;
blob->m11+=xx*yy;
blob->m20+=xx*xx; blob->m02+=yy*yy;
}
break;
}
}
}
while (!(xx==x && yy==y));
blob->internalContours.push_back(contour);
}
//else if (!imageOut(x, y))
if (!labeled)
{
// Internal pixel
CvLabel l = imageOut(x-1, y);
imageOut(x, y) = l;
numPixels++;
CvBlob *blob = NULL;
if (l==lastLabel)
blob = lastBlob;
else
{
blob = blobs.find(l)->second;
lastLabel = l;
lastBlob = blob;
}
blob->area++;
blob->m10+=x; blob->m01+=y;
blob->m11+=x*y;
blob->m20+=x*x; blob->m02+=y*y;
}
}
}
}
for (CvBlobs::iterator it=blobs.begin(); it!=blobs.end(); ++it)
{
cvCentroid((*it).second);
(*it).second->u11 = (*it).second->m11 - ((*it).second->m10*(*it).second->m01)/(*it).second->m00;
(*it).second->u20 = (*it).second->m20 - ((*it).second->m10*(*it).second->m10)/(*it).second->m00;
(*it).second->u02 = (*it).second->m02 - ((*it).second->m01*(*it).second->m01)/(*it).second->m00;
double m00_2 = (*it).second->m00 * (*it).second->m00;
(*it).second->n11 = (*it).second->u11 / m00_2;
(*it).second->n20 = (*it).second->u20 / m00_2;
(*it).second->n02 = (*it).second->u02 / m00_2;
(*it).second->p1 = (*it).second->n20 + (*it).second->n02;
double nn = (*it).second->n20 - (*it).second->n02;
(*it).second->p2 = nn*nn + 4.*((*it).second->n11*(*it).second->n11);
}
return numPixels;
}
__CV_END__;
}
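A typical call sequence (a sketch, not from this commit): the input must be a binarised single-channel 8-bit image, and the label image has to be allocated with the cvBlob-specific depth IPL_DEPTH_LABEL before cvLabel can write into it.

// imgBin: IplImage*, IPL_DEPTH_8U, 1 channel, already thresholded
IplImage *imgLabel = cvCreateImage(cvGetSize(imgBin), IPL_DEPTH_LABEL, 1);
cvb::CvBlobs blobs;
unsigned int numPixels = cvb::cvLabel(imgBin, imgLabel, blobs); // one CvBlob per connected region
// ... inspect blobs (area, bounding box, moments) ...
cvb::cvReleaseBlobs(blobs);
cvReleaseImage(&imgLabel);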
void cvFilterLabels(IplImage *imgIn, IplImage *imgOut, const CvBlobs &blobs)
{
CV_FUNCNAME("cvFilterLabels");
__CV_BEGIN__;
{
CV_ASSERT(imgIn&&(imgIn->depth==IPL_DEPTH_LABEL)&&(imgIn->nChannels==1));
CV_ASSERT(imgOut&&(imgOut->depth==IPL_DEPTH_8U)&&(imgOut->nChannels==1));
int stepIn = imgIn->widthStep / (imgIn->depth / 8);
int stepOut = imgOut->widthStep / (imgOut->depth / 8);
int imgIn_width = imgIn->width;
int imgIn_height = imgIn->height;
int imgIn_offset = 0;
int imgOut_width = imgOut->width;
int imgOut_height = imgOut->height;
int imgOut_offset = 0;
if(imgIn->roi)
{
imgIn_width = imgIn->roi->width;
imgIn_height = imgIn->roi->height;
imgIn_offset = imgIn->roi->xOffset + (imgIn->roi->yOffset * stepIn);
}
if(imgOut->roi)
{
imgOut_width = imgOut->roi->width;
imgOut_height = imgOut->roi->height;
imgOut_offset = imgOut->roi->xOffset + (imgOut->roi->yOffset * stepOut);
}
char *imgDataOut=imgOut->imageData + imgOut_offset;
CvLabel *imgDataIn=(CvLabel *)imgIn->imageData + imgIn_offset;
for (unsigned int r=0;r<(unsigned int)imgIn_height;r++,
imgDataIn+=stepIn,imgDataOut+=stepOut)
{
for (unsigned int c=0;c<(unsigned int)imgIn_width;c++)
{
if (imgDataIn[c])
{
if (blobs.find(imgDataIn[c])==blobs.end()) imgDataOut[c]=0x00;
else imgDataOut[c]=(char)0xff;
}
else
imgDataOut[c]=0x00;
}
}
}
__CV_END__;
}
CvLabel cvGetLabel(IplImage const *img, unsigned int x, unsigned int y)
{
CV_FUNCNAME("cvGetLabel");
__CV_BEGIN__;
{
CV_ASSERT(img&&(img->depth==IPL_DEPTH_LABEL)&&(img->nChannels==1));
int step = img->widthStep / (img->depth / 8);
int img_width = 0;
int img_height= 0;
int img_offset = 0;
if(img->roi)
{
img_width = img->roi->width;
img_height = img->roi->height;
img_offset = img->roi->xOffset + (img->roi->yOffset * step);
}
else
{
img_width = img->width;
img_height= img->height;
}
CV_ASSERT((x>=0)&&(x<img_width)&&(y>=0)&&(y<img_height));
return ((CvLabel *)(img->imageData + img_offset))[x + y*step];
}
__CV_END__;
}
}

View file

@@ -0,0 +1,106 @@
// Copyright (C) 2007 by Cristóbal Carnero Liñán
// grendel.ccl@gmail.com
//
// This file is part of cvBlob.
//
// cvBlob is free software: you can redistribute it and/or modify
// it under the terms of the Lesser GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// cvBlob is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Lesser GNU General Public License for more details.
//
// You should have received a copy of the Lesser GNU General Public License
// along with cvBlob. If not, see <http://www.gnu.org/licenses/>.
//
#include <cmath>
#include <iostream>
using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"
namespace cvb
{
CvScalar cvBlobMeanColor(CvBlob const *blob, IplImage const *imgLabel, IplImage const *img)
{
CV_FUNCNAME("cvBlobMeanColor");
__CV_BEGIN__;
{
CV_ASSERT(imgLabel&&(imgLabel->depth==IPL_DEPTH_LABEL)&&(imgLabel->nChannels==1));
CV_ASSERT(img&&(img->depth==IPL_DEPTH_8U)&&(img->nChannels==3));
int stepLbl = imgLabel->widthStep/(imgLabel->depth/8);
int stepImg = img->widthStep/(img->depth/8);
int imgLabel_width = imgLabel->width;
int imgLabel_height = imgLabel->height;
int imgLabel_offset = 0;
int img_width = img->width;
int img_height = img->height;
int img_offset = 0;
if(imgLabel->roi)
{
imgLabel_width = imgLabel->roi->width;
imgLabel_height = imgLabel->roi->height;
imgLabel_offset = (imgLabel->nChannels * imgLabel->roi->xOffset) + (imgLabel->roi->yOffset * stepLbl);
}
if(img->roi)
{
img_width = img->roi->width;
img_height = img->roi->height;
img_offset = (img->nChannels * img->roi->xOffset) + (img->roi->yOffset * stepImg);
}
CvLabel *labels = (CvLabel *)imgLabel->imageData + imgLabel_offset;
unsigned char *imgData = (unsigned char *)img->imageData + img_offset;
double mb = 0;
double mg = 0;
double mr = 0;
double pixels = (double)blob->area;
for (unsigned int r=0; r<(unsigned int)imgLabel_height; r++, labels+=stepLbl, imgData+=stepImg)
for (unsigned int c=0; c<(unsigned int)imgLabel_width; c++)
{
if (labels[c]==blob->label)
{
mb += ((double)imgData[img->nChannels*c+0])/pixels; // B
mg += ((double)imgData[img->nChannels*c+1])/pixels; // G
mr += ((double)imgData[img->nChannels*c+2])/pixels; // R
}
}
/*double mb = 0;
double mg = 0;
double mr = 0;
double pixels = (double)blob->area;
for (unsigned int y=0; y<imgLabel->height; y++)
for (unsigned int x=0; x<imgLabel->width; x++)
{
if (cvGetLabel(imgLabel, x, y)==blob->label)
{
CvScalar color = cvGet2D(img, y, x);
mb += color.val[0]/pixels;
mg += color.val[1]/pixels;
mr += color.val[2]/pixels;
}
}*/
return cvScalar(mr, mg, mb);
}
__CV_END__;
}
}
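For reference, a hedged usage sketch (not part of the commit): the function expects the label image produced by cvLabel together with the original 8-bit, 3-channel BGR frame, and returns the blob's mean colour with red, green and blue in val[0], val[1] and val[2] respectively.

cvb::CvBlob *blob = blobs.begin()->second; // some blob obtained from cvLabel
CvScalar mean = cvb::cvBlobMeanColor(blob, imgLabel, imgBGR);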

Binary file not shown.

3rdparty/CMakeModules/OpenCV.cmake vendored Normal file
View file

@@ -0,0 +1,15 @@
find_package(OpenCV REQUIRED)
if(EXISTS "${OpenCV_DIR}/OpenCVConfig.cmake")
include("${OpenCV_DIR}/OpenCVConfig.cmake")
set(ADDITIONAL_OPENCV_FLAGS
"-DCV_MINOR_VERSION=${OpenCV_VERSION_MINOR} -DCV_MAJOR_VERSION=${OpenCV_VERSION_MAJOR}"
CACHE STRING"OpenCV Version Defines)"
)
## Include the standard CMake script
ELSE(EXISTS "${OpenCV_DIR}/OpenCVConfig.cmake")
set(ADDITIONAL_OPENCV_FLAGS
""
CACHE STRING"OpenCV Version Defines (BLUB)"
)
endif(EXISTS "${OpenCV_DIR}/OpenCVConfig.cmake")

View file

@ -33,7 +33,7 @@ set(CVBLOB_SRCS
set_source_files_properties(${cvBlob_SRC}
PROPERTIES
COMPILE_FLAGS "-O3"
COMPILE_FLAGS "-O3 ${ADDITIONAL_OPENCV_FLAGS}"
)
add_library(cvblob STATIC ${CVBLOB_SRCS})

View file

@ -21,8 +21,10 @@
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#else
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"

View file

@ -24,9 +24,11 @@ using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#else
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#include <opencv/highgui.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"

View file

@ -38,8 +38,10 @@
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#else
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#ifndef __CV_BEGIN__

View file

@ -23,8 +23,10 @@ using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#else
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"

View file

@ -30,8 +30,10 @@ using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#else
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"

View file

@ -23,8 +23,10 @@ using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#else
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"

View file

@ -24,8 +24,10 @@ using namespace std;
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#else
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "cvblob.h"

View file

@ -1,6 +1,9 @@
cmake_minimum_required (VERSION 2.8.2)
SET(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/3rdparty/CMakeModules" ${CMAKE_MODULE_PATH})
project (Slam6D)
project (3DTK)
#include_directories(OPENGL_INCLUDE_DIR)
IF(WIN32)
@ -27,8 +30,75 @@ endif()
# Declare Options and modify build accordingly ##
#################################################
FUNCTION(ENFORCE_OPTION_DEP_3DTK option VALUE)
SET (${option} "${VALUE}" CACHE BOOL "${${option}_DESCRIPTION}" FORCE) # this option set to VALUE as advised
#now make sure other dependencies are also true
FOREACH(d ${${option}_DEPENDENCIES}) # look through all my dependencies
STRING(REGEX REPLACE " +" ";" CMAKE_DEPENDENT_OPTION_DEP "${d}")
# check for a not in front
STRING(STRIP "${CMAKE_DEPENDENT_OPTION_DEP}" CMAKE_DEPENDENT_OPTION_DEP)
STRING(SUBSTRING "${CMAKE_DEPENDENT_OPTION_DEP}" 0 3 CMAKE_DEPENDENT_OPTION_DEP_3)
STRING(TOUPPER "${CMAKE_DEPENDENT_OPTION_DEP_3}" CMAKE_DEPENDENT_OPTION_DEP_3)
STRING(COMPARE EQUAL "${CMAKE_DEPENDENT_OPTION_DEP_3}" "NOT" CMAKE_DEPENDENT_OPTION_DEP_NOT)
#STRING(REPLACE "NOT " "" CMAKE_DEPENDENT_OPTION_DEP "${d}")
IF(CMAKE_DEPENDENT_OPTION_DEP_NOT) # we found a NOT
STRING(REPLACE "NOT;" "" CMAKE_DEPENDENT_OPTION_DEP "${CMAKE_DEPENDENT_OPTION_DEP}")
IF(${CMAKE_DEPENDENT_OPTION_DEP}) # not met, make sure it is
ENFORCE_OPTION_DEP_3DTK(${CMAKE_DEPENDENT_OPTION_DEP} OFF)
ELSE(${CMAKE_DEPENDENT_OPTION_DEP}) # dependency is met
ENDIF(${CMAKE_DEPENDENT_OPTION_DEP})
ELSE(CMAKE_DEPENDENT_OPTION_DEP_NOT)
IF(${CMAKE_DEPENDENT_OPTION_DEP}) # dependency is met
ELSE(${CMAKE_DEPENDENT_OPTION_DEP}) # not met, make sure it is
ENFORCE_OPTION_DEP_3DTK(${CMAKE_DEPENDENT_OPTION_DEP} ON)
ENDIF(${CMAKE_DEPENDENT_OPTION_DEP})
ENDIF(CMAKE_DEPENDENT_OPTION_DEP_NOT)
ENDFOREACH(d)
ENDFUNCTION(ENFORCE_OPTION_DEP_3DTK)
MACRO(OPT_DEP option doc default depends)
OPTION(${option} "${doc}" "${default}")
SET(${option} "${${option}}" CACHE BOOL "${doc}" FORCE)
SET(${option}_DEPENDENCIES "${depends}" CACHE INTERNAL "" FORCE)
SET(${option}_DESCRIPTION "${doc}" CACHE INTERNAL "" FORCE)
IF (${option})
#MESSAGE(STATUS "Yes ${option} is true")
# MESSAGE("FOREACH d in ${depends}")
FOREACH(d ${depends})
STRING(REGEX REPLACE " +" ";" CMAKE_DEPENDENT_OPTION_DEP "${d}")
# check for a not in front
STRING(STRIP "${CMAKE_DEPENDENT_OPTION_DEP}" CMAKE_DEPENDENT_OPTION_DEP)
STRING(SUBSTRING "${CMAKE_DEPENDENT_OPTION_DEP}" 0 3 CMAKE_DEPENDENT_OPTION_DEP_3)
STRING(TOUPPER "${CMAKE_DEPENDENT_OPTION_DEP_3}" CMAKE_DEPENDENT_OPTION_DEP_3)
STRING(COMPARE EQUAL "${CMAKE_DEPENDENT_OPTION_DEP_3}" "NOT" CMAKE_DEPENDENT_OPTION_DEP_NOT)
IF(CMAKE_DEPENDENT_OPTION_DEP_NOT) # we found a NOT
STRING(REPLACE "NOT;" "" CMAKE_DEPENDENT_OPTION_DEP "${CMAKE_DEPENDENT_OPTION_DEP}")
IF(${CMAKE_DEPENDENT_OPTION_DEP}) # not met, make sure it is
ENFORCE_OPTION_DEP_3DTK(${CMAKE_DEPENDENT_OPTION_DEP} OFF)
ELSE(${CMAKE_DEPENDENT_OPTION_DEP}) # dependency is met
ENDIF(${CMAKE_DEPENDENT_OPTION_DEP})
ELSE(CMAKE_DEPENDENT_OPTION_DEP_NOT)
IF(${CMAKE_DEPENDENT_OPTION_DEP}) # dependency is met
ELSE(${CMAKE_DEPENDENT_OPTION_DEP}) # not met, make sure it is
ENFORCE_OPTION_DEP_3DTK(${CMAKE_DEPENDENT_OPTION_DEP} ON)
ENDIF(${CMAKE_DEPENDENT_OPTION_DEP})
ENDIF(CMAKE_DEPENDENT_OPTION_DEP_NOT)
ENDFOREACH(d)
ENDIF(${option})
ENDMACRO(OPT_DEP)
## FreeGLUT
OPTION(WITH_FREEGLUT "Whether freeglut is available. This enables iterative drawing in show. ON/OFF" ON)
OPT_DEP(WITH_FREEGLUT "Whether freeglut is available. This enables iterative drawing in show. ON/OFF" ON "")
IF(WITH_FREEGLUT)
MESSAGE(STATUS "With freeglut")
@ -38,7 +108,7 @@ ELSE(WITH_FREEGLUT)
ENDIF(WITH_FREEGLUT)
## Show
OPTION(WITH_SHOW "Whether to build Show. This is the Visualization program of slam6d. ON/OFF" ON)
OPT_DEP(WITH_SHOW "Whether to build Show. This is the Visualization program of slam6d. ON/OFF" ON "" )
IF(WITH_SHOW)
FIND_PACKAGE(OpenGL REQUIRED)
@ -50,7 +120,7 @@ ELSE(WITH_SHOW)
ENDIF(WITH_SHOW)
## WXShow
OPTION(WITH_WXSHOW "Whether to build WXShow. This is the wxwidgets variant of Show. ON/OFF" OFF)
OPT_DEP(WITH_WXSHOW "Whether to build WXShow. This is the wxwidgets variant of Show. ON/OFF" OFF "")
IF(WITH_WXSHOW)
FIND_PACKAGE(OpenGL REQUIRED)
@ -61,12 +131,12 @@ IF(WITH_WXSHOW)
# wxWidgets include (this will do all the magic to configure everything)
include( ${wxWidgets_USE_FILE})
MESSAGE(STATUS "With wxshow")
ELSE(WITH_XWSHOW)
ELSE(WITH_WXSHOW)
MESSAGE(STATUS "Without wxshow")
ENDIF(WITH_WXSHOW)
## Shapes
OPTION(WITH_SHAPE_DETECTION "Whether to build shapes and planes executable for detecting planes. ON/OFF" OFF)
OPT_DEP(WITH_SHAPE_DETECTION "Whether to build shapes and planes executable for detecting planes. ON/OFF" OFF "")
IF(WITH_SHAPE_DETECTION)
MESSAGE(STATUS "With shape detection")
ELSE(WITH_SHAPE_DETECTION)
@ -74,7 +144,7 @@ ELSE(WITH_SHAPE_DETECTION)
ENDIF(WITH_SHAPE_DETECTION)
## Interior reconstruction
option(WITH_MODEL "Whether to build model executable for modelling interior environments. ON/OFF" OFF)
OPT_DEP(WITH_MODEL "Whether to build model executable for modelling interior environments. ON/OFF" OFF "")
if(WITH_MODEL)
message(STATUS "With interior reconstruction")
@ -83,9 +153,12 @@ else(WITH_MODEL)
endif(WITH_MODEL)
## Thermo
OPTION(WITH_THERMO "Whether to build executables for mutual calibration of laser scanner and camera. ON/OFF" OFF)
OPT_DEP(WITH_THERMO "Whether to build executables for mutual calibration of laser scanner and camera. ON/OFF" OFF "WITH_SHAPE_DETECTION")
IF(WITH_THERMO)
#for OpenCV 2.1
FIND_PACKAGE(OpenCV REQUIRED)
include("3rdparty/CMakeModules/OpenCV.cmake")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ADDITIONAL_OPENCV_FLAGS}")
add_subdirectory(3rdparty/cvblob)
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/cvblob)
link_directories(${CMAKE_SOURCE_DIR}/3rdparty/cvblob)
@ -94,21 +167,9 @@ ELSE(WITH_THERMO)
MESSAGE(STATUS "Without thermo")
ENDIF(WITH_THERMO)
## Octree
OPTION(WITH_OCTREE_DISPLAY "Whether to use octree display for efficiently culling scans ON/OFF" ON)
IF(WITH_OCTREE_DISPLAY)
MESSAGE(STATUS "Using octree display")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_GL_POINTS")
ELSE(WITH_OCTREE_DISPLAY)
MESSAGE(STATUS "Using displaylists: Warning potentially much slower")
ENDIF(WITH_OCTREE_DISPLAY)
#SET (WITH_OCTREE_DISPLAY ${WITH_OCTREE_DISPLAY} CACHE BOOL
#"Whether to use octree display for efficiently culling scans ON/OFF" FORCE)
## Octree
OPTION(WITH_COMPACT_OCTREE "Whether to use the compact octree display ON/OFF" OFF)
OPT_DEP(WITH_COMPACT_OCTREE "Whether to use the compact octree display ON/OFF" OFF "")
IF(WITH_COMPACT_OCTREE)
MESSAGE(STATUS "Using compact octrees")
@ -118,7 +179,7 @@ ELSE(WITH_COMPACT_OCTREE)
ENDIF(WITH_COMPACT_OCTREE)
## Glee?
OPTION(WITH_GLEE "Whether to use OpenGL extensions, requires glee. ON/OFF" OFF)
OPT_DEP(WITH_GLEE "Whether to use OpenGL extensions, requires glee. ON/OFF" OFF "")
IF(WITH_GLEE)
MESSAGE(STATUS "Using opengl extensions")
@ -128,7 +189,7 @@ ELSE(WITH_GLEE)
ENDIF(WITH_GLEE)
## Gridder
OPTION(WITH_GRIDDER "Whether to build the 2DGridder binary ON/OFF" OFF)
OPT_DEP(WITH_GRIDDER "Whether to build the 2DGridder binary ON/OFF" OFF "")
IF(WITH_GRIDDER)
MESSAGE(STATUS "With 2DGridder")
@ -137,7 +198,7 @@ ELSE(WITH_GRIDDER)
ENDIF(WITH_GRIDDER)
## Dynamic VELOSLAM
OPTION(WITH_VELOSLAM "Whether to build the Velodyne data processing (veloslam/veloshow) ON/OFF" OFF)
OPT_DEP(WITH_VELOSLAM "Whether to build the Velodyne data processing (veloslam/veloshow) ON/OFF" OFF "WITH_SHOW")
IF(WITH_VELOSLAM)
MESSAGE(STATUS "With VELOSLAM")
@ -146,7 +207,7 @@ ELSE(WITH_VELOSLAM)
ENDIF(WITH_VELOSLAM)
## Home-made Laserscanner
OPTION(WITH_DAVID_3D_SCANNER "Whether to build the David scanner app for homemade laser scanners binary ON/OFF" OFF)
OPT_DEP(WITH_DAVID_3D_SCANNER "Whether to build the David scanner app for homemade laser scanners binary ON/OFF" OFF "")
IF(WITH_DAVID_3D_SCANNER)
MESSAGE(STATUS "With David scanner")
@ -156,17 +217,18 @@ ENDIF(WITH_DAVID_3D_SCANNER)
## Tools
OPTION(WITH_TOOLS "Whether to build additional tools like convergence frame_to_graph etc. ON/OFF" OFF)
OPT_DEP(WITH_TOOLS "Whether to build additional tools like convergence frame_to_graph etc. ON/OFF" OFF "WITH_FBR")
IF(WITH_TOOLS)
MESSAGE(STATUS "With Tools")
find_package (Boost COMPONENTS program_options REQUIRED)
ELSE(WITH_TOOLS)
MESSAGE(STATUS "Without Tools")
ENDIF(WITH_TOOLS)
## Segmentation
OPTION(WITH_SEGMENTATION "Whether to build scan segmantion program ON/OFF" OFF)
OPT_DEP(WITH_SEGMENTATION "Whether to build scan segmantion program ON/OFF" OFF "WITH_FBR")
IF(WITH_SEGMENTATION)
MESSAGE(STATUS "With segmentation")
@ -177,7 +239,7 @@ ENDIF(WITH_SEGMENTATION)
## Normals
OPTION(WITH_NORMALS "Whether to build program for normal computation ON/OFF" OFF)
OPT_DEP(WITH_NORMALS "Whether to build program for normal computation ON/OFF" OFF "WITH_FBR")
IF(WITH_NORMALS)
MESSAGE(STATUS "With normals")
@ -185,19 +247,8 @@ ELSE(WITH_NORMALS)
MESSAGE(STATUS "Without normals")
ENDIF(WITH_NORMALS)
## CAD matching
OPTION (WITH_CAD "Wether to build with CAD import lib ON/OFF" OFF)
IF (WITH_CAD)
MESSAGE (STATUS "With CAD import")
find_package (Boost COMPONENTS program_options filesystem REQUIRED)
ELSE (WITH_CAD)
MESSAGE (STATUS "Without CAD import")
ENDIF (WITH_CAD)
## RivLib
OPTION(WITH_RIVLIB "Whether the RIEGL rivlib is present ON/OFF" OFF)
OPT_DEP(WITH_RIVLIB "Whether the RIEGL rivlib is present ON/OFF" OFF "")
IF(WITH_RIVLIB)
MESSAGE(STATUS "Compiling a scan IO for RXP files")
@ -216,7 +267,7 @@ ELSE(WITH_RIVLIB)
ENDIF(WITH_RIVLIB)
## CUDA support, TODO depend on CUDA_FIND
OPTION(WITH_CUDA "Compile with CUDA support" OFF)
OPT_DEP(WITH_CUDA "Compile with CUDA support" OFF "")
IF(WITH_CUDA)
MESSAGE(STATUS "Compiling WITH CUDA support")
FIND_PACKAGE(CUDA)
@ -226,7 +277,7 @@ ELSE(WITH_CUDA)
ENDIF(WITH_CUDA)
## PMD
OPTION(WITH_PMD "Whether to build the PMD tools like grabVideoAnd3D calibrate etc. ON/OFF" OFF)
OPT_DEP(WITH_PMD "Whether to build the PMD tools like grabVideoAnd3D calibrate etc. ON/OFF" OFF "")
IF(WITH_PMD)
FIND_PACKAGE(OpenGL REQUIRED)
@ -236,31 +287,21 @@ ELSE(WITH_PMD)
ENDIF(WITH_PMD)
## FBR
OPTION(WITH_FBR "Whether to compile feature based registration. ON/OFF" OFF)
OPT_DEP(WITH_FBR "Whether to compile feature based registration. ON/OFF" OFF "")
IF(WITH_FBR)
FIND_PACKAGE(OpenCV REQUIRED)
include("3rdparty/CMakeModules/OpenCV.cmake")
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ADDITIONAL_OPENCV_FLAGS}")
MESSAGE(STATUS "With FBR ")
ELSE(WITH_FBR)
MESSAGE(STATUS "Without FBR")
ENDIF(WITH_FBR)
## Special treatment for system specifics
IF(APPLE)
add_definitions(-Dfopen64=fopen)
ENDIF(APPLE)
## Multiple Cores
IF(APPLE)
SET(PROCESSOR_COUNT 2)
ELSE(APPLE)
INCLUDE(CountProcessors)
SET(NUMBER_OF_CPUS "${PROCESSOR_COUNT}" CACHE STRING "The number of processors to use (default: ${PROCESSOR_COUNT})" )
ENDIF(APPLE)
# OPEN
FIND_PACKAGE(OpenMP)
IF(OPENMP_FOUND)
OPTION(WITH_OPENMP "Whether to use parallel processing capabilities of OPENMP. ON/OFF" ON)
OPT_DEP(WITH_OPENMP "Whether to use parallel processing capabilities of OPENMP. ON/OFF" ON "")
ENDIF(OPENMP_FOUND)
IF(OPENMP_FOUND AND WITH_OPENMP)
@ -272,7 +313,7 @@ ELSE(OPENMP_FOUND AND WITH_OPENMP)
ENDIF(OPENMP_FOUND AND WITH_OPENMP)
## TORO
OPTION(WITH_TORO "Whether to use TORO. ON/OFF" OFF)
OPT_DEP(WITH_TORO "Whether to use TORO. ON/OFF" OFF "")
IF(WITH_TORO)
IF(WIN32)
@ -295,7 +336,7 @@ ENDIF(WITH_TORO)
## HOGMAN
OPTION(WITH_HOGMAN "Whether to use HOGMAN. ON/OFF" OFF)
OPT_DEP(WITH_HOGMAN "Whether to use HOGMAN. ON/OFF" OFF "")
IF(WITH_HOGMAN)
# dependant on libqt4-devi
@ -324,15 +365,19 @@ ELSE(WITH_HOGMAN)
MESSAGE(STATUS "Without HOGMAN")
ENDIF(WITH_HOGMAN)
OPTION(EXPORT_SHARED_LIBS "Whether to build additional shared libraries for use in other projects. ON/OFF" OFF)
OPT_DEP(EXPORT_SHARED_LIBS "Whether to build additional shared libraries for use in other projects. ON/OFF" OFF "WITH_SHOW;WITH_FBR")
IF(EXPORT_SHARED_LIBS)
## Compile a single shared library containing all of 3DTK
add_library(slam SHARED src/slam6d/icp6D.cc)
target_link_libraries(slam scan_s ANN_s sparse_s newmat_s show_s fbr_s)
MESSAGE(STATUS "exporting additional libraries")
ELSE(EXPORT_SHARED_LIBS)
MESSAGE(STATUS "not exporting libraries")
ENDIF(EXPORT_SHARED_LIBS)
OPTION(WITH_METRICS "Whether to use metrics in slam6d. ON/OFF" OFF)
OPT_DEP(WITH_METRICS "Whether to use metrics in slam6d. ON/OFF" OFF "")
IF(WITH_METRICS)
MESSAGE(STATUS "With metrics in slam6d.")
@ -341,8 +386,25 @@ ELSE(WITH_METRICS)
MESSAGE(STATUS "Without metrics in slam6d.")
ENDIF(WITH_METRICS)
#################################################
# OPERATING SYSTEM SPECIFIC BEHAVIOUR ##
#################################################
## Special treatment for system specifics
IF(APPLE)
add_definitions(-Dfopen64=fopen)
ENDIF(APPLE)
## Multiple Cores
IF(APPLE)
SET(PROCESSOR_COUNT 2)
ELSE(APPLE)
INCLUDE(CountProcessors)
SET(NUMBER_OF_CPUS "${PROCESSOR_COUNT}" CACHE STRING "The number of processors to use (default: ${PROCESSOR_COUNT})" )
ENDIF(APPLE)
IF(WIN32)
SET(ADDITIONAL_CFLAGS "-O2" CACHE STRING"Additional flags given to the compiler ()" )
SET(ADDITIONAL_CFLAGS "-O2" CACHE STRING "Additional flags given to the compiler ( -O2)" )
include_directories(${CMAKE_SOURCE_DIR}/3rdparty/windows/)
link_directories(${CMAKE_SOURCE_DIR}/3rdparty/windows)
link_directories(${CMAKE_SOURCE_DIR}/3rdparty/windows/x64)
@ -364,6 +426,30 @@ IF(APPLE)
include_directories(/System/Library/Frameworks/OpenGL.framework/Headers)
ENDIF(APPLE)
# hack to "circumvent" Debug and Release folders that are created under visual studio
# this is why the INSTALL target has to be used in visual studio
IF(MSVC)
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/bin/Release/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
IF( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/x64/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ELSE( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ENDIF(CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/bin/Debug/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
IF( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/x64/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ELSE( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ENDIF(CMAKE_SIZEOF_VOID_P EQUAL 8 )
ENDIF(MSVC)
#################################################
# GENERAL PROJECT SETTINGS ##
#################################################
SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ADDITIONAL_CFLAGS}")
# Hide CMake variables
@ -398,35 +484,8 @@ add_subdirectory(src/grid)
add_subdirectory(src/pmd)
add_subdirectory(src/shapes)
add_subdirectory(src/thermo)
IF(WITH_FBR)
add_subdirectory(src/slam6d/fbr)
ENDIF(WITH_FBR)
add_subdirectory(src/slam6d/fbr)
add_subdirectory(src/scanner)
add_subdirectory(src/model)
IF(EXPORT_SHARED_LIBS)
## Compiling a shared library containing all of the project
add_library(slam SHARED src/slam6d/icp6D.cc)
target_link_libraries(slam scan_s ANN_s sparse_s newmat_s show_s fbr_s)
ENDIF(EXPORT_SHARED_LIBS)
MESSAGE (STATUS "Build environment is set up!")
# hack to "circumvent" Debug and Release folders that are created under visual studio
# this is why the INSTALL target has to be used in visual studio
IF(MSVC)
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/bin/Release/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
IF( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/x64/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ELSE( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/ CONFIGURATIONS Release DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ENDIF(CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/bin/Debug/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
IF( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/x64/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ELSE( CMAKE_SIZEOF_VOID_P EQUAL 8 )
INSTALL(DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/windows/ CONFIGURATIONS Debug DESTINATION ${CMAKE_SOURCE_DIR}/windows FILES_MATCHING PATTERN "*.dll" PATTERN "*.exe")
ENDIF(CMAKE_SIZEOF_VOID_P EQUAL 8 )
ENDIF(MSVC)

View file

@ -32,6 +32,7 @@ Billy Okal okal.billy@googlemail.com
Razvan-George Mihalyi r.mihalyi@jacobs-university.de
Johannes Schauer j.schauer@jacobs-university.de
Corneliu-Claudiu Prodescu c.prodescu@jacobs-university.de
Vaibhav Kumar Mehta v.metha@jacobs-university.de
Further contributors

View file

@ -12,7 +12,7 @@ config: .build
touch .configured
.configured: .build
cd .build && cmake ..
cd .build && cmake .. && cmake ..
touch .configured
.build:

README
View file

@ -82,7 +82,7 @@ bin/slam6D -s 1 -e 76 -r 10 -m 3000 -d 50 -i 1000 --epsICP=0.000001
bin/show -s 1 -e 76 -m 3000 -f old /home/nuechter/dat/dat_mine1/
(using bremen_city.zip from http://kos.informatik.uni-osnabrueck.de/3Dscans/)
bin/scan_red -s 0 -e 12 -r 10 /home/nuechter/dat/bremen_city
bin/scan_red -s 0 -e 12 -r OCTREE -v 10 --octree 0 -f RIEGL_TXT /home/nuechter/dat/bremen_city
bin/slam6D -a 2 -q /home/nuechter/dat/bremen_city/reduced -f uos -d 150
-s 0 -e 12 --anim=1 -n /home/nuechter/dat/bremen_city/bremen.net
-G 1 -D 100 -i 0 -I 50 -p --epsSLAM=0.0

Binary file not shown.

View file

@ -1 +0,0 @@
scanserver -b 0

Binary file not shown.

View file

@ -1 +0,0 @@
scanserver -b 0

View file

@ -6,7 +6,13 @@
#pragma once
#include "pmdsdk2.h"
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cxcore.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
/**
* @brief Structure for PMD IO, see initPMD.

View file

@ -1,7 +1,13 @@
#pragma once
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cxcore.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include <libconfig.h>

View file

@ -1,6 +1,13 @@
#pragma once
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cxcore.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
struct Frame {
CvMat *trn;

View file

@ -226,9 +226,6 @@ public:
this->POINTDIM = pointtype.getPointDim();
//@@@
cout << "POINTDIM" << this->POINTDIM << endl;
mins = alloc->allocate<T>(POINTDIM);
maxs = alloc->allocate<T>(POINTDIM);
@ -822,11 +819,24 @@ protected:
if ( ( 1 << i ) & node.valid ) { // if ith node exists
childcenter(center, ccenter, size, i); // childrens center
if ( ( 1 << i ) & node.leaf ) { // if ith node is leaf get center
T * cp = new T[3];
for (unsigned int iterator = 0; iterator < 3; iterator++) {
T * cp = new T[POINTDIM];
// compute average of reflectance values and store in last position of cp
pointrep* points = children->getPointreps();
unsigned int length = points[0].length;
T *point = &(points[1].v);
float reflectance_center = 0.;
for(unsigned int iterator = 0; iterator < length; iterator++ ) {
reflectance_center += point[POINTDIM-1]; // add current reflectance
point+=BOctTree<T>::POINTDIM;
}
reflectance_center /= length * 1.0;
for (unsigned int iterator = 0; iterator < POINTDIM-1; iterator++) {
cp[iterator] = ccenter[iterator];
}
cp[POINTDIM-1] = reflectance_center; // reflectance is the last dimension in POINTDIM
c.push_back(cp);
} else { // recurse
GetOctTreeCenter(c, children->node, ccenter, size/2.0);
}

View file

@ -11,7 +11,11 @@
#include <iostream>
#include <vector>
#include <fstream>
#if (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
//for opencv 2.4
#if (CV_MAJOR_VERSION >= 2) && (CV_MINOR_VERSION >= 4)
#include <opencv2/nonfree/nonfree.hpp>

View file

@ -64,7 +64,7 @@ namespace fbr{
* @param image - input range image to be converted to point cloud
* @param file - destination of .3d file containing the point cloud
*/
void recoverPointCloud(const cv::Mat& range_image, const std::string& file);
void recoverPointCloud(const cv::Mat& range_image, cv::Mat& reflectance_image, vector<cv::Vec4f> &reduced_points);
unsigned int getImageWidth();
unsigned int getImageHeight();

View file

@ -1189,7 +1189,7 @@ inline int LU_solve(const double A[4][4], const int indx[4], double b[4])
}
/**
* Calculates the cross product of two 4-vectors
* Calculates the <i>cross</i> product of two 4-vectors
*
* @param x input 1
* @param y input 2
@ -1205,6 +1205,19 @@ static inline void Cross(const T *x, const T *y, T *result)
return;
}
/**
* Computes the <i>dot</i> product of two 3-vector
*
* @param x input 3-vector
* @param y input 3-vector
* @return dot product of x and y
*/
template <class T>
inline T Dot(const T *x, const T *y)
{
return x[0] * y[0] + x[1] * y[1] + x[2] * y[2];
}
/**
* converts a quaternion to Euler angels in the roll pitch yaw system
*/

View file

@ -391,9 +391,12 @@ protected:
//! Internal function of transform which handles the matrices
void transformMatrix(const double alignxf[16]);
//@FIXME
public:
//! Creating reduced points
void calcReducedPoints();
protected:
//! Copies reduced points to original points without any transformation.
void copyReducedToOriginal();

View file

@ -1,8 +1,13 @@
#ifndef __THERMO_H__
#define __THERMO_H__
#if (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#include <opencv/highgui.h>
#else
#include <opencv2/opencv.hpp>
#endif
//#include <opencv2/highgui.hpp>
#include <string>
#include <slam6d/scan.h>
using namespace std;
@ -18,6 +23,8 @@ IplImage* detectCorners(IplImage *orgimage, int &corner_exp, int board_h, int bo
void CalibFunc(int board_w, int board_h, int start, int end, bool optical, bool chess, bool quiet, string dir, int scale=1);
void writeCalibParam(int images, int corner_exp, int board_w, CvMat* image_points, CvSize size, string dir);
void loadIntrinsicCalibration(CvMat * intrinsic, CvMat * distortion, string dir, bool optical=false) ;
void loadExtrinsicCalibration(CvMat * Translation, CvMat * Rotation, string dir, int method, bool optical=false) ;
void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
IOType type, int scale, double rot_angle, double minDist, double maxDist,
bool correction, int neighborhood, int method=0);

View file

@ -109,7 +109,7 @@ void parse_options(int argc, char **argv, int &start, int &end, bool &scanserver
if (vmap.count("help")) {
cout << cmd_options << endl << endl;
cout << "SAMPLE COMMAND FOR CALCULATING NORMALS" << endl;
cout << " bin/calculateNormals -s 0 -e 0 -f UOS -g AKNN -k 20 dat/" <<endl;
cout << " bin/normals -s 0 -e 0 -f UOS -g AKNN -k 20 dat/" <<endl;
cout << endl << endl;
cout << "SAMPLE COMMAND FOR VIEWING CALCULATING NORMALS IN RGB SPACE" << endl;
cout << " bin/show -c -f UOS_RGB dat/normals/" << endl;
@ -135,7 +135,6 @@ void calculateNormalsAKNN(vector<Point> &normals,vector<Point> &points, int k, c
ANNpointArray pa = annAllocPts(points.size(), 3);
for (size_t i=0; i<points.size(); ++i)
{
pa[i] = new ANNcoord[3];
pa[i][0] = points[i].x;
pa[i][1] = points[i].y;
pa[i][2] = points[i].z;
@ -195,8 +194,10 @@ void calculateNormalsAKNN(vector<Point> &normals,vector<Point> &points, int k, c
n = n / n.NormFrobenius();
normals.push_back(Point(n(1), n(2), n(3)));
}
delete[] nidx;
delete[] d;
annDeallocPts(pa);
}
////////////////////////////////////////////////////////////////
/////////////NORMALS USING ADAPTIVE AKNN METHOD ////////////////
@ -213,7 +214,6 @@ void calculateNormalsAdaptiveAKNN(vector<Point> &normals,vector<Point> &points,
ANNpointArray pa = annAllocPts(points.size(), 3);
for (size_t i=0; i<points.size(); ++i)
{
pa[i] = new ANNcoord[3];
pa[i][0] = points[i].x;
pa[i][1] = points[i].y;
pa[i][2] = points[i].z;
@ -295,6 +295,7 @@ void calculateNormalsAdaptiveAKNN(vector<Point> &normals,vector<Point> &points,
n = n / n.NormFrobenius();
normals.push_back(Point(n(1), n(2), n(3)));
}
annDeallocPts(pa);
}
///////////////////////////////////////////////////////

View file

@ -9,8 +9,14 @@
#include <stdio.h>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
void usage(char *progName) {

View file

@ -7,10 +7,16 @@
*
*/
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <stdlib.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
int main(int argc, char **argv) {

View file

@ -10,8 +10,14 @@
#include <stdio.h>
#include <stdlib.h>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
//TODO: flip image flag
void usage(char *progName) {

View file

@ -7,8 +7,14 @@
*
*/
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include <stdio.h>
#include <stdlib.h>

View file

@ -7,7 +7,13 @@
*
*/
#include <cv.h> /* IplImage, cvCreateImage */
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "pmdsdk2.h"

View file

@ -10,8 +10,14 @@
#include <stdio.h>
#include <stdlib.h>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include "pmdsdk2.h"
#include "cvpmd.h"

View file

@ -10,8 +10,14 @@
#include <stdio.h>
#include <stdlib.h>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include <libconfig.h>

View file

@ -8,7 +8,13 @@
*/
#include "history.h"
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
Frame *allocFrame3DData(CvSize pmdSz) {
Frame *f = (Frame*)cvAlloc(sizeof(Frame));

View file

@ -14,8 +14,14 @@
#include <math.h>
// OpenCV
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
// GL: GLFW (window etc, ala glut) and FTGL (text rendering)
#include <GL/glfw.h>

View file

@ -17,12 +17,6 @@ if(WITH_RIVLIB)
endif(LIBXML2_FOUND)
endif(WITH_RIVLIB)
#IF (WITH_CAD)
# IF(NOT WIN32)
# add_library(scan_io_cad SHARED scan_io_cad.cc)
# target_link_libraries(scan_io_cad ${Boost_PROGRAM_OPTIONS_LIBRARY} ${Boost_FILESYSTEM_LIBRARY})
# ENDIF(NOT WIN32)
#ENDIF (WITH_CAD)
foreach(libname ${SCANIO_LIBNAMES})
if(WIN32)

View file

@ -36,10 +36,18 @@
#include <iostream>
#include <string>
#include <fstream>
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <cxcore.h>
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include <opencv/cv.h>
#else
#include <opencv2/opencv.hpp>
#endif
#include <math.h>
#include <vector>
#define PI 3.14159265
@ -196,7 +204,7 @@ int main(int argc, char** argv){
//open file for writing
ofstream scanfile;
char scanname[10];
char scanname[20];
sprintf(scanname,"scan000.3d");
scanfile.open(scanname);

View file

@ -29,5 +29,5 @@ ENDIF(WITH_WXSHOW)
### EXPORT SHARED LIBS
IF(EXPORT_SHARED_LIBS)
add_library(show_s SHARED ${SHOW_SRCS})
target_link_libraries(show_s ${SHOW_LIBS})
target_link_libraries(show_s newmat_s)
ENDIF(EXPORT_SHARED_LIBS)

View file

@ -29,9 +29,6 @@ using std::ifstream;
using std::exception;
#include <algorithm>
#ifdef WITH_METRICS
#include "slam6d/metrics.h"
#endif
#ifdef _MSC_VER
#include "XGetopt.h"
@ -779,100 +776,6 @@ void generateFrames(int start, int end, bool identity) {
}
}
/*
* create display lists
* @to do general framework for color & type definitions
*/
void createDisplayLists(bool reduced)
{
for(unsigned int s = 0; s < Scan::allScans.size() ; s++) {
Scan* scan = Scan::allScans[s];
vertexArray* myvertexArray1;
vertexArray* myvertexArray2;
// count points
unsigned int color1 = 0, color2 = 0;
if(!reduced) {
scan->get(DATA_XYZ | DATA_TYPE);
DataType type(scan->get("type"));
if(type.valid()) {
for(unsigned int i = 0; i < type.size(); ++i) {
if(type[i] & TYPE_GROUND) {
color1 += 3;
} else {
color2 += 3;
}
}
} else {
color2 = 3 * scan->size<DataXYZ>("xyz");
}
myvertexArray1 = new vertexArray(color1);
myvertexArray2 = new vertexArray(color2);
color1 = 0; color2 = 0;
DataXYZ xyz(scan->get("xyz"));
for(unsigned int i = 0; i < xyz.size(); ++i) {
if(type[i] & TYPE_GROUND) {
for(unsigned int j = 0; j < 3; ++j) {
myvertexArray1->array[color1++] = xyz[i][j];
}
} else {
for(unsigned int j = 0; j < 3; ++j) {
myvertexArray2->array[color2++] = xyz[i][j];
}
}
}
} else {
color2 = 3 * scan->size<DataXYZ>("xyz reduced");
myvertexArray1 = new vertexArray(0);
myvertexArray2 = new vertexArray(color2);
color2 = 0;
DataXYZ xyz_r(scan->get("xyz reduced"));
for(unsigned int i = 0; i < xyz_r.size(); ++i) {
for(unsigned int j = 0; j < 3; ++j) {
myvertexArray2->array[color2++] = xyz_r[i][j];
}
}
}
glNewList(myvertexArray1->name, GL_COMPILE);
//@
//glColor4d(0.44, 0.44, 0.44, 1.0);
//glColor4d(0.66, 0.66, 0.66, 1.0);
glVertexPointer(3, GL_FLOAT, 0, myvertexArray1->array);
glEnableClientState(GL_VERTEX_ARRAY);
glDrawArrays(GL_POINTS, 0, myvertexArray1->numPointsToRender);
glDisableClientState(GL_VERTEX_ARRAY);
glEndList();
glNewList(myvertexArray2->name, GL_COMPILE);
//glColor4d(1.0, 1.0, 1.0, 1.0);
//glColor4d(0.0, 0.0, 0.0, 1.0);
glVertexPointer(3, GL_FLOAT, 0, myvertexArray2->array);
glEnableClientState(GL_VERTEX_ARRAY);
glDrawArrays(GL_POINTS, 0, myvertexArray2->numPointsToRender);
glDisableClientState(GL_VERTEX_ARRAY);
glEndList();
// append to vector
vector<vertexArray*> vvertexArray;
vvertexArray.push_back(myvertexArray1);
vvertexArray.push_back(myvertexArray2);
vvertexArrayList.push_back(vvertexArray);
}
}
void cycleLOD() {
LevelOfDetail = 0.00001;
for (unsigned int i = 0; i < octpts.size(); i++)
@ -980,7 +883,6 @@ void initShow(int argc, char **argv){
}
cm = new ScanColorManager(4096, pointtype);
#ifdef USE_GL_POINTS // use octtrees
#ifdef USE_COMPACT_TREE
cout << "Creating compact display octrees.." << endl;
#else
@ -1117,9 +1019,6 @@ set heuristic, do locking, catch exception, reset heuristic to default or old
#endif // !COMPACT_TREE
#else // not using octtrees
createDisplayLists(red > 0);
#endif // USE_GL_POINTS
// load frames now that we know how many scans we actually loaded
unsigned int real_end = min((unsigned int)(end),

View file

@ -42,11 +42,7 @@ void DrawPoints(GLenum mode, bool interruptable)
if(frameNr != 0) {
cm->setMode(ScanColorManager::MODE_ANIMATION);
#ifdef USE_GL_POINTS
for(int iterator = (int)octpts.size()-1; iterator >= 0; iterator--) {
#else
for(int iterator = (int)Scan::allScans.size()-1; iterator >= 0; iterator--) {
#endif
// ignore scans that don't have any frames associated with them
if((unsigned int)iterator >= MetaMatrix.size()) continue;
// set usable frame
@ -67,7 +63,6 @@ void DrawPoints(GLenum mode, bool interruptable)
glPointSize(pointsize);
#ifdef USE_GL_POINTS
ExtractFrustum(pointsize);
cm->selectColors(type);
if (pointmode == 1 ) {
@ -75,18 +70,6 @@ void DrawPoints(GLenum mode, bool interruptable)
} else {
octpts[iterator]->displayLOD(LevelOfDetail);
}
#else
for (unsigned int jterator = 0; jterator < vvertexArrayList[iterator].size(); jterator++) {
if ((jterator == 0) && vvertexArrayList[iterator][jterator]->numPointsToRender > 0) {
cm->selectColors(type);
}
if (vvertexArrayList[iterator][jterator]->numPointsToRender > 0) {
glCallList(vvertexArrayList[iterator][jterator]->name);
}
}
#endif
glPopMatrix();
}
@ -128,13 +111,8 @@ void DrawPoints(GLenum mode, bool interruptable)
vector<int> sequence;
calcPointSequence(sequence, current_frame);
#ifdef USE_GL_POINTS
//for(int iterator = (int)octpts.size()-1; iterator >= 0; iterator--) {
for(unsigned int i = 0; i < sequence.size(); i++) {
int iterator = sequence[i];
#else
for(int iterator = (int)Scan::allScans.size()-1; iterator >= 0; iterator--) {
#endif
// ignore scans that don't have any frames associated with them
if((unsigned int)iterator >= MetaMatrix.size()) continue;
// set usable frame
@ -162,14 +140,6 @@ void DrawPoints(GLenum mode, bool interruptable)
}
glMultMatrixd(frame);
#ifdef USE_GL_POINTS
//cout << endl << endl; calcRay(570, 266, 1.0, 40000.0);
/* // for height mapped color in the vertex shader
GLfloat v[16];
for (unsigned int l = 0; l < 16; l++)
v[l] = MetaMatrix[iterator].back()[l];
glUniformMatrix4fvARB(glGetUniformLocationARB(p, "MYMAT"), 1, 0, v);
*/
ExtractFrustum(pointsize);
if (pointmode == 1 ) {
octpts[iterator]->display();
@ -197,13 +167,6 @@ void DrawPoints(GLenum mode, bool interruptable)
glPointSize(pointsize);
}
#else
for (unsigned int jterator = 0; jterator < vvertexArrayList[iterator].size(); jterator++) {
if (vvertexArrayList[iterator][jterator]->numPointsToRender > 0) {
glCallList(vvertexArrayList[iterator][jterator]->name);
}
}
#endif
glPopMatrix();
}
}

View file

@ -6,7 +6,7 @@ IF(WITH_TOOLS)
add_executable(scan_red scan_red.cc fbr/fbr_global.cc fbr/panorama.cc fbr/scan_cv.cc)
IF(UNIX)
target_link_libraries(scan_red scan dl ANN fbr_cv_io fbr_panorama ${OpenCV_LIBS})
target_link_libraries(scan_red scan dl ANN fbr_cv_io fbr_panorama ${OpenCV_LIBS} ${Boost_LIBRARIES})
ENDIF(UNIX)
IF (WIN32)
@ -90,7 +90,7 @@ ENDIF(UNIX)
IF(EXPORT_SHARED_LIBS)
add_library(scan_s SHARED ${SCANLIB_SRCS})
#target_link_libraries(scan_s ${Boost_LIBRARIES} newmat)
target_link_libraries(scan_s newmat sparse ANN scanclient pointfilter scanio)
target_link_libraries(scan_s newmat_s sparse_s ANN_s )
ENDIF(EXPORT_SHARED_LIBS)
### SLAM6D

View file

@ -126,7 +126,10 @@ BasicScan::BasicScan(const std::string& path, const std::string& identifier, IOT
BasicScan::~BasicScan()
{
// TODO: clean m_data up
for (map<string, pair<unsigned char*, unsigned int>>::iterator it = m_data.begin(); it != m_data.end(); it++) {
delete it->second.first;
}
}
void BasicScan::init()

View file

@ -1,3 +1,4 @@
IF(WITH_FBR)
FIND_PACKAGE(OpenCV REQUIRED)
SET(FBR_IO_SRC scan_cv.cc)
@ -20,17 +21,17 @@ add_library(fbr_registration STATIC ${FBR_REGISTRATION_SRC})
SET(FBR_SRC scan_cv.cc panorama.cc feature.cc feature_matcher.cc registration.cc fbr_global.cc)
add_library(fbr STATIC ${FBR_SRC})
IF(WITH_FBR)
SET(FBR_LIBS scan ANN ${OpenCV_LIBS})
add_executable(featurebasedregistration feature_based_registration.cc fbr_global.cc)
#target_link_libraries(featurebasedregistration fbr_cv_io fbr_panorama fbr_feature fbr_feature_matcher fbr_registration ${FBR_LIBS})
target_link_libraries(featurebasedregistration fbr ${FBR_LIBS})
ENDIF(WITH_FBR)
### EXPORT SHARED LIBS
IF(EXPORT_SHARED_LIBS)
add_library(fbr_s SHARED ${FBR_SRC})
target_link_libraries(fbr_s scan ANN ${OpenCV_LIBS})
target_link_libraries(fbr_s scan_s ANN_s ${OpenCV_LIBS})
ENDIF(EXPORT_SHARED_LIBS)
ENDIF(WITH_FBR)

View file

@ -554,8 +554,15 @@ namespace fbr{
}
}
void panorama::recoverPointCloud(const cv::Mat& range_image, const string& file ) {
std::ofstream scan_file (file.c_str());
void panorama::recoverPointCloud(const cv::Mat& range_image,
cv::Mat& reflectance_image, vector<cv::Vec4f> &reduced_points) {
if (range_image.cols != reflectance_image.cols
|| range_image.rows != reflectance_image.rows) {
cerr << "range image and reflectance image have different geometries - using empty range image" << endl;
reflectance_image.create(range_image.size(), CV_8U);
reflectance_image = cv::Scalar::all(0);
}
//recover from EQUIRECTANGULAR projection
if(pMethod == EQUIRECTANGULAR) {
double xFactor = (double) range_image.size().width / 2 / M_PI;
@ -568,6 +575,7 @@ namespace fbr{
for (int row = 0; row < range_image.size().height; ++row) {
for (int col = 0; col < range_image.size().width; ++col) {
float range = range_image.at<float>(row, col);
float reflectance = reflectance_image.at<uchar>(row,col)/255.0;
float theta = (heightMax - row) / yFactor - heightLow;
float phi = col / xFactor;
phi *= 180.0 / M_PI;
@ -583,7 +591,10 @@ namespace fbr{
if (first_seen) first_seen = false;
else continue;
}
scan_file << -100. * cartesian[1] << " " << 100. * cartesian[2] << " " << 100. * cartesian[0] << endl;
reduced_points.push_back(cv::Vec4f(-100.0*cartesian[1],
100.0*cartesian[2],
100.0*cartesian[0],
reflectance));
}
}
}
@ -600,6 +611,7 @@ namespace fbr{
for (int row = 0; row < range_image.size().height; ++row) {
for (int col = 0; col < range_image.size().width; ++col) {
float range = range_image.at<float>(row, col);
float reflectance = reflectance_image.at<uchar>(row,col)/255.0;
float theta = atan2(row + yFactor * tan(heightLow), yFactor);
float phi = col / xFactor;
phi *= 180.0 / M_PI;
@ -615,7 +627,10 @@ namespace fbr{
if (first_seen) first_seen = false;
else continue;
}
scan_file << -100. * cartesian[1] << " " << 100. * cartesian[2] << " " << 100. * cartesian[0] << endl;
reduced_points.push_back(cv::Vec4f(-100.0*cartesian[1],
100.0*cartesian[2],
100.0*cartesian[0],
reflectance));
}
}
}
@ -631,6 +646,7 @@ namespace fbr{
for (int row = 0; row < range_image.size().height; ++row) {
for (int col = 0; col < range_image.size().width; ++col) {
float range = range_image.at<float>(row, col);
float reflectance = reflectance_image.at<uchar>(row,col)/255.0;
float theta = 2 * atan2(exp((heightMax - row) / yFactor + heightLow), 1.) - M_PI_2;
float phi = col / xFactor;
phi *= 180.0 / M_PI;
@ -646,7 +662,10 @@ namespace fbr{
if (first_seen) first_seen = false;
else continue;
}
scan_file << -100. * cartesian[1] << " " << 100. * cartesian[2] << " " << 100. * cartesian[0] << endl;
reduced_points.push_back(cv::Vec4f(-100.0*cartesian[1],
100.0*cartesian[2],
100.0*cartesian[0],
reflectance));
}
}
}
@ -675,6 +694,7 @@ namespace fbr{
for (int row = 0; row < range_image.size().height; ++row) {
for (int col = 0; col < range_image.size().width; ++col) {
float range = range_image.at<float>(row, col);
float reflectance = reflectance_image.at<uchar>(row,col)/255.0;
float x = col * 1. / xFactor - fabs(xmin);
float y = (heightMax - row) * 1. / yFactor - fabs(ymin);
float theta = asin((C - (x*x + (Rho0 - y) * (Rho0 - y)) * n * n) / (2 * n));
@ -695,12 +715,13 @@ namespace fbr{
if (first_seen) first_seen = false;
else continue;
}
scan_file << -100. * cartesian[1] << " " << 100. * cartesian[2] << " " << 100. * cartesian[0] << endl;
reduced_points.push_back(cv::Vec4f(-100.0*cartesian[1],
100.0*cartesian[2],
100.0*cartesian[0],
reflectance));
}
}
}
scan_file.close();
}
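The reworked recoverPointCloud() no longer streams to a scan file; it appends one cv::Vec4f per recovered pixel. A rough caller sketch, assuming the conventions visible above (the factor 100 and the axis reordering convert the cartesian[] triple into the slam6d coordinate frame, the fourth channel is the reflectance scaled to [0,1]; img and the two resized images are placeholder names):

vector<cv::Vec4f> reduced_points;
img.recoverPointCloud(range_resized, reflectance_resized, reduced_points);
for (size_t i = 0; i < reduced_points.size(); ++i) {
  const cv::Vec4f &p = reduced_points[i];
  // p[0], p[1], p[2]: point in scan coordinates, p[3]: reflectance in [0,1]
}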
unsigned int panorama::getImageWidth(){

View file

@ -188,12 +188,12 @@ void Scan::copyReducedToOriginal()
Timer t = ClientMetric::copy_original_time.start();
#endif //WITH_METRICS
DataXYZ xyz_r(get("xyz reduced"));
unsigned int size = xyz_r.size();
DataXYZ xyz_r_orig(create("xyz reduced original", sizeof(double)*3*size));
DataXYZ xyz_reduced(get("xyz reduced"));
unsigned int size = xyz_reduced.size();
DataXYZ xyz_reduced_orig(create("xyz reduced original", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_r_orig[i][j] = xyz_r[i][j];
xyz_reduced_orig[i][j] = xyz_reduced[i][j];
}
}
@ -208,12 +208,12 @@ void Scan::copyOriginalToReduced()
Timer t = ClientMetric::copy_original_time.start();
#endif //WITH_METRICS
DataXYZ xyz_r_orig(get("xyz reduced original"));
unsigned int size = xyz_r_orig.size();
DataXYZ xyz_r(create("xyz reduced", sizeof(double)*3*size));
DataXYZ xyz_reduced_orig(get("xyz reduced original"));
unsigned int size = xyz_reduced_orig.size();
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_r[i][j] = xyz_r_orig[i][j];
xyz_reduced[i][j] = xyz_reduced_orig[i][j];
}
}
@ -234,32 +234,29 @@ void Scan::calcReducedPoints()
Timer t = ClientMetric::scan_load_time.start();
#endif //WITH_METRICS
// get xyz to start the scan load, separated here for time measurement
DataXYZ xyz(get("xyz"));
// if the scan hasn't been loaded we can't calculate anything
// get xyz to start the scan load, separated here for time measurement
DataXYZ xyz(get("xyz"));
DataReflectance reflectance(get("reflectance"));
if(xyz.size() == 0)
throw runtime_error("Could not calculate reduced points, XYZ data is empty");
#ifdef WITH_METRICS
ClientMetric::scan_load_time.end(t);
Timer tl = ClientMetric::calc_reduced_points_time.start();
#endif //WITH_METRICS
if (reflectance.size()==0) {
// no reduction needed
// copy vector of points to array of points to avoid
// further copying
if(reduction_voxelSize <= 0.0) {
// copy the points
DataXYZ xyz_r(create("xyz reduced", sizeof(double)*3*xyz.size()));
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*xyz.size()));
for(unsigned int i = 0; i < xyz.size(); ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_r[i][j] = xyz[i][j];
xyz_reduced[i][j] = xyz[i][j];
}
}
} else {
// start reduction
// build octree-tree from CurrentScan
// put full data into the octtree
BOctTree<double> *oct = new BOctTree<double>(PointerArray<double>(xyz).get(),
@ -267,7 +264,6 @@ void Scan::calcReducedPoints()
vector<double*> center;
center.clear();
if (reduction_nrpts > 0) {
if (reduction_nrpts == 1) {
oct->GetOctTreeRandom(center);
@ -280,21 +276,100 @@ void Scan::calcReducedPoints()
// storing it as reduced scan
unsigned int size = center.size();
DataXYZ xyz_r(create("xyz reduced", sizeof(double)*3*size));
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_r[i][j] = center[i][j];
xyz_reduced[i][j] = center[i][j];
}
}
delete oct;
}
} else {
if(xyz.size() != reflectance.size())
throw runtime_error("Could not calculate reduced reflectance, reflectance size is different from points size");
double **xyz_reflectance = new double*[xyz.size()];
for (unsigned int i = 0; i < xyz.size(); ++i) {
xyz_reflectance[i] = new double[4];
for (unsigned int j = 0; j < 3; ++j)
xyz_reflectance[i][j] = xyz[i][j];
xyz_reflectance[i][3] = reflectance[i];
}
#ifdef WITH_METRICS
ClientMetric::scan_load_time.end(t);
Timer tl = ClientMetric::calc_reduced_points_time.start();
#endif //WITH_METRICS
// no reduction needed
// copy vector of points to array of points to avoid
// further copying
if(reduction_voxelSize <= 0.0) {
// copy the points
if (reduction_pointtype.hasReflectance()) {
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*xyz.size()));
DataReflectance reflectance_reduced(create("reflectance reduced", sizeof(double)*reflectance.size()));
for(unsigned int i = 0; i < xyz.size(); ++i) {
for(unsigned int j = 0; j < 3; ++j)
xyz_reduced[i][j] = xyz[i][j];
reflectance_reduced[i] = reflectance[i];
}
} else {
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*xyz.size()));
for(unsigned int i = 0; i < xyz.size(); ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_reduced[i][j] = xyz[i][j];
}
}
}
} else {
// start reduction
// build octree-tree from CurrentScan
// put full data into the octtree
BOctTree<double> *oct = new BOctTree<double>(xyz_reflectance, xyz.size(), reduction_voxelSize, reduction_pointtype);
vector<double*> reduced;
reduced.clear();
if (reduction_nrpts > 0) {
if (reduction_nrpts == 1) {
oct->GetOctTreeRandom(reduced);
} else {
oct->GetOctTreeRandom(reduced, reduction_nrpts);
}
} else {
oct->GetOctTreeCenter(reduced);
}
// storing it as reduced scan
unsigned int size = reduced.size();
if (reduction_pointtype.hasReflectance()) {
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i) {
for(unsigned int j = 0; j < 3; ++j) {
xyz_reduced[i][j] = reduced[i][j];
}
}
DataReflectance reflectance_reduced(create("reflectance reduced", sizeof(float)*size));
for(unsigned int i = 0; i < size; ++i)
reflectance_reduced[i] = reduced[i][3];
} else {
DataXYZ xyz_reduced(create("xyz reduced", sizeof(double)*3*size));
for(unsigned int i = 0; i < size; ++i)
for(unsigned int j = 0; j < 3; ++j)
xyz_reduced[i][j] = reduced[i][j];
}
delete oct;
}
for (unsigned int i = 0; i < xyz.size(); ++i) {
delete[] xyz_reflectance[i];
}
delete[] xyz_reflectance;
#ifdef WITH_METRICS
ClientMetric::calc_reduced_points_time.end(tl);
#endif //WITH_METRICS
}
}
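A minimal caller sketch for the reflectance-aware path added above, mirroring how scan_red uses it further down in this commit (the voxel size and octree parameter are placeholders):

unsigned int types = PointType::USE_REFLECTANCE;
PointType pointtype(types);
scan->setReductionParameter(10.0 /* voxel size */, 0 /* octree: center */, pointtype);
scan->calcReducedPoints();
DataXYZ xyz_reduced(scan->get("xyz reduced"));
DataReflectance reflectance_reduced(scan->get("reflectance reduced"));
// xyz_reduced[i] and reflectance_reduced[i] refer to the same reduced point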
/**
* Merges the scan's intrinsic coordinates with the robot position.
* @param prevScan The scan whose transformation is extrapolated,
@ -338,11 +413,11 @@ void Scan::transformReduced(const double alignxf[16])
Timer t = ClientMetric::transform_time.start();
#endif //WITH_METRICS
DataXYZ xyz_r(get("xyz reduced"));
DataXYZ xyz_reduced(get("xyz reduced"));
unsigned int i=0;
// #pragma omp parallel for
for( ; i < xyz_r.size(); ++i) {
transform3(alignxf, xyz_r[i]);
for( ; i < xyz_reduced.size(); ++i) {
transform3(alignxf, xyz_reduced[i]);
}
#ifdef WITH_METRICS
@ -584,21 +659,21 @@ void Scan::getNoPairsSimple(vector <double*> &diff,
int thread_num,
double max_dist_match2)
{
DataXYZ xyz_r(Source->get("xyz reduced"));
DataXYZ xyz_reduced(Source->get("xyz reduced"));
KDtree* kd = new KDtree(PointerArray<double>(Target->get("xyz reduced")).get(), Target->size<DataXYZ>("xyz reduced"));
cout << "Max: " << max_dist_match2 << endl;
for (unsigned int i = 0; i < xyz_r.size(); i++) {
for (unsigned int i = 0; i < xyz_reduced.size(); i++) {
double p[3];
p[0] = xyz_r[i][0];
p[1] = xyz_r[i][1];
p[2] = xyz_r[i][2];
p[0] = xyz_reduced[i][0];
p[1] = xyz_reduced[i][1];
p[2] = xyz_reduced[i][2];
double *closest = kd->FindClosest(p, max_dist_match2, thread_num);
if (!closest) {
diff.push_back(xyz_r[i]);
diff.push_back(xyz_reduced[i]);
//diff.push_back(closest);
}
}
@ -625,15 +700,15 @@ void Scan::getPtPairsSimple(vector <PtPair> *pairs,
double *centroid_m, double *centroid_d)
{
KDtree* kd = new KDtree(PointerArray<double>(Source->get("xyz reduced")).get(), Source->size<DataXYZ>("xyz reduced"));
DataXYZ xyz_r(Target->get("xyz reduced"));
DataXYZ xyz_reduced(Target->get("xyz reduced"));
for (unsigned int i = 0; i < xyz_r.size(); i++) {
for (unsigned int i = 0; i < xyz_reduced.size(); i++) {
if (rnd > 1 && rand(rnd) != 0) continue; // take about 1/rnd-th of the numbers only
double p[3];
p[0] = xyz_r[i][0];
p[1] = xyz_r[i][1];
p[2] = xyz_r[i][2];
p[0] = xyz_reduced[i][0];
p[1] = xyz_reduced[i][1];
p[2] = xyz_reduced[i][2];
double *closest = kd->FindClosest(p, max_dist_match2, thread_num);
if (closest) {
@ -688,9 +763,9 @@ void Scan::getPtPairs(vector <PtPair> *pairs,
}
// get point pairs
DataXYZ xyz_r(Target->get("xyz reduced"));
DataXYZ xyz_reduced(Target->get("xyz reduced"));
Source->getSearchTree()->getPtPairs(pairs, Source->dalignxf,
xyz_r, 0, xyz_r.size(),
xyz_reduced, 0, xyz_reduced.size(),
thread_num,
rnd, max_dist_match2, sum, centroid_m, centroid_d);
@ -746,20 +821,20 @@ void Scan::getPtPairsParallel(vector <PtPair> *pairs, Scan* Source, Scan* Target
if(meta) {
for(unsigned int i = 0; i < meta->size(); ++i) {
// determine step for each scan individually
DataXYZ xyz_r(meta->getScan(i)->get("xyz reduced"));
unsigned int max = xyz_r.size();
DataXYZ xyz_reduced(meta->getScan(i)->get("xyz reduced"));
unsigned int max = xyz_reduced.size();
unsigned int step = max / OPENMP_NUM_THREADS;
// call ptpairs for each scan and accumulate ptpairs, centroids and sum
search->getPtPairs(&pairs[thread_num], Source->dalignxf,
xyz_r, step * thread_num, step * thread_num + step,
xyz_reduced, step * thread_num, step * thread_num + step,
thread_num,
rnd, max_dist_match2, sum[thread_num],
centroid_m[thread_num], centroid_d[thread_num]);
}
} else {
DataXYZ xyz_r(Target->get("xyz reduced"));
DataXYZ xyz_reduced(Target->get("xyz reduced"));
search->getPtPairs(&pairs[thread_num], Source->dalignxf,
xyz_r, thread_num * step, thread_num * step + step,
xyz_reduced, thread_num * step, thread_num * step + step,
thread_num,
rnd, max_dist_match2, sum[thread_num],
centroid_m[thread_num], centroid_d[thread_num]);

View file

@ -40,6 +40,7 @@ using std::ofstream;
#include "slam6d/metaScan.h"
#include "slam6d/io_utils.h"
#include "slam6d/scan.h"
#include "slam6d/Boctree.h"
#include "slam6d/fbr/fbr_global.h"
#include "slam6d/fbr/panorama.h"
#include "slam6d/fbr/scan_cv.h"
@ -71,205 +72,389 @@ using std::ofstream;
#include <dlfcn.h>
#endif
//Vertical angle of view of scanner
#define MAX_ANGLE 60.0
#define MIN_ANGLE -40.0
#define IMAGE_HEIGHT 1000
#define IMAGE_WIDTH 3600
using namespace fbr;
projection_method strToPMethod(string method){
if(strcasecmp(method.c_str(), "EQUIRECTANGULAR") == 0) return EQUIRECTANGULAR;
else if(strcasecmp(method.c_str(), "CYLINDRICAL") == 0) return CYLINDRICAL;
else if(strcasecmp(method.c_str(), "MERCATOR") == 0) return MERCATOR;
else if(strcasecmp(method.c_str(), "CONIC") == 0) return CONIC;
else throw std::runtime_error(std::string("projection method ") + method + std::string(" is unknown"));
#include <boost/program_options.hpp>
namespace po = boost::program_options;
enum reduction_method {OCTREE, RANGE, INTERPOLATE};
/* Function used to check that 'opt1' and 'opt2' are not specified
at the same time. */
void conflicting_options(const po::variables_map & vm,
const char *opt1, const char *opt2)
{
if (vm.count(opt1) && !vm[opt1].defaulted()
&& vm.count(opt2) && !vm[opt2].defaulted())
throw std::logic_error(string("Conflicting options '")
+ opt1 + "' and '" + opt2 + "'.");
}
/**
* Explains the usage of this program's command line parameters
*/
void usage(char* prog)
/* Function used to check that if 'for_what' is specified, then
'required_option' is specified too. */
void option_dependency(const po::variables_map & vm,
const char *for_what, const char *required_option)
{
#ifndef _MSC_VER
const string bold("\033[1m");
const string normal("\033[m");
#else
const string bold("");
const string normal("");
#endif
cout << endl
<< bold << "USAGE " << normal << endl
<< " " << prog << " [options] -r <NR> directory" << endl << endl;
cout << bold << "OPTIONS" << normal << endl
<< bold << " -s" << normal << " NR, " << bold << "--start=" << normal << "NR" << endl
<< " start at scan NR (i.e., neglects the first NR scans)" << endl
<< " [ATTENTION: counting naturally starts with 0]" << endl
<< endl
<< bold << " -e" << normal << " NR, " << bold << "--end=" << normal << "NR" << endl
<< " end after scan NR" << endl
<< endl
<< bold << " -f" << normal << " F, " << bold << "--format=" << normal << "F" << endl
<< " using shared library F for input" << endl
<< " (choose F from {uos, uos_map, uos_rgb, uos_frames, uos_map_frames, old, rts, rts_map, ifp, riegl_txt, riegl_rgb, riegl_bin, zahn, ply})" << endl
<< endl
<< bold << " -m" << normal << " NR, " << bold << "--max=" << normal << "NR" << endl
<< " neglegt all data points with a distance larger than NR 'units'" << endl
<< endl
<< bold << " -M" << normal << " NR, " << bold << "--min=" << normal << "NR" << endl
<< " neglegt all data points with a distance smaller than NR 'units'" << endl
<< endl
<< bold << " -r" << normal << " NR, " << bold << "--reduce=" << normal << "NR" << endl
<< " if NR >= 0, turns on octree based point reduction (voxel size=<NR>)" << endl
<< " if NR < 0, turns on rescaling based reduction" << endl
<< endl
<< bold << " -I" << normal << " NR," << bold << "--rangeimage=" << normal << "NR" << endl
<< " use rescaling of the range image as reduction method" << endl
<< " if NR = 1 recovers ranges from range image" << endl
<< " if NR = 2 interpolates 3D points in the image map" << endl
<< " if NR is omitted, then NR=1 is selected" << endl
<< endl
<< bold << " -p" << normal << " MET," << bold << "--projection=" << normal << "MET" << endl
<< " create range image using the MET projection method" << endl
<< " (choose MET from [EQUIRECTANGULAR|CYLINDRICAL|MERCATOR|CONIC])" << endl
<< bold << " -S, --scanserver" << normal << endl
<< " Use the scanserver as an input method and handling of scan data" << endl
<< endl << endl;
cout << bold << "EXAMPLES " << normal << endl
<< " " << prog << " -m 500 -r 5 dat" << endl
<< " " << prog << " --max=5000 -r 10.2 dat" << endl
<< " " << prog << " -s 2 -e 10 -r dat" << endl
<< " " << prog << " -s 0 -e 1 -r 10 -I=1 dat " << endl << endl;
exit(1);
if (vm.count(for_what) && !vm[for_what].defaulted())
if (vm.count(required_option) == 0
|| vm[required_option].defaulted())
throw std::logic_error(string("Option '") + for_what +
"' requires option '" +
required_option + "'.");
}
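Both helpers are meant to run after po::store() and before the parsed values are acted on; the parser setup below mirrors parse_options further down, and the option pairs are only illustrative (the actual constraints for scan_red are expressed through reduction_option_dependency and reduction_option_conflict):

po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(all).positional(pd).run(), vm);
conflicting_options(vm, "voxel", "scale");       // never both at once
option_dependency(vm, "projection", "width");    // a projection implies a panorama width
po::notify(vm);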
/** A function that parses the command-line arguments and sets the respective flags.
* @param argc the number of arguments
* @param argv the arguments
* @param dir the directory
* @param red using point reduction?
* @param rand use randomized point reduction?
* @param start starting at scan number 'start'
* @param end stopping at scan number 'end'
* @param maxDist - maximal distance of points being loaded
* @param minDist - minimal distance of points being loaded
* @param projection - projection method for building range image
* @param quiet switches on/off the quiet mode
* @param veryQuiet switches on/off the 'very quiet' mode
* @return 0, if the parsing was successful. 1 otherwise
/*
* validates panorama method specification
*/
int parseArgs(int argc, char **argv, string &dir, double &red,
int &start, int &end, int &maxDist, int &minDist,
string &projection, int &octree, IOType &type,
int &rangeimage, bool &scanserver)
{
bool reduced = false;
int c;
// from unistd.h:
extern char *optarg;
extern int optind;
namespace fbr {
void validate(boost::any& v, const std::vector<std::string>& values,
projection_method*, int) {
if (values.size() == 0)
throw std::runtime_error("Invalid model specification");
string arg = values.at(0);
if(strcasecmp(arg.c_str(), "EQUIRECTANGULAR") == 0) v = EQUIRECTANGULAR;
else if(strcasecmp(arg.c_str(), "CYLINDRICAL") == 0) v = CYLINDRICAL;
else if(strcasecmp(arg.c_str(), "MERCATOR") == 0) v = MERCATOR;
else if(strcasecmp(arg.c_str(), "CONIC") == 0) v = CONIC;
else throw std::runtime_error(std::string("projection method ") + arg + std::string(" is unknown"));
}
}
WriteOnce<IOType> w_type(type);
WriteOnce<int> w_start(start), w_end(end);
/* options descriptor */
// 0: no arguments, 1: required argument, 2: optional argument
static struct option longopts[] = {
{ "format", required_argument, 0, 'f' },
{ "max", required_argument, 0, 'm' },
{ "min", required_argument, 0, 'M' },
{ "start", required_argument, 0, 's' },
{ "end", required_argument, 0, 'e' },
{ "reduce", required_argument, 0, 'r' },
{ "octree", optional_argument, 0, 'O' },
{ "rangeimage", optional_argument, 0, 'I' },
{ "projection", required_argument, 0, 'p' },
{ "scanserver", no_argument, 0, 'S' },
{ 0, 0, 0, 0} // needed, cf. getopt.h
};
cout << endl;
while ((c = getopt_long(argc, argv, "f:r:s:e:m:M:O:p:", longopts, NULL)) != -1)
switch (c)
{
case 'r':
red = atof(optarg);
reduced = true;
break;
case 's':
w_start = atoi(optarg);
if (w_start < 0) { cerr << "Error: Cannot start at a negative scan number.\n"; exit(1); }
break;
case 'e':
w_end = atoi(optarg);
if (w_end < 0) { cerr << "Error: Cannot end at a negative scan number.\n"; exit(1); }
if (w_end < start) { cerr << "Error: <end> cannot be smaller than <start>.\n"; exit(1); }
break;
case 'f':
/*
* validates input type specification
*/
void validate(boost::any& v, const std::vector<std::string>& values,
IOType*, int) {
if (values.size() == 0)
throw std::runtime_error("Invalid model specification");
string arg = values.at(0);
try {
w_type = formatname_to_io_type(optarg);
v = formatname_to_io_type(arg.c_str());
} catch (...) { // runtime_error
cerr << "Format " << optarg << " unknown." << endl;
abort();
throw std::runtime_error("Format " + arg + " unknown.");
}
break;
case 'm':
maxDist = atoi(optarg);
break;
case 'O':
if (optarg) {
octree = atoi(optarg);
} else {
octree = 1;
}
void reduction_option_dependency(const po::variables_map & vm, reduction_method stype, const char *option)
{
if (vm.count("reduction") && vm["reduction"].as<reduction_method>() == stype) {
if (!vm.count(option)) {
throw std::logic_error (string("this reduction option needs ")+option+" to be set");
}
break;
case 'M':
minDist = atoi(optarg);
break;
case 'I':
if (optarg) {
rangeimage = atoi(optarg);
} else {
rangeimage = 1;
}
break;
case 'p':
projection = optarg;
break;
case 'S':
scanserver = true;
break;
case '?':
usage(argv[0]);
return 1;
default:
abort ();
}
void reduction_option_conflict(const po::variables_map & vm, reduction_method stype, const char *option)
{
if (vm.count("reduction") && vm["reduction"].as<reduction_method>() == stype) {
if (vm.count(option)) {
throw std::logic_error (string("this reduction option is incompatible with ")+option);
}
}
}
/*
* validates reduction method specification
*/
void validate(boost::any& v, const std::vector<std::string>& values,
reduction_method*, int) {
if (values.size() == 0)
throw std::runtime_error("Invalid model specification");
string arg = values.at(0);
if(strcasecmp(arg.c_str(), "OCTREE") == 0) v = OCTREE;
else if(strcasecmp(arg.c_str(), "RANGE") == 0) v = RANGE;
else if(strcasecmp(arg.c_str(), "INTERPOLATE") == 0) v = INTERPOLATE;
else throw std::runtime_error(std::string("reduction method ") + arg + std::string(" is unknown"));
}
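These validate() overloads are what lets boost::program_options parse the fbr and reduction types directly: whenever a po::value<T>() of one of these types is filled in, the library resolves to the matching overload to convert the raw command-line string. A minimal sketch of the hookup (the description object and option text are illustrative):

reduction_method rtype;
po::options_description desc("Reduction options");
desc.add_options()
  ("reduction,r", po::value<reduction_method>(&rtype),
   "OCTREE, RANGE or INTERPOLATE");
// "--reduction octree" now reaches rtype through the validate() overload above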
void parse_options(int argc, char **argv, int &start, int &end,
bool &scanserver, int &width, int &height,
fbr::projection_method &ptype, string &dir, IOType &iotype,
int &maxDist, int &minDist, reduction_method &rtype, double &scale,
double &voxel, int &octree, bool &use_reflectance)
{
po::options_description generic("Generic options");
generic.add_options()
("help,h", "output this help message");
po::options_description input("Input options");
input.add_options()
("start,s", po::value<int>(&start)->default_value(0),
"start at scan <arg> (i.e., neglects the first <arg> scans) "
"[ATTENTION: counting naturally starts with 0]")
("end,e", po::value<int>(&end)->default_value(-1),
"end after scan <arg>")
("format,f", po::value<IOType>(&iotype)->default_value(UOS),
"using shared library <arg> for input. (chose F from {uos, uos_map, "
"uos_rgb, uos_frames, uos_map_frames, old, rts, rts_map, ifp, "
"riegl_txt, riegl_rgb, riegl_bin, zahn, ply})")
("max,M", po::value<int>(&maxDist)->default_value(-1),
"neglegt all data points with a distance larger than <arg> 'units")
("min,m", po::value<int>(&minDist)->default_value(-1),
"neglegt all data points with a distance smaller than <arg> 'units")
("scanserver,S", po::bool_switch(&scanserver),
"Use the scanserver as an input method and handling of scan data");
po::options_description reduction("Reduction options");
reduction.add_options()
("reduction,r", po::value<reduction_method>(&rtype)->required(),
"choose reduction method (OCTREE, RANGE, INTERPOLATE)")
("scale,S", po::value<double>(&scale),
"scaling factor")
("voxel,v", po::value<double>(&voxel),
"voxel size")
("projection,P", po::value<fbr::projection_method>(&ptype),
"projection method or panorama image")
("octree,O", po::value<int>(&octree),
"0 -> center\n1 -> random\nN>1 -> random N")
("width,w", po::value<int>(&width),
"width of panorama")
("height,h", po::value<int>(&height),
"height of panorama");
po::options_description output("Output options");
output.add_options()
("reflectance,R", po::bool_switch(&use_reflectance),
"Use reflectance when reducing points and save scan files in UOSR format");
po::options_description hidden("Hidden options");
hidden.add_options()
("input-dir", po::value<string>(&dir), "input dir");
// all options
po::options_description all;
all.add(generic).add(input).add(reduction).add(output).add(hidden);
// options visible with --help
po::options_description cmdline_options;
cmdline_options.add(generic).add(input).add(reduction).add(output);
// positional argument
po::positional_options_description pd;
pd.add("input-dir", 1);
// process options
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).
options(all).positional(pd).run(), vm);
// display help
if (vm.count("help")) {
cout << cmdline_options;
cout << endl
<< "Example usage:" << endl
<< "\t./bin/scan_red -s 0 -e 0 -f uos --reduction OCTREE --voxel 10 --octree 0 dat" << endl
<< "\t./bin/scan_red -s 0 -e 0 -f uos --reduction RANGE --scale 0.5 --projection EQUIRECTANGULAR --width 3600 --height 1000 dat" << endl
<< "\t./bin/scan_red -s 0 -e 0 -f uos --reduction INTERPOLATE --scale 0.2 --projection EQUIRECTANGULAR --width 3600 --height 1000 dat" << endl;
exit(0);
}
if(!reduced) {
cerr << "\n*** Reduction method missed ***" << endl;
usage(argv[0]);
}
if (optind != argc-1) {
cerr << "\n*** Directory missing ***" << endl;
usage(argv[0]);
}
dir = argv[optind];
po::notify(vm);
reduction_option_dependency(vm, OCTREE, "voxel");
reduction_option_dependency(vm, OCTREE, "octree");
reduction_option_conflict(vm, OCTREE, "scale");
reduction_option_conflict(vm, OCTREE, "projection");
reduction_option_conflict(vm, OCTREE, "width");
reduction_option_conflict(vm, OCTREE, "height");
reduction_option_conflict(vm, RANGE, "voxel");
reduction_option_conflict(vm, RANGE, "octree");
reduction_option_dependency(vm, RANGE, "scale");
reduction_option_dependency(vm, RANGE, "projection");
reduction_option_dependency(vm, RANGE, "width");
reduction_option_dependency(vm, RANGE, "height");
reduction_option_conflict(vm, INTERPOLATE, "voxel");
reduction_option_conflict(vm, INTERPOLATE, "octree");
reduction_option_dependency(vm, INTERPOLATE, "scale");
reduction_option_dependency(vm, INTERPOLATE, "projection");
reduction_option_dependency(vm, INTERPOLATE, "width");
reduction_option_dependency(vm, INTERPOLATE, "height");
#ifndef _MSC_VER
if (dir[dir.length()-1] != '/') dir = dir + "/";
#else
if (dir[dir.length()-1] != '\\') dir = dir + "\\";
#endif
parseFormatFile(dir, w_type, w_start, w_end);
return 0;
}
void createdirectory(string dir)
{
int success = mkdir(dir.c_str(), S_IRWXU|S_IRWXG|S_IRWXO);
if (success == 0 || errno == EEXIST) {
cout << "Writing to " << dir << endl;
} else {
cerr << "Creating directory " << dir << " failed" << endl;
exit(1);
}
}
void scan2mat(Scan *source, cv::Mat &mat)
{
DataXYZ xyz = source->get("xyz");
DataReflectance xyz_reflectance = (((DataReflectance)source->get("reflectance")).size() == 0) ?
source->create("reflectance", sizeof(float)*xyz.size())
: source->get("reflectance");
if(((DataReflectance)source->get("reflectance")).size() == 0){
for(unsigned int i = 0; i < xyz.size(); i++)
xyz_reflectance[i] = 255;
}
unsigned int nPoints = xyz.size();
mat.create(nPoints,1,CV_32FC(4));
mat = cv::Scalar::all(0);
cv::MatIterator_<cv::Vec4f> it = mat.begin<cv::Vec4f>();
for(unsigned int i = 0; i < nPoints; i++){
float reflectance = xyz_reflectance[i];
//normalize the reflectance
reflectance += 32;
reflectance /= 64;
reflectance -= 0.2;
reflectance /= 0.3;
if (reflectance < 0) reflectance = 0;
if (reflectance > 1) reflectance = 1;
(*it)[0] = xyz[i][0];
(*it)[1] = xyz[i][1];
(*it)[2] = xyz[i][2];
(*it)[3] = reflectance;
++it;
}
}
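For orientation, a worked pass through the normalization above (the raw reflectance values are just examples):

// raw = -19.2  ->  ((-19.2 + 32)/64 - 0.2)/0.3 = 0.0
// raw =   0.0  ->  ((  0.0 + 32)/64 - 0.2)/0.3 = 1.0
// anything outside that span is clamped to [0, 1] before being stored in the fourth Vec4f channel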
void reduce_octree(Scan *scan, vector<cv::Vec4f> &reduced_points, int octree,
int red, bool use_reflectance)
{
if (use_reflectance) {
unsigned int types = PointType::USE_REFLECTANCE;
PointType pointtype(types);
scan->setReductionParameter(red, octree, pointtype);
scan->calcReducedPoints();
DataXYZ xyz_reduced(scan->get("xyz reduced"));
DataReflectance reflectance_reduced(scan->get("reflectance reduced"));
if (xyz_reduced.size() != reflectance_reduced.size()) {
cerr << "xyz_reduced size different than reflectance_reduced size" << endl;
return;
}
for(unsigned int j = 0; j < xyz_reduced.size(); j++) {
reduced_points.push_back(cv::Vec4f(xyz_reduced[j][0], xyz_reduced[j][1], xyz_reduced[j][2], reflectance_reduced[j]));
}
}
else {
scan->setReductionParameter(red, octree);
scan->calcReducedPoints();
DataXYZ xyz_reduced(scan->get("xyz reduced"));
for(unsigned int j = 0; j < xyz_reduced.size(); j++) {
reduced_points.push_back(cv::Vec4f(xyz_reduced[j][0], xyz_reduced[j][1], xyz_reduced[j][2], 0.0));
}
}
}
void reduce_range(Scan *scan, vector<cv::Vec4f> &reduced_points, int width,
int height, fbr::projection_method ptype, double scale,
bool use_reflectance)
{
panorama image(width, height, ptype);
cv::Mat mat;
scan2mat(scan, mat);
image.createPanorama(mat);
image.getDescription();
cv::Mat range_image_resized;
cv::Mat reflectance_image_resized;
resize(image.getRangeImage(), range_image_resized, cv::Size(),
scale, scale, cv::INTER_NEAREST);
if (use_reflectance) {
resize(image.getReflectanceImage(), reflectance_image_resized,
cv::Size(), scale, scale, cv::INTER_NEAREST);
} else {
reflectance_image_resized.create(range_image_resized.size(), CV_8U);
reflectance_image_resized = cv::Scalar::all(0);
}
image.recoverPointCloud(range_image_resized, reflectance_image_resized, reduced_points);
}
void reduce_interpolation(Scan *scan, vector<cv::Vec4f> &reduced_points,
int width, int height, fbr::projection_method ptype, double scale,
bool use_reflectance)
{
panorama image(width, height, ptype);
cv::Mat mat;
scan2mat(scan, mat);
image.createPanorama(mat);
image.getDescription();
cv::Mat range_image_resized;
cv::Mat reflectance_image_resized;
resize(image.getMap(), range_image_resized, cv::Size(),
scale, scale, cv::INTER_NEAREST);
if (use_reflectance) {
resize(image.getReflectanceImage(), reflectance_image_resized,
cv::Size(), scale, scale, cv::INTER_NEAREST);
}
for(int i = 0; i < range_image_resized.rows; i++) {
for(int j = 0; j < range_image_resized.cols; j++) {
cv::Vec3f vec = range_image_resized.at<cv::Vec3f>(i, j);
if (use_reflectance) {
reduced_points.push_back(cv::Vec4f(
vec[0], vec[1], vec[2],
reflectance_image_resized.at<uchar>(i, j)/255.0));
} else {
reduced_points.push_back(cv::Vec4f(vec[0], vec[1], vec[2], 0.0));
}
}
}
}
/*
* given a vector of 3d points, write them out as uos files
*/
void write_uos(vector<cv::Vec4f> &points, string &dir, string id)
{
ofstream outfile(dir + "/scan" + id + ".3d");
outfile << "# header is ignored" << endl;
for (vector<cv::Vec4f>::iterator it=points.begin(); it < points.end(); it++) {
outfile << (*it)[0] << " " << (*it)[1] << " " << (*it)[2] << endl;
}
outfile.close();
}
/*
* given a vector of 3d points, write them out as uosr files
*/
void write_uosr(vector<cv::Vec4f> &points, string &dir, string id)
{
ofstream outfile(dir + "/scan" + id + ".3d");
outfile << "# header is ignored" << endl;
for (vector<cv::Vec4f>::iterator it=points.begin(); it < points.end(); it++) {
outfile << (*it)[0] << " " << (*it)[1] << " " << (*it)[2] << " " << (*it)[3] << endl;
}
outfile.close();
}
// write .pose files
// .frames files can later be generated from them using ./bin/pose2frames
void writeposefile(string &dir, const double* rPos, const double* rPosTheta, string id)
{
ofstream posefile(dir + "/scan" + id + ".pose");
posefile << rPos[0] << " " << rPos[1] << " " << rPos[2] << endl;
posefile << deg(rPosTheta[0]) << " "
<< deg(rPosTheta[1]) << " "
<< deg(rPosTheta[2]) << endl;
posefile.close();
}
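Taken together, for scan id 000 these writers leave the following files in the reduced/ directory; the layout follows the code above, the numbers are invented:

scan000.3d (uosr variant):
    # header is ignored
    12.3 -4.5 678.9 0.42
    ...

scan000.pose:
    12.5 0.0 -3.1
    0.0 92.4 0.0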
/**
* Main program for reducing scans.
@ -281,186 +466,63 @@ int parseArgs(int argc, char **argv, string &dir, double &red,
*/
int main(int argc, char **argv)
{
cout << "(c) Jacobs University Bremen, gGmbH, 2012" << endl << endl;
if (argc <= 1) {
usage(argv[0]);
}
// parsing the command line parameters
// init, default values if not specified
int start, end;
bool scanserver;
int width, height;
int maxDist, minDist;
fbr::projection_method ptype;
string dir;
double red = -1.0;
int start = 0, end = -1;
int maxDist = -1;
int minDist = -1;
string projection = "EQUIRECTANGULAR";
int octree = 0;
IOType type = RIEGL_TXT;
int rangeimage = 0;
bool scanserver = false;
IOType iotype;
reduction_method rtype;
double scale, voxel;
int octree;
bool use_reflectance;
parseArgs(argc, argv, dir, red, start, end, maxDist, minDist, projection,
octree, type, rangeimage, scanserver);
parse_options(argc, argv, start, end, scanserver, width, height, ptype,
dir, iotype, maxDist, minDist, rtype, scale, voxel, octree,
use_reflectance);
if (scanserver) {
try {
ClientInterface::create();
} catch(std::runtime_error& e) {
cerr << "ClientInterface could not be created: " << e.what() << endl;
cerr << "Start the scanserver first." << endl;
exit(-1);
}
}
for (int iter = start; iter <= end; iter++) {
// Get Scans
string reddir = dir + "reduced";
Scan::openDirectory(scanserver, dir, iotype, iter, iter);
#ifdef _MSC_VER
int success = mkdir(reddir.c_str());
#else
int success = mkdir(reddir.c_str(), S_IRWXU|S_IRWXG|S_IRWXO);
#endif
if(success == 0) {
cout << "Writing scans to " << reddir << endl;
} else if(errno == EEXIST) {
cout << "Directory " << reddir << " exists already. CONTINUE" << endl;
} else {
cerr << "Creating directory " << reddir << " failed" << endl;
exit(1);
}
Scan::openDirectory(scanserver, dir, type, start, end);
if(Scan::allScans.size() == 0) {
cerr << "No scans found. Did you use the correct format?" << endl;
exit(-1);
}
string scanFileName;
string poseFileName;
/// Use the OCTREE based reduction
if (rangeimage == 0) {
cout << endl << "Reducing point cloud using octrees" << endl;
int scan_number = start;
for(std::vector<Scan*>::iterator it = Scan::allScans.begin();
it != Scan::allScans.end();
++it, ++scan_number) {
for(ScanVector::iterator it = Scan::allScans.begin(); it != Scan::allScans.end(); ++it) {
Scan* scan = *it;
const double* rPos = scan->get_rPos();
const double* rPosTheta = scan->get_rPosTheta();
scan->setRangeFilter(maxDist, minDist);
scan->setReductionParameter(red, octree);
// get reduced points
DataXYZ xyz_r(scan->get("xyz reduced"));
unsigned int nPoints = xyz_r.size();
const char* id = scan->getIdentifier();
cout << "Writing Scan No. " << id;
cout << " with " << xyz_r.size() << " points" << endl;
scanFileName = reddir + "/scan" + id + ".3d";
poseFileName = reddir + "/scan" + id + ".pose";
vector<cv::Vec4f> reduced_points;
ofstream redptsout(scanFileName.c_str());
for(unsigned int j = 0; j < nPoints; j++) {
redptsout << xyz_r[j][0] << " "
<< xyz_r[j][1] << " "
<< xyz_r[j][2] << endl;
}
redptsout.close();
redptsout.clear();
string reddir = dir + "reduced";
createdirectory(reddir);
ofstream posout(poseFileName.c_str());
posout << rPos[0] << " "
<< rPos[1] << " "
<< rPos[2] << endl
<< deg(rPosTheta[0]) << " "
<< deg(rPosTheta[1]) << " "
<< deg(rPosTheta[2]) << endl;
posout.close();
posout.clear();
if (scanserver) {
scan->clear("xyz reduced");
}
}
} else { /// use the RESIZE based reduction
cout << endl << "Reducing point cloud by rescaling the range image" << endl;
Scan::openDirectory(false, dir, type, start, end);
if (Scan::allScans.size() <= 0) {
cerr << "No scans found!" << endl;
exit(-1);
switch (rtype) {
case OCTREE:
reduce_octree(scan, reduced_points, octree, voxel, use_reflectance);
break;
case RANGE:
reduce_range(scan, reduced_points, width, height, ptype, scale, use_reflectance);
break;
case INTERPOLATE:
reduce_interpolation(scan, reduced_points, width, height, ptype, scale, use_reflectance);
break;
default:
cerr << "unknown method" << endl;
return 1;
break;
}
for (int scan_number = start; scan_number <= end; scan_number++) {
Scan* scan = Scan::allScans[scan_number];
scan->setRangeFilter(maxDist, minDist);
const double* rPos = scan->get_rPos();
const double* rPosTheta = scan->get_rPosTheta();
scanFileName = dir + "reduced/scan" + to_string(scan_number, 3) + ".3d";
poseFileName = dir + "reduced/scan" + to_string(scan_number, 3) + ".pose";
// Create a panorama. The iMap inside does all the tricks for us.
scan_cv sScan(dir, scan_number, type);
sScan.convertScanToMat();
/// Project point cloud using the selected projection method
panorama image(IMAGE_WIDTH, IMAGE_HEIGHT, strToPMethod(projection));
image.createPanorama(sScan.getMatScan());
image.getDescription();
/// Resize the range image, specify desired interpolation method
double scale = 1.0/red;
cv::Mat range_image_resized; // reflectance_image_resized;
string ofilename;
stringstream ss;
ss << setw(3) << setfill('0') << (scan_number);
ofilename = reddir + "/scan" + ss.str() + ".3d";
if (rangeimage == 1) {
resize(image.getRangeImage(), range_image_resized, cv::Size(),
scale, scale, cv::INTER_NEAREST);
// Recover point cloud from image and write scan to file
stringstream ss;
ss << setw(3) << setfill('0') << (scan_number);
image.recoverPointCloud(range_image_resized, ofilename);
} else {
resize(image.getMap(), range_image_resized, cv::Size(),
scale, scale, cv::INTER_NEAREST);
ofstream redptsout(ofilename.c_str());
// Convert back to 3D.
for(int i = 0; i < range_image_resized.rows; i++) {
for(int j = 0; j < range_image_resized.cols; j++) {
cv::Vec3f vec = range_image_resized.at<cv::Vec3f>(i, j);
double x = vec[0];
double y = vec[1];
double z = vec[2];
redptsout << x << " " << y << " " << z << endl;
if (use_reflectance)
write_uosr(reduced_points, reddir, scan->getIdentifier());
else
write_uos(reduced_points, reddir, scan->getIdentifier());
writeposefile(reddir, scan->get_rPos(), scan->get_rPosTheta(), scan->getIdentifier());
}
}
}
ofstream posout(poseFileName.c_str());
posout << rPos[0] << " "
<< rPos[1] << " "
<< rPos[2] << endl
<< deg(rPosTheta[0]) << " "
<< deg(rPosTheta[1]) << " "
<< deg(rPosTheta[2]) << endl;
posout.clear();
posout.close();
}
}
cout << endl << endl;
cout << "Normal program end." << endl << endl;
if (scanserver) {
Scan::closeDirectory();
ClientInterface::destroy();
}
}

View file

@ -14,13 +14,13 @@ IF (WITH_THERMO)
IF(UNIX)
target_link_libraries(caliboard scan shape newmat dl ANN)
target_link_libraries(thermo scan shape newmat dl ANN)
target_link_libraries(thermo cv cvaux cxcore GL GLU highgui cvblob scan ANN)
target_link_libraries(thermo GL GLU cvblob ${OpenCV_LIBS} scan ANN)
ENDIF(UNIX)
IF (WIN32)
target_link_libraries(caliboard scan shape newmat XGetopt ANN)
target_link_libraries(thermo scan shape newmat XGetopt ANN)
target_link_libraries(thermo cv cvaux cxcore GL GLU highgui cvblob scan ANN)
target_link_libraries(thermo GL GLU cvblob ${OpenCV_LIBS} scan ANN)
ENDIF(WIN32)

View file

@ -190,7 +190,6 @@ void sortBlobs(double point_array[][2], int board_n, int board_h, int board_w, b
IplImage* detectBlobs(IplImage *org_image, int &corner_exp, int board_h, int board_w, bool quiet, double point_array2[][2]) {
IplImage *gray_image = cvCloneImage(org_image);
//cvThreshold(gray_image, gray_image, 100, 255, CV_THRESH_BINARY);
cvThreshold(gray_image, gray_image, GRAY_TH, 255, CV_THRESH_BINARY);
IplImage *labelImg = cvCreateImage(cvGetSize(gray_image), IPL_DEPTH_LABEL, 1);
@ -270,7 +269,7 @@ IplImage* detectBlobs(IplImage *org_image, int &corner_exp, int board_h, int boa
* Connects the detected calibration features in the image with lines.
*/
void drawLines(double point_array2[][2], int corner_exp, IplImage *image, bool color) {
for (int i = 4; i <= corner_exp - 2; i++) {
for (int i = 0; i <= corner_exp - 2; i++) {
CvPoint pt1;
CvPoint pt2;
CvScalar s;
@ -368,7 +367,6 @@ IplImage* detectCorners(IplImage *orgimage, int &corner_exp, int board_h, int bo
cout << "found corners:" << corner_exp << endl;
if (found != 0) {//if all corners found successfully
//if (corner_exp != 0) {//if all corners found successfully
//Get Subpixel accuracy on those corners
if(size.width > 400) {
cvFindCornerSubPix(gray_image, corners, corner_exp, cvSize(11, 11), cvSize(-1, -1),
@ -379,7 +377,6 @@ IplImage* detectCorners(IplImage *orgimage, int &corner_exp, int board_h, int bo
cvTermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
}
}
cout << "blub " << found << endl;
for (int i = 0; i < corner_exp; i++) {
point_array2[i][0] = corners[i].x;
@ -410,8 +407,6 @@ image_points, CvSize size, string dir, string substring) {
CV_MAT_ELEM( *image_points2, float,i,1) = CV_MAT_ELEM( *image_points, float, i, 1);
CV_MAT_ELEM(*object_points2, float, i, 0) = (j / board_w) * 4;
CV_MAT_ELEM( *object_points2, float, i, 1) = (j % board_w) * 4;
//CV_MAT_ELEM(*object_points2, float, i, 0) = (j / board_w) * 4;
//CV_MAT_ELEM( *object_points2, float, i, 1) = (j % board_w) * 4;
CV_MAT_ELEM( *object_points2, float, i, 2) = 0.0f;
}
for (int i = 0; i < images; ++i) { //These are all the same number
@ -423,10 +418,10 @@ image_points, CvSize size, string dir, string substring) {
CV_MAT_ELEM( *intrinsic_matrix, float, 1, 1 ) = 1.0f;
//CALIBRATE THE CAMERA!
cvCalibrateCamera2(object_points2, image_points2, point_counts2, size,
intrinsic_matrix, distortion_coeffs, Rotation, Translation, 0 //CV_CALIB_FIX_ASPECT_RATIO
intrinsic_matrix, distortion_coeffs, Rotation, Translation, 0
);
// SAVE AND PRINT THE INTRINSICS AND DISTORTIONS
// SAVE AND PRINT THE INTRINSICS AND DISTORTIONS
string file = dir + "Intrinsics" + substring + ".xml";
cvSave(file.c_str(), intrinsic_matrix);
file = dir + "Distortion" + substring + ".xml";
@ -467,10 +462,6 @@ chess, bool quiet, string dir, int scale) {
cvNamedWindow("Final Result", 0);
cvResizeWindow( "Original Image", 480, 640 );
cvResizeWindow( "Final Result", 480, 640 );
/*
cvNamedWindow("Final Result", 0);
cvResizeWindow( "Final Result", 320, 240 );
*/
int nr_img = end - start + 1;
if (nr_img == 0) {
cout << "ImageCount is zero!" << endl;
@ -496,14 +487,10 @@ chess, bool quiet, string dir, int scale) {
if(optical) {
//TODO t = dir + "/photo" + to_string(count, 3) + ".ppm";
t = dir + "/photo" + to_string(count, 3) + ".jpg";
//t1 = dir + "/cimage" + to_string(count, 3) + ".ppm";
//t = dir + to_string(count, 3) + "/photo" + to_string(count, 3) + ".ppm";
//t1 = dir + to_string(count, 3) + "/cimage" + to_string(count, 3) + ".ppm";
} else {
//t = dir + to_string(count, 3) + "/image" + to_string(count, 3) + ".ppm";
//t1 = dir + to_string(count, 3) + "/timage" + to_string(count, 3) + ".ppm";
t = dir + "/image" + to_string(count, 3) + ".ppm";
//t1 = dir + "/timage" + to_string(count, 3) + ".ppm";
}
cout << t << endl;
//loading images and finding corners
@ -515,7 +502,6 @@ chess, bool quiet, string dir, int scale) {
cvShowImage("Original Image", image1);
/////////////////////////////////////////////////////////////
double point_array2[corner_exp][2];
IplImage *image;
@ -579,7 +565,6 @@ chess, bool quiet, string dir, int scale) {
writeCalibParam(successes, corner_exp, board_w, image_points, size, dir, substring);
cvReleaseMat(&image_points);
}
/**
@ -987,12 +972,9 @@ void ExtrCalibFunc(int board_w, int board_h, int start, int end, bool optical, b
//ALLOCATE STORAGE(depending upon the number of images in(in case if command line arguments are given )
//not on the basis of number of images in which all corner extracted/while in the other case the number is the same )
string substring = optical? "Optical" : "";
string file = dir + "Intrinsics" + substring + ".xml";
cout << file << endl;
CvMat *intrinsic = (CvMat*) cvLoad(file.c_str());
file = dir + "Distortion" + substring + ".xml";
CvMat *distortion = (CvMat*) cvLoad(file.c_str());
CvMat *intrinsic;
CvMat *distortion;
loadIntrinsicCalibration(intrinsic, distortion, dir, optical);
//for storing the rotations and translation vectors
@ -1134,6 +1116,7 @@ void ExtrCalibFunc(int board_w, int board_h, int start, int end, bool optical, b
cout << "Number of successes: " << successes << endl;
// Now calculating mean and median rotation and trans
string substring = optical? "Optical" : "";
calculateExtrinsics(rotation_vectors_temp, translation_vectors_temp, successes, dir, quiet, substring);
calculateExtrinsicsWithReprojectionCheck(points2D, points3D, rotation_vectors_temp, translation_vectors_temp, distortion, intrinsic, corner_exp, successes, dir, quiet, substring);
cvReleaseMat(&intrinsic);
@ -1189,31 +1172,9 @@ void writeGlobalCameras(int start, int end, bool optical, bool quiet, string dir
cout << "ImageCount is zero!" << endl;
return;
}
string substring = optical? "Optical" : "";
switch(method) {
case 0:
file = dir + "Rotation" + substring + ".xml";
break;
case 1:
file = dir + "RotationMedian" + substring + ".xml";
break;
case 2:
file = dir + "RotationMean" + substring + ".xml";
break;
}
CvMat *Rotation = (CvMat*) cvLoad(file.c_str());
switch(method) {
case 0:
file = dir + "Translation" + substring + ".xml";
break;
case 1:
file = dir + "TranslationMedian" + substring + ".xml";
break;
case 2:
file = dir + "TranslationMean" + substring + ".xml";
break;
}
CvMat *Translation = (CvMat*) cvLoad(file.c_str());
CvMat *Rotation;
CvMat *Translation;
loadExtrinsicCalibration(Translation, Rotation, dir, method, optical);
double starttime = GetCurrentTimeInMilliSec();
@ -1355,35 +1316,13 @@ void calculateGlobalCameras(int start, int end, bool optical, bool quiet, string
cout << "ImageCount is zero!" << endl;
return;
}
string substring = optical? "Optical" : "";
string file = dir + "Intrinsics" + substring + ".xml";
CvMat *intrinsic = (CvMat*) cvLoad(file.c_str());
file = dir + "Distortion" + substring + ".xml";
CvMat *distortion = (CvMat*) cvLoad(file.c_str());
switch(method) {
case 0:
file = dir + "Rotation" + substring + ".xml";
break;
case 1:
file = dir + "RotationMedian" + substring + ".xml";
break;
case 2:
file = dir + "RotationMean" + substring + ".xml";
break;
}
CvMat *Rotation = (CvMat*) cvLoad(file.c_str());
switch(method) {
case 0:
file = dir + "Translation" + substring + ".xml";
break;
case 1:
file = dir + "TranslationMedian" + substring + ".xml";
break;
case 2:
file = dir + "TranslationMean" + substring + ".xml";
break;
}
CvMat *Translation = (CvMat*) cvLoad(file.c_str());
CvMat *intrinsic;
CvMat *distortion;
loadIntrinsicCalibration(intrinsic, distortion, dir, optical);
CvMat *Rotation;
CvMat *Translation;
loadExtrinsicCalibration(Translation, Rotation, dir, method, optical);
CvMat* undistort = cvCreateMat(5,1,CV_32FC1);
for (int hh = 0; hh < 5; hh++) {
CV_MAT_ELEM(*undistort, float,hh,0) = 0;
@ -1759,24 +1698,17 @@ void calculateGlobalCameras(int start, int end, bool optical, bool quiet, string
}
/**
* Main function for projecting the 3D points onto the corresponding image and
* associating temperature values to the data points.
*/
void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
IOType type, int scale, double rot_angle, double minDist, double maxDist,
bool correction, int neighborhood, int method) {
int nr_img = end - start + 1;
if (nr_img < 1) {
cout << "ImageCount is zero!" << endl;
return;
}
void loadIntrinsicCalibration(CvMat *&intrinsic, CvMat *&distortion, string dir, bool optical) { // pointers by reference so the loaded matrices reach the caller
string substring = optical? "Optical" : "";
string file = dir + "Intrinsics" + substring + ".xml";
CvMat *intrinsic = (CvMat*) cvLoad(file.c_str());
intrinsic = (CvMat*) cvLoad(file.c_str());
file = dir + "Distortion" + substring + ".xml";
CvMat *distortion = (CvMat*) cvLoad(file.c_str());
distortion = (CvMat*) cvLoad(file.c_str());
}
void loadExtrinsicCalibration(CvMat *&Translation, CvMat *&Rotation, string dir, int method, bool optical) { // pointers by reference so the loaded matrices reach the caller
string substring = optical? "Optical" : "";
string file;
switch(method) {
case 0:
file = dir + "Rotation" + substring + ".xml";
@@ -1788,7 +1720,7 @@ void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
file = dir + "RotationMean" + substring + ".xml";
break;
}
CvMat *Rotation = (CvMat*) cvLoad(file.c_str());
Rotation = (CvMat*) cvLoad(file.c_str());
switch(method) {
case 0:
file = dir + "Translation" + substring + ".xml";
@@ -1800,17 +1732,10 @@ void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
file = dir + "TranslationMean" + substring + ".xml";
break;
}
CvMat *Translation = (CvMat*) cvLoad(file.c_str());
CvMat* undistort = cvCreateMat(5,1,CV_32FC1);
for (int hh = 0; hh < 5; hh++) {
CV_MAT_ELEM(*undistort, float,hh,0) = 0;
}
Translation = (CvMat*) cvLoad(file.c_str());
}
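// Minimal usage sketch for the two loaders above (an illustration only,
// assuming the matrices are returned through the reference parameters; dir,
// method and optical stand in for the caller's actual values):
static void exampleLoadCalibration(string dir, int method, bool optical) {
  CvMat *intrinsic, *distortion, *Rotation, *Translation;
  loadIntrinsicCalibration(intrinsic, distortion, dir, optical);
  loadExtrinsicCalibration(Translation, Rotation, dir, method, optical);
  // ... use the calibration, e.g. with cvProjectPoints2 ...
  cvReleaseMat(&intrinsic);
  cvReleaseMat(&distortion);
  cvReleaseMat(&Rotation);
  cvReleaseMat(&Translation);
}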
double starttime = GetCurrentTimeInMilliSec();
stringstream outdat;
int pointcnt = 0;
string outdir = dir + "/labscan-map";
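/**
* Creates the output directory outdir (using the platform specific mkdir) and
* terminates the program if the directory cannot be created.
*/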
void openOutputDirectory(string outdir) {
#ifdef _MSC_VER
int success = mkdir(outdir.c_str());
#else
@@ -1824,12 +1749,9 @@ void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
cerr << "Creating directory " << outdir << " failed" << endl;
exit(1);
}
for (int count = start; count <= end; count++) {
// filling the rotation matrix
CvMat* point_3Dcloud;
CvMat* point_2Dcloud;
CvMat* undistort_2Dcloud;
}
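/**
* Reads the reduced point cloud of scan number count from dir, copies it into
* the newly allocated point_3Dcloud matrix with the axes reordered to
* (z, -x, y) and returns the number of points.
*/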
int openDirectory(CvMat *&point_3Dcloud, string dir, IOType type, int count) {
// reading the 3D points and projecting them back to 2d
Scan::openDirectory(false, dir, type, count, count);
Scan::allScans[0]->setRangeFilter(-1, -1);
@@ -1839,23 +1761,127 @@ void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
//Scan::readScans(type, count, count, dir, maxDist, minDist, 0);
DataXYZ reduced = Scan::allScans[0]->get("xyz reduced");
int red_size = reduced.size();
point_3Dcloud = cvCreateMat(red_size, 3, CV_32FC1);
cout << "Points: " << red_size << endl;
point_2Dcloud = cvCreateMat(red_size, 2, CV_32FC1);
undistort_2Dcloud = cvCreateMat(red_size, 2, CV_32FC1);
cout << "readScans done" << endl;
for (int j = 0; j < red_size; j++) {
Point p(reduced[j]);
// TODO make sure correct points are printed
CV_MAT_ELEM(*point_3Dcloud, float,j,0) = p.z;
CV_MAT_ELEM(*point_3Dcloud, float,j,1) = -p.x;
CV_MAT_ELEM(*point_3Dcloud, float,j,2) = p.y;
}
int nr_points = red_size;
cout << "Number of points read: " << red_size << endl;
return red_size;
}
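/**
* Loads the image with the three-digit index count0 from dir (photoNNN.jpg if
* optical, imageNNN.ppm otherwise), rescales it by scale and terminates if the
* file cannot be loaded.
*/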
void loadImage(IplImage *&image, string dir, int count0, bool optical, int scale) {
string t, t0;
if(optical) {
t = dir + "/photo" + to_string(count0, 3) + ".jpg";
} else {
t = dir + "/image" + to_string(count0, 3) + ".ppm";
}
image = cvLoadImage(t.c_str(), -1);
if (!image) {
cout << "first image " << t << " cannot be loaded" << endl;
exit(0);
}
image = resizeImage(image, scale);
}
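/**
* Composes a rotation of angle degrees about the z axis with the extrinsic
* pose (Rotation and Translation, given as 1x3 row vectors). The combined pose
* is returned as the Rodrigues vector rod_comI, the translation t_comI and the
* 3x3 rotation matrix rot_tmp; all three are allocated here.
*/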
void calculateGlobalPoses(CvMat *Translation, CvMat *Rotation, CvMat *&t_comI,
CvMat *&rod_comI, double angle, CvMat *&rot_tmp) {
CvMat* RotationI = cvCreateMat(3,1,CV_32FC1);
CvMat* TranslationI = cvCreateMat(3,1,CV_32FC1);
CvMat* rod40 = cvCreateMat(3,1,CV_32FC1);
//cout << "Angle: " << angle << " " << rad(angle) << endl;
CV_MAT_ELEM(*rod40,float,0,0) = 0.0;
CV_MAT_ELEM(*rod40,float,1,0) = 0.0;
CV_MAT_ELEM(*rod40,float,2,0) = 1.0 * rad(angle);
//cout << "tmp" << endl;
CvMat* t40 = cvCreateMat(3,1,CV_32FC1);
CV_MAT_ELEM(*t40,float,0,0) = 0.0;
CV_MAT_ELEM(*t40,float,1,0) = 0.0;
CV_MAT_ELEM(*t40,float,2,0) = 0.0;
rot_tmp = cvCreateMat(3,3,CV_32FC1);
//CvMat* rot_tmp = cvCreateMat(3,3,CV_32FC1);
rod_comI = cvCreateMat(3,1,CV_32FC1);
t_comI = cvCreateMat(3,1,CV_32FC1);
CvMat* rod_com = cvCreateMat(1,3,CV_32FC1);
CvMat* t_com = cvCreateMat(1,3,CV_32FC1);
for(int w = 0; w < 3; w++) {
CV_MAT_ELEM(*RotationI,float,w,0) = CV_MAT_ELEM(*Rotation,float,0,w);
CV_MAT_ELEM(*TranslationI,float,w,0) = CV_MAT_ELEM(*Translation,float,0,w);
}
//cout << endl;
//cout << "Final Rotation" << endl;
cvComposeRT(rod40, t40, RotationI, TranslationI, rod_comI, t_comI);
for(int w = 0; w < 3; w++) {
CV_MAT_ELEM(*rod_com,float,0,w) = CV_MAT_ELEM(*rod_comI,float,w,0);
CV_MAT_ELEM(*t_com,float,0,w) = CV_MAT_ELEM(*t_comI,float,w,0);
/*
cout << CV_MAT_ELEM(*RotationI,float,w,0) << " ";
cout << CV_MAT_ELEM(*TranslationI,float,w,0) << " ";
cout << CV_MAT_ELEM(*rod40,float,w,0) << " ";
cout << CV_MAT_ELEM(*t40,float,w,0) << " ";
cout << CV_MAT_ELEM(*rod_comI,float,w,0) << " ";
cout << CV_MAT_ELEM(*t_comI,float,w,0) << endl;
*/
}
//cout << endl;
cvRodrigues2(rod_comI, rot_tmp);
cvReleaseMat(&rod40);
cvReleaseMat(&RotationI);
cvReleaseMat(&TranslationI);
cvReleaseMat(&t40);
cvReleaseMat(&rod_com);
cvReleaseMat(&t_com);
}
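// A note on the composition above: with cvComposeRT(rvec1, tvec1, rvec2, tvec2, ...)
// the second transform is applied after the first (R3 = R2*R1, t3 = R2*t1 + t2),
// so the result is R_com = R_extrinsic * Rz(rad(angle)) and, because t40 is
// zero, t_com = t_extrinsic.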
/**
* Main function for projecting the 3D points onto the corresponding image and
* associating temperature values to the data points.
*/
void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
IOType type, int scale, double rot_angle, double minDist, double maxDist,
bool correction, int neighborhood, int method) {
int nr_img = end - start + 1;
if (nr_img < 1) {
cout << "ImageCount is zero!" << endl;
return;
}
CvMat *distortion;
CvMat *intrinsic;
CvMat *Translation;
CvMat *Rotation;
loadExtrinsicCalibration(Translation, Rotation, dir, method, optical);
loadIntrinsicCalibration(intrinsic, distortion, dir, optical);
CvMat* undistort = cvCreateMat(5,1,CV_32FC1);
for (int hh = 0; hh < 5; hh++) {
CV_MAT_ELEM(*undistort, float,hh,0) = 0;
}
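// undistort holds all-zero distortion coefficients; projecting the cloud once
// with distortion and once with undistort (further below) yields both the
// distorted and the undistorted 2D coordinates for every point.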
double starttime = GetCurrentTimeInMilliSec();
stringstream outdat;
int pointcnt = 0;
string outdir = dir + "/labscan-map";
openOutputDirectory(outdir);
for (int count = start; count <= end; count++) {
// filling the rotation matrix
CvMat *point_3Dcloud;
int nr_points = openDirectory(point_3Dcloud, dir, type, count);
CvMat* point_2Dcloud = cvCreateMat(nr_points, 2, CV_32FC1);
CvMat* undistort_2Dcloud = cvCreateMat(nr_points, 2, CV_32FC1);
cout << "Number of points read: " << nr_points << endl;
delete Scan::allScans[0];
Scan::allScans.clear();
@@ -1874,80 +1900,22 @@ void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
// loading images
int count0 = count * nrP360 + p;
string t, t0;
if(optical) {
//TODO t = dir + "/photo" + to_string(count, 3) + ".ppm";
//t = dir + "/photo" + to_string(count0, 3) + "_2.jpg";
//t = dir + "/photo" + to_string(count0, 3) + "_90.jpg";
t = dir + "/photo" + to_string(count0, 3) + ".jpg";
//t = dir + "/photo" + to_string(count0, 3) + "_1.jpg";
} else {
t = dir + "/image" + to_string(count0, 3) + ".ppm";
}
IplImage* image = cvLoadImage(t.c_str(), -1);
if (!image) {
cout << "first image " << t << " cannot be loaded" << endl;
return;
}
IplImage *image;
loadImage(image, dir, count0, optical, scale);
CvSize size = cvGetSize(image);
image = resizeImage(image, scale);
// rotate Rotation and Translation
CvMat* RotationI = cvCreateMat(3,1,CV_32FC1);
CvMat* TranslationI = cvCreateMat(3,1,CV_32FC1);
CvMat* rod40 = cvCreateMat(3,1,CV_32FC1);
cout << "Angle: " << angle << " " << rad(angle) << endl;
CV_MAT_ELEM(*rod40,float,0,0) = 0.0;
CV_MAT_ELEM(*rod40,float,1,0) = 0.0;
CV_MAT_ELEM(*rod40,float,2,0) = 1.0 * rad(angle);
cout << "tmp" << endl;
CvMat* t40 = cvCreateMat(3,1,CV_32FC1);
CV_MAT_ELEM(*t40,float,0,0) = 0.0;
CV_MAT_ELEM(*t40,float,1,0) = 0.0;
CV_MAT_ELEM(*t40,float,2,0) = 0.0;
cout << "tmp2" << endl;
CvMat* rod_comI;
CvMat* t_comI;
CvMat* rot_tmp = cvCreateMat(3,3,CV_32FC1);
CvMat* rod_comI = cvCreateMat(3,1,CV_32FC1);
CvMat* t_comI = cvCreateMat(3,1,CV_32FC1);
CvMat* rod_com = cvCreateMat(1,3,CV_32FC1);
CvMat* t_com = cvCreateMat(1,3,CV_32FC1);
cout << "tmp3" << endl;
for(int w = 0; w < 3; w++) {
CV_MAT_ELEM(*RotationI,float,w,0) = CV_MAT_ELEM(*Rotation,float,0,w);
CV_MAT_ELEM(*TranslationI,float,w,0) = CV_MAT_ELEM(*Translation,float,0,w);
}
cout << endl;
cout << "Final Rotation" << endl;
cvComposeRT(rod40, t40, RotationI, TranslationI, rod_comI, t_comI);
for(int w = 0; w < 3; w++) {
CV_MAT_ELEM(*rod_com,float,0,w) = CV_MAT_ELEM(*rod_comI,float,w,0);
CV_MAT_ELEM(*t_com,float,0,w) = CV_MAT_ELEM(*t_comI,float,w,0);
cout << CV_MAT_ELEM(*RotationI,float,w,0) << " ";
cout << CV_MAT_ELEM(*TranslationI,float,w,0) << " ";
cout << CV_MAT_ELEM(*rod40,float,w,0) << " ";
cout << CV_MAT_ELEM(*t40,float,w,0) << " ";
cout << CV_MAT_ELEM(*rod_comI,float,w,0) << " ";
cout << CV_MAT_ELEM(*t_comI,float,w,0) << endl;
}
cout << endl;
cvRodrigues2(rod_comI, rot_tmp);
calculateGlobalPoses(Translation, Rotation, t_comI, rod_comI, angle, rot_tmp);
// Project Points
cvProjectPoints2(point_3Dcloud, rod_comI, t_comI, intrinsic, distortion, point_2Dcloud, NULL, NULL, NULL, NULL, NULL, 0);
cvProjectPoints2(point_3Dcloud, rod_comI, t_comI, intrinsic, undistort, undistort_2Dcloud, NULL, NULL, NULL, NULL, NULL, 0);
cvReleaseMat(&rod40);
cvReleaseMat(&RotationI);
cvReleaseMat(&TranslationI);
cvReleaseMat(&t40);
cvReleaseMat(&rod_comI);
cvReleaseMat(&rod_com);
cvReleaseMat(&t_com);
cvReleaseMat(&t_comI);
cvReleaseMat(&rod_comI);
cout << "Done projecting points" << endl;
//for counting how many points get mapped to first and second image file
@@ -2126,6 +2094,7 @@ void ProjectAndMap(int start, int end, bool optical, bool quiet, string dir,
cvReleaseMat(&undistort_2Dcloud);
}
// Final cleanup
cvReleaseMat(&intrinsic);
cvReleaseMat(&distortion);
cvReleaseMat(&Rotation);

View file

@@ -925,9 +925,7 @@ void initShow(int argc, char **argv){
#endif
}
} else {
#ifndef USE_GL_POINTS
createDisplayLists(red > 0);
#elif USE_COMPACT_TREE
#if USE_COMPACT_TREE
cout << "Creating compact display octrees.." << endl;
for(int i = 0; i < (int)Scan::allScans.size() ; i++) {
compactTree *tree;