/*
 * David Scanner implementation
 *
 * Copyright (C) Vladislav Perelman
 *
 * Released under the GPL version 3.
 *
 */

/*
 * david_scanner.cc
 * Program takes as an input path to the config file which needs to
 * have all the necessary information for the program.
 * Config file has to have (each on a new line, 9 lines in total):
 *
 * Path to the directory where frames from the video are stored
 * The first frame that has to be used
 * The last frame that has to be used
 * The empty frame without the laser
 * Path to the file with intrinsics of the camera
 * Path to the rotation of the left board
 * Path to the rotation of the right board
 * Path to the translation of the left board
 * Path to the translation of the right board
 *
 * Program computes the 3 point cloud of the object and stores it in the
 * file scan000.3d, each point in the cloud is represented by the line
 * in the file:
 * x y z r g b
 *
 *
 * Created on: Oct 4, 2010
 * Author: Vladislav Perelman v.perelman@jacobs-university.de
 */

/* NOTE(review): this file has been damaged by an automated filter that deleted
 * text spans between '<' and '>' characters. All #include header names, several
 * stream-output chains ("<<endl; ..."), comparison expressions, and at least two
 * large blocks of code (config parsing / matrix setup, and part of the line-
 * endpoint computation) are missing. The code below keeps every surviving token
 * unchanged and only adds comments; it does NOT compile until the missing spans
 * are restored from the original source. Each gap is marked with a NOTE(review)
 * comment stating what was most likely there — all such claims are inferences
 * from the surrounding code and must be confirmed against the original file. */

#include /* NOTE(review): header name stripped — likely a C/C++ stdlib header */
#include /* NOTE(review): header name stripped */
#include /* NOTE(review): header name stripped */
/* NOTE(review): '&' (bitwise) between defined() checks below looks like it was
 * meant to be '&&' — confirm against the original. */
#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include /* NOTE(review): OpenCV header name stripped */
#include /* NOTE(review): OpenCV header name stripped */
#include /* NOTE(review): OpenCV header name stripped */
#include /* NOTE(review): OpenCV header name stripped */
#elif (CV_MAJOR_VERSION == 2) && (CV_MINOR_VERSION < 2)
#include /* NOTE(review): OpenCV 2.x (< 2.2) header name stripped */
#else
#include /* NOTE(review): OpenCV header name stripped */
#endif
#include /* NOTE(review): header name stripped */
#include /* NOTE(review): header name stripped */

#define PI 3.14159265

using namespace std;

/**
 * Entry point of the David laser-scanner reconstruction tool.
 *
 * Reads a 9-line config file (see file header), isolates the red laser line in
 * each video frame, fits the two laser lines on the calibration boards with a
 * Hough transform, derives the laser plane from three 3D points on it, and
 * intersects a viewing ray per lit pixel with that plane to emit "x y z r g b"
 * points (presumably to the "scanfile" stream, whose setup was stripped).
 *
 * @param argc must be 2
 * @param argv argv[1] is the path to the config file
 * @return 0 on success (error paths partially stripped)
 */
int main(int argc, char** argv){
    if (argc!=2){
        cout<<"USAGE: david_scanner config_file\nConfig file should contain path_to_frames first_valid_frame last_valid_frame empty_frame path_to_intrinsics"
                "path_to_rotation_left path_to_rotation_right path_to_translation_left and path_to_translation_right each on a new line!"<imageData;
    /* NOTE(review): large stripped span ends just above. It most likely held:
     * "<<endl; return -1; }" closing the usage branch, then the whole setup
     * section — reading the 9 config lines, loading image_empty and the frame
     * images, loading the intrinsics A and the board rotations/translations,
     * deriving R1iAi, R2iAi, a1, a2, r1inv, r2t2, rotation_matrix_left,
     * translation_left, opening "scanfile", computing the frame/empty
     * difference image "diff", and the declaration
     * "unsigned char* pixels = (unsigned char*)diff->imageData;".
     * All of those identifiers are used below but never visibly declared. */

    //threshold the red channel of the difference image: strong red responses
    //become pure red (0,0,255 in BGR), everything else is blacked out
    for (int row = 0; row < diff->height; row++){
        for (int col = 0; col < diff->width; col++){
            int R;
            R = pixels[ row * diff->widthStep + col * 3 + 2 ];
            if (R>30) {
                pixels[ row * diff->widthStep + col * 3 + 0 ] = 0;
                pixels[ row * diff->widthStep + col * 3 + 1 ] = 0;
                pixels[ row * diff->widthStep + col * 3 + 2 ] = 255;
            } else {
                pixels[ row * diff->widthStep + col * 3 + 0 ] = 0;
                pixels[ row * diff->widthStep + col * 3 + 1 ] = 0;
                pixels[ row * diff->widthStep + col * 3 + 2 ] = 0;
            }
        }
    }

    //remove pixels that don't have at least 2 red neighbors
    for (int row = 1; row < diff->height-1; row++){
        for (int col = 1; col < diff->width-1; col++){
            int R = pixels[ row * diff->widthStep + col * 3 + 2 ];
            if (R == 255){
                //red channel of the 8 neighbours (each is 0 or 255)
                int r1 = pixels[ (row-1)*diff->widthStep + col * 3 + 2];
                int r2 = pixels[ (row-1)*diff->widthStep + (col-1) * 3 + 2];
                int r3 = pixels[ (row-1)*diff->widthStep + (col+1) * 3 + 2];
                int r4 = pixels[ (row+1)*diff->widthStep + col * 3 + 2];
                int r5 = pixels[ (row+1)*diff->widthStep + (col-1) * 3 + 2];
                int r6 = pixels[ (row+1)*diff->widthStep + (col+1) * 3 + 2];
                int r7 = pixels[ (row)*diff->widthStep + (col-1) * 3 + 2];
                int r8 = pixels[ (row)*diff->widthStep + (col+1) * 3 + 2];
                //sum <= 255 means at most one red neighbour -> isolated noise
                if (r1+r2+r3+r4+r5+r6+r7+r8<=255) pixels[ row * diff->widthStep + col * 3 + 2 ]=0;
            }
        }
    }

    //*****finding 2 lines on the image*****
    bool good = false;
    int threshold = 50; //original threshold for Hough transform, incremented if too many groups of lines found
    IplImage* color_dst;
    IplImage* tmpImage;
    int minX1, minX2, maxX1, maxX2;
    CvSeq* lines = 0;
    CvPoint* line1;
    CvPoint* line2;
    int count_groups;
    //incrementing thresholds until only 2 groups of lines can be found
    while(!good){
        good = true;
        count_groups = 0; //counter for number of line groups. Line group is defined by the slope
        int epsilon = 1.5; //error margin for the slope
        /* NOTE(review): "int epsilon = 1.5" truncates to 1 — probably meant to
         * be double; confirm against the original. */
        color_dst = cvCreateImage( cvGetSize(diff), 8, 3 );
        /* NOTE(review): the image allocated above is leaked — it is immediately
         * replaced by the clone on the next line, on every loop iteration. */
        color_dst = cvCloneImage(diff);
        tmpImage = cvCreateImage(cvGetSize(diff), IPL_DEPTH_8U, 1);
        cvCvtColor(diff, tmpImage, CV_RGB2GRAY);
        IplImage* dst = cvCreateImage( cvGetSize(diff), 8, 1 );
        cvCanny(tmpImage, dst, 20, 60, 3 );
        CvMemStorage* storage = cvCreateMemStorage(0);
        //find all lines using Hough transform
        lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180,threshold, 150, 100 );
        double first_group, second_group;
        for(int i = 0; i < lines->total; i++ ){
            //get the slope of the line, check if it belongs to an already existing group
            CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
            double angle = atan((double)(line[1].x-line[0].x)/(double)(line[1].y-line[0].y))*180/PI;
            //starting first group
            if (count_groups==0){
                first_group = angle;
                line1 = line;
                minX1 = line[0].x;
                maxX1 = line[1].x;
                count_groups++;
            } else {
                /* NOTE(review): stripped condition — likely
                 * "angle-first_group<epsilon && angle-first_group>(epsilon*-1)". */
                if (angle-first_group(epsilon*-1)){
                    //line belongs to the first group of line..that's good
                    /* NOTE(review): stripped — likely
                     * "if (line[0].x<minX1)minX1=line[0].x; if (line[1].x>maxX1)maxX1=line[1].x;". */
                    if (line[0].xmaxX1)maxX1=line[1].x;
                } else {
                    //check if belongs to the second group
                    if ( count_groups == 2 ){
                        /* NOTE(review): stripped condition — likely
                         * "angle-second_group<epsilon && angle-second_group>(epsilon*-1)". */
                        if (angle-second_group(epsilon*-1)){
                            /* NOTE(review): stripped — likely the min/max update
                             * for minX2/maxX2, mirroring the first group. */
                            if (line[0].xmaxX2)maxX2=line[1].x;
                        }else{
                            //if not then try again with a higher threshold
                            good = false;
                            threshold+=20;
                            cout<<"Increased threshold: "<height,ymin+10));
                            /* NOTE(review): large stripped span ends just above. It most
                             * likely held: the rest of this cout ("<<threshold<<endl;")
                             * and a break, the creation of the second line group (else
                             * branch: second_group=angle; line2=line; minX2/maxX2;
                             * count_groups++), the loop/while closers, the declarations
                             * of points[4], x1, x2, y1, y2, c1, c2, ymin, ymax and
                             * point1..point4, the ymin/ymax extrapolation for line1, and
                             * "points[0]=cvPoint(minX1, max(0,ymin-10));
                             *  points[1]=cvPoint(minX1, min(color_dst->height,ymin+10));". */
                            points[2]=cvPoint(maxX1, min(color_dst->height,ymax+10));
                            points[3]=cvPoint(maxX1, max(0,ymax-10));
                            CvPoint* pts[1];
                            pts[0]=points;
                            int npts[1];
                            npts[0]=4;
                            cvPolyLine(color_dst, pts, npts,1,1, CV_RGB(0,0,0), 20, 8 );//removing the group
                            //extrapolate the second fitted segment to its extreme x
                            //values, the same way as was done for the first line
                            x1 = line2[0].x;
                            x2 = line2[1].x;
                            y1 = line2[0].y;
                            y2 = line2[1].y;
                            c1 = (double)(x1 - minX2)/(double)(x2 - minX2);
                            c2 = (double)(maxX2 - x1)/(double)(maxX2 - x2);
                            ymin = (c1*y2 - y1)/(c1-1);
                            ymax = (c2*y2 - y1)/(c2-1);
                            //guard the degenerate cases where the division above is 0/0
                            if (maxX2 == x2) ymax = y2;
                            if (minX2 == x1) ymin = y1;
                            //getting start and end of the second line
                            point3 = cvPoint(minX2, ymin);
                            point4 = cvPoint(maxX2, ymax);
                            //mask a thick band around the second line so its pixels are
                            //no longer counted as part of the object
                            points[0]=cvPoint(minX2, max(0,ymin-10));
                            points[1]=cvPoint(minX2, min(color_dst->height,ymin+10));
                            points[2]=cvPoint(maxX2, min(color_dst->height,ymax+10));
                            points[3]=cvPoint(maxX2, max(0,ymax-10));
                            pts[0]=points;
                            cvPolyLine(color_dst, pts, npts,1,1, CV_RGB(0,0,0), 20, 8 );//removing the group
                            cvLine(color_dst, point3, point4,CV_RGB(0,255,0),3, 8 ); //draw the second line!
                            cvLine(color_dst, point1, point2,CV_RGB(0,255,0),3, 8 ); //draw the first line!
                            //removing everything to the left of the left line and to the right of the right line
                            if (point4.x > point2.x){
                                if (color_dst->width > point4.x){
                                    cvRectangle(color_dst,cvPoint(point4.x,0),cvPoint(color_dst->width,color_dst->height),CV_RGB(0,0,0),CV_FILLED);
                                }
                                if (point1.x > 0){
                                    cvRectangle(color_dst,cvPoint(point1.x,0),cvPoint(0,color_dst->height),CV_RGB(0,0,0),CV_FILLED);
                                }
                            }
                            if (point4.x < point2.x){
                                if (color_dst->width > point2.x){
                                    cvRectangle(color_dst,cvPoint(point2.x,0),cvPoint(color_dst->width,color_dst->height),CV_RGB(0,0,0),CV_FILLED);
                                }
                                if (point3.x > 0){
                                    cvRectangle(color_dst,cvPoint(point3.x,0),cvPoint(0,color_dst->height),CV_RGB(0,0,0),CV_FILLED);
                                }
                            }
                            //at this point we have to lines which we drew in green...which means all the red pixels that remain on the image
                            //are supposed to be laying on the object. Make them blue (for no particular reason..just looked nicer :) )
                            unsigned char* pixels = (unsigned char*)color_dst->imageData;
                            for (int row = 1; row < color_dst->height-1; row++){
                                for (int col = 1; col < color_dst->width-1; col++){
                                    int R = pixels[ row * color_dst->widthStep + col * 3 + 2 ];
                                    if (R == 255){
                                        pixels[ row * color_dst->widthStep + col * 3 + 0 ]=255;
                                        pixels[ row * color_dst->widthStep + col * 3 + 1 ]=0;
                                        pixels[ row * color_dst->widthStep + col * 3 + 2 ]=0;
                                    }
                                }
                            }
                        }
                    }
                }
            /* NOTE(review): the "if" matching the "else continue" below was lost
             * in a stripped span — presumably "if (count_groups==2){" guarding the
             * whole reconstruction for this frame; brace nesting here cannot be
             * restored from the surviving text alone. */
            } else continue;

        //take points on planes
        //left1/left2 are the endpoints of the left-board line, right1 one
        //endpoint of the right-board line (ordered by x coordinate)
        CvPoint left1, left2, right1;
        if (point1.x < point3.x){
            left1 = point1;
            left2 = point2;
            right1 = point3;
        } else {
            left1 = point3;
            left2 = point4;
            right1 = point1;
        }
        //find 3d coordinate of the 2 points on the line on the left plane
        //(x,y,z).t() = s*R.i()*A.i()*(u,v,1).t() - R.i()*T
        CvMat* imagepoint1 = cvCreateMat( 3, 1, CV_32F );
        CV_MAT_ELEM(*imagepoint1, float, 0, 0) = left1.x;
        CV_MAT_ELEM(*imagepoint1, float, 1, 0) = left1.y;
        CV_MAT_ELEM(*imagepoint1, float, 2, 0) = 1;
        CvMat* b1 = cvCreateMat(3, 1, CV_32F);
        cvMatMul(R1iAi, imagepoint1, b1);
        //calculate scalar s based on the fact that point we take is on the wall => z coordinate is 0
        float s1 = CV_MAT_ELEM(*a1, float, 2, 0)/CV_MAT_ELEM(*b1, float, 2, 0);
        //identity is reused below as a scalar-multiplication matrix (s * I)
        CvMat* identity = cvCreateMat(3,3,CV_32F);
        cvSetIdentity(identity);
        for (int i = 0; i < 3; i++){
            CV_MAT_ELEM(*identity, float, i, i)=s1;
        }
        CvMat* temp = cvCreateMat(3,1,CV_32F);
        cvMatMul(identity,b1, temp);
        CvMat* dpoint1 = cvCreateMat(3,1,CV_32F);
        cvSub(temp, a1, dpoint1); //first 3d point on the left plane
        //same thing for the second point
        CvMat* imagepoint2 = cvCreateMat( 3, 1, CV_32F );
        CV_MAT_ELEM(*imagepoint2, float, 0, 0) = left2.x;
        CV_MAT_ELEM(*imagepoint2, float, 1, 0) = left2.y;
        CV_MAT_ELEM(*imagepoint2, float, 2, 0) = 1;
        CvMat* b2 = cvCreateMat(3, 1, CV_32F);
        cvMatMul(R1iAi, imagepoint2, b2);
        float s2 = CV_MAT_ELEM(*a1, float, 2, 0)/CV_MAT_ELEM(*b2, float, 2, 0);
        cvSetIdentity(identity, cvRealScalar(s2));
        cvMatMul(identity,b2, b2);
        CvMat* dpoint2 = cvCreateMat(3,1,CV_32F);
        cvSub(b2, a1, dpoint2); //second 3d point on the left plane
        //same for the point on the right plane
        CvMat* imagepoint3 = cvCreateMat( 3, 1, CV_32F );
        CV_MAT_ELEM(*imagepoint3, float, 0, 0) = right1.x;
        CV_MAT_ELEM(*imagepoint3, float, 1, 0) = right1.y;
        CV_MAT_ELEM(*imagepoint3, float, 2, 0) = 1;
        CvMat* b3 = cvCreateMat(3, 1, CV_32F);
        cvMatMul(R2iAi, imagepoint3, b3);
        float s3 = CV_MAT_ELEM(*a2, float, 2, 0)/CV_MAT_ELEM(*b3, float, 2, 0);
        cvSetIdentity(identity, cvRealScalar(s3));
        cvMatMul(identity,b3, b3);
        CvMat* dpoint3 = cvCreateMat(3,1,CV_32F);
        cvSub(b3, a2, dpoint3); //point on the right plane
        //convert point from the right plane into the coord. system of the left plane
        //p1 = R1.i()*[R2|T2]*p2 - R1.i()*T1
        CvMat* dpoint3left = cvCreateMat(3,1,CV_32F);
        CvMat* pw = cvCreateMat(4,1,CV_32F); //dpoint3 in homogeneous coordinates
        for (int i = 0; i<3; i++){
            CV_MAT_ELEM(*pw, float, i, 0) = CV_MAT_ELEM(*dpoint3, float, i, 0);
        }
        CV_MAT_ELEM(*pw, float, 3, 0) = 1.0;
        CvMat* r2t2pw = cvCreateMat(3,1,CV_32F);
        cvMatMul(r2t2, pw, r2t2pw);
        CvMat* r1invr2t2pw = cvCreateMat(3,1,CV_32F);
        cvMatMul(r1inv, r2t2pw, r1invr2t2pw);
        cvSub(r1invr2t2pw, a1, dpoint3left);
        //now that we have 3 non-colinear point in the same coordinate system we can find the equation of the plane
        /* A = y1 (z2 - z3) + y2 (z3 - z1) + y3 (z1 - z2)
         * B = z1 (x2 - x3) + z2 (x3 - x1) + z3 (x1 - x2)
         * C = x1 (y2 - y3) + x2 (y3 - y1) + x3 (y1 - y2)
         * - D = x1 (y2 z3 - y3 z2) + x2 (y3 z1 - y1 z3) + x3 (y1 z2 - y2 z1)
         */
        float x1 = CV_MAT_ELEM(*dpoint1, float,0,0);
        float y1 = CV_MAT_ELEM(*dpoint1, float,1,0);
        float z1 = CV_MAT_ELEM(*dpoint1, float,2,0);
        float x2 = CV_MAT_ELEM(*dpoint2, float,0,0);
        float y2 = CV_MAT_ELEM(*dpoint2, float,1,0);
        float z2 = CV_MAT_ELEM(*dpoint2, float,2,0);
        float x3 = CV_MAT_ELEM(*dpoint3left, float,0,0);
        float y3 = CV_MAT_ELEM(*dpoint3left, float,1,0);
        float z3 = CV_MAT_ELEM(*dpoint3left, float,2,0);
        float planeA = (y1 * (z2 - z3)) + (y2 * (z3 - z1)) + (y3 * (z1 - z2));
        float planeB = (z1 * (x2 - x3)) + (z2 * (x3 - x1)) + (z3 * (x1 - x2));
        float planeC = (x1 * (y2 - y3)) + (x2 * (y3 - y1)) + (x3 * (y1 - y2));
        float planeD = -((x1 * (y2 * z3 - y3 * z2)) + (x2 * (y3 * z1 - y1 * z3)) + (x3 * (y1 * z2 - y2 * z1)));
        //calculate normal to the lazer plane
        CvMat* planeNormal = cvCreateMat(3, 1, CV_32F);
        CV_MAT_ELEM(*planeNormal, float,0,0) = planeA;
        CV_MAT_ELEM(*planeNormal, float,1,0) = planeB;
        CV_MAT_ELEM(*planeNormal, float,2,0) = planeC;
        pixels = (unsigned char*)color_dst->imageData;
        unsigned char* color_pixels = (unsigned char*)image_empty->imageData;
        //go through all the pixels on the object and calculate the 3d coordinate
        for (int row = 1; row < color_dst->height-1; row++){
            for (int col = 1; col < color_dst->width-1; col++){
                //blue channel == 255 marks an object laser pixel (set above)
                int B = pixels[ row * color_dst->widthStep + col * 3];
                if (B == 255){
                    //get RGB of the pixel on the original image
                    int realB = color_pixels[ row * color_dst->widthStep + col * 3];
                    int realG = color_pixels[ row * color_dst->widthStep + col * 3 + 1];
                    int realR = color_pixels[ row * color_dst->widthStep + col * 3 + 2];
                    //Used http://www.cs.princeton.edu/courses/archive/fall00/cs426/lectures/raycast/sld017.htm for reference
                    //on how to find intersection of ray and a plane
                    float p0dotN = cvDotProduct(a1,planeNormal);
                    CvMat* vtmp = cvCreateMat(3,1,CV_32F);
                    CV_MAT_ELEM(*vtmp, float,0,0) = col;
                    CV_MAT_ELEM(*vtmp, float,1,0) = row;
                    CV_MAT_ELEM(*vtmp, float,2,0) = 1;
                    CvMat* v = cvCreateMat(3,1,CV_32F);
                    cvMatMul(R1iAi, vtmp, v); //ray direction through this pixel
                    float vdotN = cvDotProduct(v,planeNormal);
                    //ray parameter of the intersection with the laser plane
                    //NOTE(review): vdotN == 0 (ray parallel to plane) is not guarded
                    float t = (p0dotN - planeD)/vdotN;
                    cvSetIdentity(identity, cvRealScalar(t));
                    cvMatMul(identity,v,v);
                    CvMat* final = cvCreateMat(3,1,CV_32F);
                    cvSub(v,a1,final);
                    //final point is still in the coordinate system of the left plane.
                    CvMat* final_rotated = cvCreateMat(3,1,CV_32F);
                    //translate it into the coordinate system of the camera
                    cvMatMul(rotation_matrix_left,final,final_rotated);
                    cvAdd(final_rotated,translation_left, final_rotated);
                    //add point to the file (minus next to the y coordinate is there to compensate for the left-handed coordinate system of slam6d, otherwise
                    //dwarf is shown upside-down.
                    scanfile<