#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>

#include <QDebug>
#include <QThread>

#include <QVApplication>
#include <QVMPlayerCamera>
#include <QVDefaultGUI>
#include <QVImageCanvas>
#include <QVGLCanvas>

#include <QVDisjointSet>
#include <QV3DModel>
#include <QVMatrix>
#include <QVRANSAC>

#include <qvdta.h>
#include <qvip.h>
#include <qvprojective.h>

#ifndef DOXYGEN_IGNORE_THIS

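// QVHomographyPROSAC: PROSAC estimator that robustly fits a planar homography
// (a QVMatrix) to a set of point matchings (QPair<QPointF, QPointF>).
// The base class is constructed with a minimal sample size of 4 matchings, the
// minimum needed to determine a homography. Candidate matchings are ranked by a
// heuristic (see the constructor), so that matchings coherent with the previous
// frame are presumably sampled first by the PROSAC base class. A matching set is
// considered coherent when no source or destination point is used more than once.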
class QVHomographyPROSAC: public QVPROSAC< QPair<QPointF, QPointF>, QVMatrix>
{
    private:
        double maxError;

        bool testCoherentMatchings(const QList< QPair<QPointF, QPointF> > &matchings) const
        {
            const QList<QPointF> sourcePoints = getFirstPairList<QPointF>(matchings),
                                 destinationPoints = getSecondPairList<QPointF>(matchings);

            foreach (QPointF point, sourcePoints)
                if (sourcePoints.count(point) > 1)
                    return false;

            foreach (QPointF point, destinationPoints)
                if (destinationPoints.count(point) > 1)
                    return false;

            return true;
        }

        bool testCoherentMatchings(const QList< QPair<QPointF, QPointF> > &matchings, const QPair<QPointF, QPointF> matching) const
        {
            QPair<QPointF, QPointF> inlierMatching;
            foreach (inlierMatching, matchings)
                if (inlierMatching.first == matching.first || inlierMatching.second == matching.second)
                    return false;

            return true;
        }

    public:
        QVHomographyPROSAC( const QList<QPointF> &sourcePoints, const QList<QPointF> &destinationPoints,
                            const double maxError, const QList< QPair<QPointF, QPointF> > &previousMatchings):
            QVPROSAC< QPair<QPointF, QPointF>, QVMatrix>(4, 5), maxError(maxError)
        {
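            // Rank every candidate matching by the distance between its source point and
            // the source point that was matched to the same destination point in the
            // previous frame; destinations without a previous match keep a large default
            // heuristic, so temporally coherent candidates are favoured during sampling.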
            foreach (QPointF source, sourcePoints)
                foreach (QPointF destination, destinationPoints)
                {
                    double heuristic = 100000000;
                    for (int i = 0; i < previousMatchings.size(); i++)
                        if (previousMatchings.at(i).second == destination)
                            heuristic = norm2(source - previousMatchings.at(i).first);

                    addElement(QPair<QPointF, QPointF>(source, destination), heuristic);
                }
            init();
        }

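        // Model fitting: reject sample sets that reuse a source or destination point;
        // otherwise estimate the homography from the sampled matchings
        // (ComputeProjectiveHomography presumably performs a DLT-style estimation).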
        const bool fit(const QList< QPair<QPointF, QPointF> > &matchings, QVMatrix &homography)
        {
            if (!testCoherentMatchings(matchings))
                return false;

            homography = ComputeProjectiveHomography(matchings);

            return true;
        };

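        // Inlier test: a candidate matching is accepted when it does not conflict with the
        // current inlier set and its transfer error ||H(p_source) - p_destination|| is
        // below maxError.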
        const bool test(const QVMatrix &homography, const QPair<QPointF, QPointF> &matching)
        {
            if (!testCoherentMatchings(inliersSet, matching))
                return false;

            return norm2(ApplyHomography(homography, matching.first) - matching.second) < maxError;
        };
};

class TemplateCameraCalibrator: public QVWorker
{
    private:
        QList< QPair<QPointF, QPointF> > previousMatchings;
        QList<QPointF> templateFPoints;

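        // Coordinate conventions: a normalized point has x = (col - cols/2) / (cols/2) and
        // y = -(row - rows/2) / (cols/2), so the image centre maps to the origin, x spans
        // [-1, 1] and y grows upwards; denormalizePoints applies the inverse mapping.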
        const QList<QPointF> denormalizePoints(const QVImage<uChar> &image, const QList<QPointF> &points)
        {
            const double rows = image.getRows(), cols = image.getCols(), factor = cols/2;
            QList<QPointF> pointFList;

            foreach(QPointF point, points)
                pointFList.append(QPointF(cols/2 + point.x()*factor, rows/2 - point.y()*factor));
            return pointFList;
        }

        const QList<QPointF> normalizePoints(const QVImage<uChar> &image, const QList<QPointF> &points)
        {
            const double rows = image.getRows(), cols = image.getCols(), factor = cols/2;
            QList<QPointF> pointFList;

            foreach(QPointF point, points)
                pointFList.append(QPointF((point.x() - cols/2)/factor, -(point.y() - rows/2)/factor));
            return pointFList;
        }

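        // Hard-coded intrinsic calibration for a Sony DV camera at 360x288 resolution,
        // apparently expressed in the normalized image coordinates used above.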
        const QVMatrix getSonyDV360x288CameraMatrix()
        {
            QVMatrix K(3,3);
            K(0,0) = 2.691209;  K(0,1) = -0.047614; K(0,2) = 0.095754;
            K(1,0) = 0.000000;  K(1,1) = 2.925366;  K(1,2) = -0.151462;
            K(2,0) = 0.000000;  K(2,1) = 0.000000;  K(2,2) = 1.000000;

            return K;
        }

    public:
        TemplateCameraCalibrator(QString name, QString defaultTemplateFileName): QVWorker(name)
        {
            addProperty<double>("Max pixel dist", inputFlag, 0.018, "Maximal distance for two points to be considered coincident", 0.0, 0.1);
            addProperty<int>("Max iterations", inputFlag, 250, "Maximal number of PROSAC iterations", 1, 5000);
            addProperty<int>("Window size", inputFlag, 10, "Window size for the corner response search", 1, 100);
            addProperty<int>("Point number", inputFlag, 5, "Number of corner points to detect", 1, 100);

            addProperty< QVImage<uChar,1> >("Input image", inputFlag|outputFlag);
            addProperty< QList<QPointF> >("Corners", outputFlag);

            addProperty< QVMatrix >("Homography", outputFlag);
            addProperty< QVMatrix >("Extrinsic matrix", outputFlag);
            addProperty< QVMatrix >("Intrinsic matrix", outputFlag);

            addProperty<QString>("TemplateFile", inputFlag, defaultTemplateFileName, "Path to the file containing the template");

            QString templateFilePath = getPropertyValue<QString>("TemplateFile");
            QVImage<uChar> templateImage;
            if (QVMPlayerCamera::getFrame(templateFilePath, templateImage))
            {
                const uInt rows = templateImage.getRows(), cols = templateImage.getCols();
                QVImage<sFloat> cornerResponseTemplateImage(cols, rows);
                FilterHessianCornerResponseImage(templateImage, cornerResponseTemplateImage);

                QList<QPointF> pointFList = GetMaximalResponsePoints3(cornerResponseTemplateImage);
                templateFPoints = normalizePoints(cornerResponseTemplateImage, pointFList.mid(MAX(0, pointFList.size()-5)));

                if (templateFPoints.size() == 5)
                    foreach (QPointF point, templateFPoints)
                        previousMatchings.append(QPair<QPointF, QPointF>(point, point));
                else
                    setLastError("Can't get five corner points on template file");
            }
            else
                setLastError(QString() + "Can't open template file '" + templateFilePath + "'.");
        }

        void iterate()
        {
            const QVImage<uChar> image = getPropertyValue< QVImage<uChar,1> >("Input image");
            const uInt rows = image.getRows(), cols = image.getCols(),
                       windowSize = getPropertyValue<int>("Window size"),
                       maxIterations = getPropertyValue<int>("Max iterations"),
                       pointNumber = getPropertyValue<int>("Point number");
            const double maxPixelDist = getPropertyValue<double>("Max pixel dist");

            timeFlag("Read input properties");

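            // Corner detection: build a Hessian-based corner response image and keep the
            // points with the strongest responses as candidate corners.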
            QVImage<sFloat> cornerResponseImage(cols, rows);
            FilterHessianCornerResponseImage(image, cornerResponseImage);
            timeFlag("Corner response image");

            QList<QPointF> maximalPoints = GetMaximalResponsePoints3(cornerResponseImage);
            timeFlag("Get maximal hot points");

            QList<QPointF> imageFPoints =
                normalizePoints(image, maximalPoints.mid(MAX(0, maximalPoints.size() - pointNumber)));
            timeFlag("Hot point list to vector list");

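            // Robust homography estimation: run the PROSAC estimator on the candidate
            // matchings between the detected image corners and the template corners; on
            // success, keep the best homography and reuse its inlier matchings as the
            // temporal prior for the next frame.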
            bool matchingFound = false;
            QVMatrix Hfound = QVMatrix::identity(3);

            if (maximalPoints.size() >= 5)
            {
                QVHomographyPROSAC prosac(imageFPoints, templateFPoints, maxPixelDist, previousMatchings);

                if ((matchingFound = prosac.iterate(maxIterations)))    // intentional assignment
                {
                    Hfound = prosac.getBestModel();
                    previousMatchings = prosac.getBestInliers();
                }
            }

            timeFlag("RANSAC");

            Hfound = Hfound / Hfound(2,2);

            setPropertyValue< QList<QPointF> >("Corners", denormalizePoints(image, getFirstPairList<QPointF>(previousMatchings)));
            timeFlag("Draw consensus points");

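            // Camera pose recovery: combine the known intrinsic matrix with the
            // pseudo-inverse of the estimated homography to obtain the extrinsic camera
            // matrix; identity matrices are published when no matching was found.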
            if (matchingFound)
            {
                QVMatrix intrinsicCameraMatrix = getSonyDV360x288CameraMatrix(), extrinsicCameraMatrix;

                GetExtrinsicCameraMatrixFromHomography(intrinsicCameraMatrix, pseudoInverse(Hfound), extrinsicCameraMatrix);

                setPropertyValue<QVMatrix>("Homography", Hfound);
                setPropertyValue<QVMatrix>("Intrinsic matrix", intrinsicCameraMatrix);
                setPropertyValue<QVMatrix>("Extrinsic matrix", extrinsicCameraMatrix);
            }
            else
            {
                setPropertyValue<QVMatrix>("Intrinsic matrix", QVMatrix::identity(3));
                setPropertyValue<QVMatrix>("Extrinsic matrix", QVMatrix::identity(4));
            }

            timeFlag("Decompose homography matrix");
            timeFlag("Draw corners");
        }
};

class ImageHomographyWarperWorker: public QVWorker
{
    private:
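        // Forward warping: every source pixel is mapped to normalized coordinates,
        // transformed by the homography, scaled back to pixel coordinates with the zoom
        // factor and copied into the destination image (unmapped pixels stay black).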
        void myWarpPerspective(const QVImage<uChar> &src, QVImage<uChar> &dest, const QVMatrix &H, const double zoom)
        {
            const double cols = src.getCols(), rows = src.getRows();

            QVMatrix sourcePoints(cols*rows, 3);

            for (double col = 0; col < cols; col++)
                for (double row = 0; row < rows; row++)
                {
                    sourcePoints(col*rows + row, 0) = 2*(col - cols/2)/cols;
                    sourcePoints(col*rows + row, 1) = -2*(row - rows/2)/cols;
                    sourcePoints(col*rows + row, 2) = 1;
                }

            const QVMatrix destinationPoints = (sourcePoints * H.transpose()).rowHomogeneousNormalize();

            for (double col = 0; col < cols; col++)
                for (double row = 0; row < rows; row++)
                {
                    const QPoint p2(col, row),
                                 p1( +zoom*destinationPoints(col*rows + row, 0) + cols/2,
                                     -zoom*destinationPoints(col*rows + row, 1) + rows/2);
                    if (src.getROI().contains(p2) && dest.getROI().contains(p1))
                        dest(p1) = src(p2);
                }
        }

    public:
        ImageHomographyWarperWorker(QString name): QVWorker(name)
        {
            addProperty<double>("Zoom", inputFlag, 30, "Size of the rectified template", 1, 100);
            addProperty< QVMatrix >("Homography", inputFlag);

            addProperty< QVImage<uChar,1> >("Input image", inputFlag|outputFlag);
            addProperty< QVImage<uChar,1> >("Warped image", outputFlag);
        }

        void iterate()
        {
            const QVImage<uChar> image = getPropertyValue< QVImage<uChar,1> >("Input image");
            const uInt rows = image.getRows(), cols = image.getCols();
            const double zoom = getPropertyValue<double>("Zoom");
            const QVMatrix Hfound = getPropertyValue< QVMatrix >("Homography");
            timeFlag("Read input properties");

            QVImage<uChar> warped(cols, rows);
            Set(0, warped);
            myWarpPerspective(image, warped, Hfound, zoom);
            setPropertyValue< QVImage<uChar,1> >("Warped image", warped);
            timeFlag("Image warping");
        }
};

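// Simple wire-frame model of a camera: a square base of side 2*baseSize, red/green/blue
// segments for the local X, Y and Z axes, and four grey segments joining a point at
// height 'fov' to the corners of the base.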
const QV3DModel cameraModel(const double baseSize, const double fov)
{
    QV3DModel model;

    // Square base.
    model.addSegment(-baseSize,+baseSize,0, -baseSize,-baseSize,0, 255,255,255);
    model.addSegment(+baseSize,-baseSize,0, -baseSize,-baseSize,0, 255,255,255);
    model.addSegment(+baseSize,+baseSize,0, +baseSize,-baseSize,0, 255,255,255);
    model.addSegment(+baseSize,+baseSize,0, -baseSize,+baseSize,0, 255,255,255);

    // Local coordinate axes.
    model.addSegment(0,0,0, +baseSize*1.5,0,0, 255,0,0);
    model.addSegment(0,0,0, 0,+baseSize*1.5,0, 0,255,0);
    model.addSegment(0,0,0, 0,0,+baseSize*1.5, 0,0,255);

    // Segments from the apex to the base corners.
    model.addSegment(0,0,+fov, +baseSize,+baseSize,0, 192,192,192);
    model.addSegment(0,0,+fov, +baseSize,-baseSize,0, 192,192,192);
    model.addSegment(0,0,+fov, -baseSize,-baseSize,0, 192,192,192);
    model.addSegment(0,0,+fov, -baseSize,+baseSize,0, 192,192,192);

    return model;
}

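// Processing graph: a video camera feeds the template calibrator; the estimated
// homography also drives an image warper that rectifies the template. Two image
// canvases display the detected corners and the warped image, and a GL canvas shows
// the estimated camera pose in 3D through the extrinsic matrix.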
int main(int argc, char *argv[])
{
    QVApplication app(argc, argv,
        "Example program for QVision library. Obtains intrinsic and extrinsic camera parameters."
        );

    QVMPlayerCamera camera("Video");

    TemplateCameraCalibrator templateCalibrator("Corners Worker", "template3d.gif");
    camera.link(&templateCalibrator, "Input image");

    ImageHomographyWarperWorker warper("Image warper");
    templateCalibrator.linkProperty("Input image", &warper, "Input image", QVWorker::SynchronousLink);
    templateCalibrator.linkProperty("Homography", &warper, "Homography", QVWorker::SynchronousLink);

    QVImageCanvas imageCanvas("Corners");
    imageCanvas.linkProperty(templateCalibrator, "Input image");
    imageCanvas.linkProperty(templateCalibrator, "Corners", Qt::red, true);

    QVImageCanvas imageCanvas2("Warped");
    imageCanvas2.linkProperty(warper, "Warped image");

    QVGLCanvas glCanvas("3d camera location");
    glCanvas.add(QV3DModel::referenceCoordinates(0.75, false), "Coordinate axis");
    glCanvas.add(QV3DModel::grid(0.25, 0.25, 15, 15), "Floor grid");

    glCanvas.add(cameraModel(0.25, 0.5), "Camera");
    glCanvas.linkModelMatrix(templateCalibrator, "Extrinsic matrix", "Camera");

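// Helper macro (not used by default): instantiates an additional camera, template
// calibrator and corner canvas, and registers another camera model in the GL canvas,
// so several video sources can be calibrated against the same template simultaneously.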
#define Camera(N) \
    QVMPlayerCamera camera ## N("Video " # N); \
    TemplateCameraCalibrator templateCalibrator ## N("Corners Worker " # N, "template3d.gif"); \
    camera ## N.link(&templateCalibrator ## N, "Input image"); \
    \
    QVImageCanvas imageCanvas ## N("Corners " # N); \
    imageCanvas ## N.linkProperty(templateCalibrator ## N, "Input image"); \
    imageCanvas ## N.linkProperty(templateCalibrator ## N, "Corners", Qt::red, true); \
    \
    glCanvas.add(cameraModel(0.25, 0.5), "Camera" # N); \
    glCanvas.linkModelMatrix(templateCalibrator ## N, "Extrinsic matrix", "Camera" # N);

    QVDefaultGUI interface;

    return app.exec();
}

#endif