Cleanup: Libmv, clang-format

The formatting is based on the Google style which was already used in
the Libmv project, but is now consistently applied to the sources of
the library itself and to the C-API. With time the C-API will likely
be removed, and it is easier to have it follow the Libmv style in the
meantime, hence the divergence from Blender's style.

There are quite a few exceptions (clang-format off) in the code around
Eigen matrix initialization. This is rather annoying; there might be a
neater way to keep the initialization readable without such exceptions.
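
For illustration, the kind of initialization that typically needs the
exception looks like this (a hypothetical sketch, not code taken from
this change; the function and parameter names are made up):

  #include <Eigen/Core>

  Eigen::Matrix3d MakeCalibrationMatrix(double focal, double cx, double cy) {
    Eigen::Matrix3d K;
    // Keep the rows visually aligned; clang-format would otherwise
    // re-flow the comma-initializer onto fewer, harder to read lines.
    // clang-format off
    K << focal, 0.0,   cx,
         0.0,   focal, cy,
         0.0,   0.0,   1.0;
    // clang-format on
    return K;
  }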

There could be some places where readability of matrix initialization
was lost, as the change is quite big. If this has happened, it is
easier to address readability once actually working on that code.

This change also made it possible to spot some missing header guards,
so that's nice.
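
For reference, the guards follow the usual pattern (illustrative file
name, not one of the headers touched here):

  #ifndef LIBMV_C_API_EXAMPLE_H_
  #define LIBMV_C_API_EXAMPLE_H_

  // Declarations go here.

  #endif  // LIBMV_C_API_EXAMPLE_H_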

This is done in the bundled version, as the upstream library still
needs some of the recent development ported over from the bundle to
upstream.

There should be no functional changes.
Sergey Sharybin 2021-03-05 14:42:30 +01:00
parent e0442a955b
commit 0dd9a4a576
151 changed files with 4893 additions and 4469 deletions


@ -1,2 +1,34 @@
DisableFormat: true
SortIncludes: false
BasedOnStyle: Google
ColumnLimit: 80
Standard: Cpp11
# Indent nested preprocessor.
# #ifdef Foo
# # include <nested>
# #endif
IndentPPDirectives: AfterHash
# For the cases when namespace is closing with a wrong comment
FixNamespaceComments: true
AllowShortFunctionsOnASingleLine: InlineOnly
AllowShortBlocksOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: true
# No bin packing, every argument is on its own line.
BinPackArguments: false
BinPackParameters: false
# Ensure pointer alignment.
# ObjectType* object;
PointerAlignment: Left
DerivePointerAlignment: false
AlignEscapedNewlines: Right
IncludeBlocks: Preserve
SortIncludes: true
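
As a rough illustration of what these settings produce (a hypothetical
declaration, not taken from the sources in this change): pointers bind
to the type on the left, and once a declaration no longer fits the
80-column limit every parameter goes on its own line.

  void libmv_exampleSetOptions(libmv_Example* example,
                               const libmv_ExampleOptions* options,
                               int flags);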


@ -22,15 +22,15 @@
#include "intern/utildefines.h"
#include "libmv/autotrack/autotrack.h"
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
using mv::AutoTrack;
using mv::FrameAccessor;
using mv::Marker;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor) {
return (libmv_AutoTrack*) LIBMV_OBJECT_NEW(AutoTrack,
(FrameAccessor*) frame_accessor);
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* frame_accessor) {
return (libmv_AutoTrack*)LIBMV_OBJECT_NEW(AutoTrack,
(FrameAccessor*)frame_accessor);
}
void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack) {
@ -39,7 +39,7 @@ void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack) {
void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
const libmv_AutoTrackOptions* options) {
AutoTrack *autotrack = ((AutoTrack*) libmv_autotrack);
AutoTrack* autotrack = ((AutoTrack*)libmv_autotrack);
libmv_configureTrackRegionOptions(options->track_region,
&autotrack->options.track_region);
@ -51,18 +51,15 @@ void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
libmv_Marker *libmv_tracked_marker,
libmv_Marker* libmv_tracked_marker,
libmv_TrackRegionResult* libmv_result) {
Marker tracked_marker;
TrackRegionOptions options;
TrackRegionResult result;
libmv_apiMarkerToMarker(*libmv_tracked_marker, &tracked_marker);
libmv_configureTrackRegionOptions(*libmv_options,
&options);
bool ok = (((AutoTrack*) libmv_autotrack)->TrackMarker(&tracked_marker,
&result,
&options));
libmv_configureTrackRegionOptions(*libmv_options, &options);
bool ok = (((AutoTrack*)libmv_autotrack)
->TrackMarker(&tracked_marker, &result, &options));
libmv_markerToApiMarker(tracked_marker, libmv_tracked_marker);
libmv_regionTrackergetResult(result, libmv_result);
return ok && result.is_usable();
@ -72,7 +69,7 @@ void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_Marker* libmv_marker) {
Marker marker;
libmv_apiMarkerToMarker(*libmv_marker, &marker);
((AutoTrack*) libmv_autotrack)->AddMarker(marker);
((AutoTrack*)libmv_autotrack)->AddMarker(marker);
}
void libmv_autoTrackSetMarkers(libmv_AutoTrack* libmv_autotrack,
@ -87,19 +84,17 @@ void libmv_autoTrackSetMarkers(libmv_AutoTrack* libmv_autotrack,
for (size_t i = 0; i < num_markers; ++i) {
libmv_apiMarkerToMarker(libmv_marker[i], &markers[i]);
}
((AutoTrack*) libmv_autotrack)->SetMarkers(&markers);
((AutoTrack*)libmv_autotrack)->SetMarkers(&markers);
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int clip,
int frame,
int track,
libmv_Marker *libmv_marker) {
libmv_Marker* libmv_marker) {
Marker marker;
int ok = ((AutoTrack*) libmv_autotrack)->GetMarker(clip,
frame,
track,
&marker);
int ok =
((AutoTrack*)libmv_autotrack)->GetMarker(clip, frame, track, &marker);
if (ok) {
libmv_markerToApiMarker(marker, libmv_marker);
}


@ -21,9 +21,9 @@
#define LIBMV_C_API_AUTOTRACK_H_
#include "intern/frame_accessor.h"
#include "intern/tracksN.h"
#include "intern/track_region.h"
#include "intern/region.h"
#include "intern/track_region.h"
#include "intern/tracksN.h"
#ifdef __cplusplus
extern "C" {
@ -36,7 +36,7 @@ typedef struct libmv_AutoTrackOptions {
libmv_Region search_region;
} libmv_AutoTrackOptions;
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor);
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* frame_accessor);
void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack);
@ -45,7 +45,7 @@ void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
libmv_Marker *libmv_tracker_marker,
libmv_Marker* libmv_tracker_marker,
libmv_TrackRegionResult* libmv_result);
void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
@ -59,7 +59,7 @@ int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int clip,
int frame,
int track,
libmv_Marker *libmv_marker);
libmv_Marker* libmv_marker);
#ifdef __cplusplus
}


@ -21,62 +21,56 @@
#include "intern/utildefines.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
using libmv::BrownCameraIntrinsics;
using libmv::CameraIntrinsics;
using libmv::DivisionCameraIntrinsics;
using libmv::PolynomialCameraIntrinsics;
using libmv::NukeCameraIntrinsics;
using libmv::BrownCameraIntrinsics;
using libmv::PolynomialCameraIntrinsics;
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options) {
CameraIntrinsics *camera_intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
return (libmv_CameraIntrinsics *) camera_intrinsics;
CameraIntrinsics* camera_intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
return (libmv_CameraIntrinsics*)camera_intrinsics;
}
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* libmv_intrinsics) {
const CameraIntrinsics *orig_intrinsics =
(const CameraIntrinsics *) libmv_intrinsics;
const CameraIntrinsics* orig_intrinsics =
(const CameraIntrinsics*)libmv_intrinsics;
CameraIntrinsics *new_intrinsics = NULL;
CameraIntrinsics* new_intrinsics = NULL;
switch (orig_intrinsics->GetDistortionModelType()) {
case libmv::DISTORTION_MODEL_POLYNOMIAL:
{
const PolynomialCameraIntrinsics *polynomial_intrinsics =
case libmv::DISTORTION_MODEL_POLYNOMIAL: {
const PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics,
*polynomial_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_DIVISION:
{
const DivisionCameraIntrinsics *division_intrinsics =
new_intrinsics =
LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics, *polynomial_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_DIVISION: {
const DivisionCameraIntrinsics* division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics,
*division_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_NUKE:
{
const NukeCameraIntrinsics *nuke_intrinsics =
new_intrinsics =
LIBMV_OBJECT_NEW(DivisionCameraIntrinsics, *division_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_NUKE: {
const NukeCameraIntrinsics* nuke_intrinsics =
static_cast<const NukeCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics,
*nuke_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_BROWN:
{
const BrownCameraIntrinsics *brown_intrinsics =
new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics, *nuke_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_BROWN: {
const BrownCameraIntrinsics* brown_intrinsics =
static_cast<const BrownCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(BrownCameraIntrinsics,
*brown_intrinsics);
break;
}
default:
assert(!"Unknown distortion model");
new_intrinsics =
LIBMV_OBJECT_NEW(BrownCameraIntrinsics, *brown_intrinsics);
break;
}
default: assert(!"Unknown distortion model");
}
return (libmv_CameraIntrinsics *) new_intrinsics;
return (libmv_CameraIntrinsics*)new_intrinsics;
}
void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics) {
@ -86,7 +80,7 @@ void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics) {
void libmv_cameraIntrinsicsUpdate(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_CameraIntrinsics* libmv_intrinsics) {
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
@ -115,191 +109,173 @@ void libmv_cameraIntrinsicsUpdate(
}
switch (libmv_camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_POLYNOMIAL);
case LIBMV_DISTORTION_MODEL_POLYNOMIAL: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_POLYNOMIAL);
PolynomialCameraIntrinsics *polynomial_intrinsics =
(PolynomialCameraIntrinsics *) camera_intrinsics;
PolynomialCameraIntrinsics* polynomial_intrinsics =
(PolynomialCameraIntrinsics*)camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->polynomial_k1;
double k2 = libmv_camera_intrinsics_options->polynomial_k2;
double k3 = libmv_camera_intrinsics_options->polynomial_k3;
double k1 = libmv_camera_intrinsics_options->polynomial_k1;
double k2 = libmv_camera_intrinsics_options->polynomial_k2;
double k3 = libmv_camera_intrinsics_options->polynomial_k3;
if (polynomial_intrinsics->k1() != k1 ||
polynomial_intrinsics->k2() != k2 ||
polynomial_intrinsics->k3() != k3) {
polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
}
break;
if (polynomial_intrinsics->k1() != k1 ||
polynomial_intrinsics->k2() != k2 ||
polynomial_intrinsics->k3() != k3) {
polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
}
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_DIVISION);
DivisionCameraIntrinsics* division_intrinsics =
(DivisionCameraIntrinsics*)camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->division_k1;
double k2 = libmv_camera_intrinsics_options->division_k2;
if (division_intrinsics->k1() != k1 || division_intrinsics->k2() != k2) {
division_intrinsics->SetDistortion(k1, k2);
}
case LIBMV_DISTORTION_MODEL_DIVISION:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_DIVISION);
break;
}
DivisionCameraIntrinsics *division_intrinsics =
(DivisionCameraIntrinsics *) camera_intrinsics;
case LIBMV_DISTORTION_MODEL_NUKE: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_NUKE);
double k1 = libmv_camera_intrinsics_options->division_k1;
double k2 = libmv_camera_intrinsics_options->division_k2;
NukeCameraIntrinsics* nuke_intrinsics =
(NukeCameraIntrinsics*)camera_intrinsics;
if (division_intrinsics->k1() != k1 ||
division_intrinsics->k2() != k2) {
division_intrinsics->SetDistortion(k1, k2);
}
double k1 = libmv_camera_intrinsics_options->nuke_k1;
double k2 = libmv_camera_intrinsics_options->nuke_k2;
break;
if (nuke_intrinsics->k1() != k1 || nuke_intrinsics->k2() != k2) {
nuke_intrinsics->SetDistortion(k1, k2);
}
case LIBMV_DISTORTION_MODEL_NUKE:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_NUKE);
break;
}
NukeCameraIntrinsics *nuke_intrinsics =
(NukeCameraIntrinsics *) camera_intrinsics;
case LIBMV_DISTORTION_MODEL_BROWN: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_BROWN);
double k1 = libmv_camera_intrinsics_options->nuke_k1;
double k2 = libmv_camera_intrinsics_options->nuke_k2;
BrownCameraIntrinsics* brown_intrinsics =
(BrownCameraIntrinsics*)camera_intrinsics;
if (nuke_intrinsics->k1() != k1 ||
nuke_intrinsics->k2() != k2) {
nuke_intrinsics->SetDistortion(k1, k2);
}
double k1 = libmv_camera_intrinsics_options->brown_k1;
double k2 = libmv_camera_intrinsics_options->brown_k2;
double k3 = libmv_camera_intrinsics_options->brown_k3;
double k4 = libmv_camera_intrinsics_options->brown_k4;
break;
if (brown_intrinsics->k1() != k1 || brown_intrinsics->k2() != k2 ||
brown_intrinsics->k3() != k3 || brown_intrinsics->k4() != k4) {
brown_intrinsics->SetRadialDistortion(k1, k2, k3, k4);
}
case LIBMV_DISTORTION_MODEL_BROWN:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_BROWN);
double p1 = libmv_camera_intrinsics_options->brown_p1;
double p2 = libmv_camera_intrinsics_options->brown_p2;
BrownCameraIntrinsics *brown_intrinsics =
(BrownCameraIntrinsics *) camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->brown_k1;
double k2 = libmv_camera_intrinsics_options->brown_k2;
double k3 = libmv_camera_intrinsics_options->brown_k3;
double k4 = libmv_camera_intrinsics_options->brown_k4;
if (brown_intrinsics->k1() != k1 ||
brown_intrinsics->k2() != k2 ||
brown_intrinsics->k3() != k3 ||
brown_intrinsics->k4() != k4) {
brown_intrinsics->SetRadialDistortion(k1, k2, k3, k4);
}
double p1 = libmv_camera_intrinsics_options->brown_p1;
double p2 = libmv_camera_intrinsics_options->brown_p2;
if (brown_intrinsics->p1() != p1 || brown_intrinsics->p2() != p2) {
brown_intrinsics->SetTangentialDistortion(p1, p2);
}
break;
if (brown_intrinsics->p1() != p1 || brown_intrinsics->p2() != p2) {
brown_intrinsics->SetTangentialDistortion(p1, p2);
}
break;
}
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
}
void libmv_cameraIntrinsicsSetThreads(libmv_CameraIntrinsics* libmv_intrinsics,
int threads) {
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
camera_intrinsics->SetThreads(threads);
}
void libmv_cameraIntrinsicsExtractOptions(
const libmv_CameraIntrinsics* libmv_intrinsics,
libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
const CameraIntrinsics *camera_intrinsics =
(const CameraIntrinsics *) libmv_intrinsics;
const CameraIntrinsics* camera_intrinsics =
(const CameraIntrinsics*)libmv_intrinsics;
// Fill in options which are common for all distortion models.
camera_intrinsics_options->focal_length = camera_intrinsics->focal_length();
camera_intrinsics_options->principal_point_x =
camera_intrinsics->principal_point_x();
camera_intrinsics->principal_point_x();
camera_intrinsics_options->principal_point_y =
camera_intrinsics->principal_point_y();
camera_intrinsics->principal_point_y();
camera_intrinsics_options->image_width = camera_intrinsics->image_width();
camera_intrinsics_options->image_height = camera_intrinsics->image_height();
switch (camera_intrinsics->GetDistortionModelType()) {
case libmv::DISTORTION_MODEL_POLYNOMIAL:
{
const PolynomialCameraIntrinsics *polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
case libmv::DISTORTION_MODEL_POLYNOMIAL: {
const PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_POLYNOMIAL;
camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
break;
}
camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
break;
}
case libmv::DISTORTION_MODEL_DIVISION:
{
const DivisionCameraIntrinsics *division_intrinsics =
static_cast<const DivisionCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
case libmv::DISTORTION_MODEL_DIVISION: {
const DivisionCameraIntrinsics* division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_DIVISION;
camera_intrinsics_options->division_k1 = division_intrinsics->k1();
camera_intrinsics_options->division_k2 = division_intrinsics->k2();
break;
}
camera_intrinsics_options->division_k1 = division_intrinsics->k1();
camera_intrinsics_options->division_k2 = division_intrinsics->k2();
break;
}
case libmv::DISTORTION_MODEL_NUKE:
{
const NukeCameraIntrinsics *nuke_intrinsics =
static_cast<const NukeCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_NUKE;
camera_intrinsics_options->nuke_k1 = nuke_intrinsics->k1();
camera_intrinsics_options->nuke_k2 = nuke_intrinsics->k2();
break;
}
case libmv::DISTORTION_MODEL_NUKE: {
const NukeCameraIntrinsics* nuke_intrinsics =
static_cast<const NukeCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_NUKE;
camera_intrinsics_options->nuke_k1 = nuke_intrinsics->k1();
camera_intrinsics_options->nuke_k2 = nuke_intrinsics->k2();
break;
}
case libmv::DISTORTION_MODEL_BROWN:
{
const BrownCameraIntrinsics *brown_intrinsics =
static_cast<const BrownCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
case libmv::DISTORTION_MODEL_BROWN: {
const BrownCameraIntrinsics* brown_intrinsics =
static_cast<const BrownCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_BROWN;
camera_intrinsics_options->brown_k1 = brown_intrinsics->k1();
camera_intrinsics_options->brown_k2 = brown_intrinsics->k2();
camera_intrinsics_options->brown_k3 = brown_intrinsics->k3();
camera_intrinsics_options->brown_k4 = brown_intrinsics->k4();
camera_intrinsics_options->brown_p1 = brown_intrinsics->p1();
camera_intrinsics_options->brown_p2 = brown_intrinsics->p2();
break;
}
camera_intrinsics_options->brown_k1 = brown_intrinsics->k1();
camera_intrinsics_options->brown_k2 = brown_intrinsics->k2();
camera_intrinsics_options->brown_k3 = brown_intrinsics->k3();
camera_intrinsics_options->brown_k4 = brown_intrinsics->k4();
camera_intrinsics_options->brown_p1 = brown_intrinsics->p1();
camera_intrinsics_options->brown_p2 = brown_intrinsics->p2();
break;
}
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
}
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char *source_image,
const unsigned char* source_image,
int width,
int height,
float overscan,
int channels,
unsigned char* destination_image) {
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
camera_intrinsics->UndistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
camera_intrinsics->UndistortBuffer(
source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsUndistortFloat(
@ -310,28 +286,22 @@ void libmv_cameraIntrinsicsUndistortFloat(
float overscan,
int channels,
float* destination_image) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->UndistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->UndistortBuffer(
source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char *source_image,
const unsigned char* source_image,
int width,
int height,
float overscan,
int channels,
unsigned char *destination_image) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->DistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
unsigned char* destination_image) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->DistortBuffer(
source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsDistortFloat(
@ -342,12 +312,9 @@ void libmv_cameraIntrinsicsDistortFloat(
float overscan,
int channels,
float* destination_image) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->DistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->DistortBuffer(
source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsApply(
@ -356,7 +323,7 @@ void libmv_cameraIntrinsicsApply(
double y,
double* x1,
double* y1) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->ApplyIntrinsics(x, y, x1, y1);
}
@ -366,7 +333,7 @@ void libmv_cameraIntrinsicsInvert(
double y,
double* x1,
double* y1) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->InvertIntrinsics(x, y, x1, y1);
}
@ -381,69 +348,63 @@ static void libmv_cameraIntrinsicsFillFromOptions(
camera_intrinsics_options->principal_point_y);
camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
camera_intrinsics_options->image_height);
camera_intrinsics_options->image_height);
switch (camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
{
PolynomialCameraIntrinsics *polynomial_intrinsics =
case LIBMV_DISTORTION_MODEL_POLYNOMIAL: {
PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<PolynomialCameraIntrinsics*>(camera_intrinsics);
polynomial_intrinsics->SetRadialDistortion(
camera_intrinsics_options->polynomial_k1,
camera_intrinsics_options->polynomial_k2,
camera_intrinsics_options->polynomial_k3);
polynomial_intrinsics->SetRadialDistortion(
camera_intrinsics_options->polynomial_k1,
camera_intrinsics_options->polynomial_k2,
camera_intrinsics_options->polynomial_k3);
break;
}
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION:
{
DivisionCameraIntrinsics *division_intrinsics =
case LIBMV_DISTORTION_MODEL_DIVISION: {
DivisionCameraIntrinsics* division_intrinsics =
static_cast<DivisionCameraIntrinsics*>(camera_intrinsics);
division_intrinsics->SetDistortion(
camera_intrinsics_options->division_k1,
camera_intrinsics_options->division_k2);
break;
}
division_intrinsics->SetDistortion(
camera_intrinsics_options->division_k1,
camera_intrinsics_options->division_k2);
break;
}
case LIBMV_DISTORTION_MODEL_NUKE:
{
NukeCameraIntrinsics *nuke_intrinsics =
case LIBMV_DISTORTION_MODEL_NUKE: {
NukeCameraIntrinsics* nuke_intrinsics =
static_cast<NukeCameraIntrinsics*>(camera_intrinsics);
nuke_intrinsics->SetDistortion(
camera_intrinsics_options->nuke_k1,
camera_intrinsics_options->nuke_k2);
break;
}
nuke_intrinsics->SetDistortion(camera_intrinsics_options->nuke_k1,
camera_intrinsics_options->nuke_k2);
break;
}
case LIBMV_DISTORTION_MODEL_BROWN:
{
BrownCameraIntrinsics *brown_intrinsics =
case LIBMV_DISTORTION_MODEL_BROWN: {
BrownCameraIntrinsics* brown_intrinsics =
static_cast<BrownCameraIntrinsics*>(camera_intrinsics);
brown_intrinsics->SetRadialDistortion(
camera_intrinsics_options->brown_k1,
camera_intrinsics_options->brown_k2,
camera_intrinsics_options->brown_k3,
camera_intrinsics_options->brown_k4);
brown_intrinsics->SetTangentialDistortion(
brown_intrinsics->SetRadialDistortion(
camera_intrinsics_options->brown_k1,
camera_intrinsics_options->brown_k2,
camera_intrinsics_options->brown_k3,
camera_intrinsics_options->brown_k4);
brown_intrinsics->SetTangentialDistortion(
camera_intrinsics_options->brown_p1,
camera_intrinsics_options->brown_p2);
break;
}
break;
}
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
}
CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
const libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
CameraIntrinsics *camera_intrinsics = NULL;
CameraIntrinsics* camera_intrinsics = NULL;
switch (camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
camera_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics);
@ -457,8 +418,7 @@ CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
case LIBMV_DISTORTION_MODEL_BROWN:
camera_intrinsics = LIBMV_OBJECT_NEW(BrownCameraIntrinsics);
break;
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
libmv_cameraIntrinsicsFillFromOptions(camera_intrinsics_options,
camera_intrinsics);


@ -56,10 +56,10 @@ typedef struct libmv_CameraIntrinsicsOptions {
double brown_p1, brown_p2;
} libmv_CameraIntrinsicsOptions;
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options);
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* libmv_intrinsics);
void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics);
@ -76,7 +76,7 @@ void libmv_cameraIntrinsicsExtractOptions(
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char *source_image,
const unsigned char* source_image,
int width,
int height,
float overscan,
@ -94,12 +94,12 @@ void libmv_cameraIntrinsicsUndistortFloat(
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char *source_image,
const unsigned char* source_image,
int width,
int height,
float overscan,
int channels,
unsigned char *destination_image);
unsigned char* destination_image);
void libmv_cameraIntrinsicsDistortFloat(
const libmv_CameraIntrinsics* libmv_intrinsics,
@ -131,7 +131,7 @@ void libmv_cameraIntrinsicsInvert(
#ifdef __cplusplus
namespace libmv {
class CameraIntrinsics;
class CameraIntrinsics;
}
libmv::CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(


@ -34,7 +34,7 @@ struct libmv_Features {
namespace {
libmv_Features *libmv_featuresFromVector(
libmv_Features* libmv_featuresFromVector(
const libmv::vector<Feature>& features) {
libmv_Features* libmv_features = LIBMV_STRUCT_NEW(libmv_Features, 1);
int count = features.size();
@ -50,12 +50,12 @@ libmv_Features *libmv_featuresFromVector(
return libmv_features;
}
void libmv_convertDetectorOptions(libmv_DetectOptions *options,
DetectOptions *detector_options) {
void libmv_convertDetectorOptions(libmv_DetectOptions* options,
DetectOptions* detector_options) {
switch (options->detector) {
#define LIBMV_CONVERT(the_detector) \
case LIBMV_DETECTOR_ ## the_detector: \
detector_options->type = DetectOptions::the_detector; \
#define LIBMV_CONVERT(the_detector) \
case LIBMV_DETECTOR_##the_detector: \
detector_options->type = DetectOptions::the_detector; \
break;
LIBMV_CONVERT(FAST)
LIBMV_CONVERT(MORAVEC)
@ -72,7 +72,7 @@ void libmv_convertDetectorOptions(libmv_DetectOptions *options,
} // namespace
libmv_Features *libmv_detectFeaturesByte(const unsigned char* image_buffer,
libmv_Features* libmv_detectFeaturesByte(const unsigned char* image_buffer,
int width,
int height,
int channels,
@ -133,7 +133,7 @@ void libmv_getFeature(const libmv_Features* libmv_features,
double* y,
double* score,
double* size) {
Feature &feature = libmv_features->features[number];
Feature& feature = libmv_features->features[number];
*x = feature.x;
*y = feature.y;
*score = feature.score;


@ -38,7 +38,7 @@ typedef struct libmv_DetectOptions {
int min_distance;
int fast_min_trackness;
int moravec_max_count;
unsigned char *moravec_pattern;
unsigned char* moravec_pattern;
double harris_threshold;
} libmv_DetectOptions;


@ -36,20 +36,18 @@ struct LibmvFrameAccessor : public FrameAccessor {
libmv_ReleaseImageCallback release_image_callback,
libmv_GetMaskForTrackCallback get_mask_for_track_callback,
libmv_ReleaseMaskCallback release_mask_callback)
: user_data_(user_data),
get_image_callback_(get_image_callback),
release_image_callback_(release_image_callback),
get_mask_for_track_callback_(get_mask_for_track_callback),
release_mask_callback_(release_mask_callback) { }
: user_data_(user_data),
get_image_callback_(get_image_callback),
release_image_callback_(release_image_callback),
get_mask_for_track_callback_(get_mask_for_track_callback),
release_mask_callback_(release_mask_callback) {}
virtual ~LibmvFrameAccessor() {
}
virtual ~LibmvFrameAccessor() {}
libmv_InputMode get_libmv_input_mode(InputMode input_mode) {
switch (input_mode) {
#define CHECK_INPUT_MODE(mode) \
case mode: \
return LIBMV_IMAGE_MODE_ ## mode;
#define CHECK_INPUT_MODE(mode) \
case mode: return LIBMV_IMAGE_MODE_##mode;
CHECK_INPUT_MODE(MONO)
CHECK_INPUT_MODE(RGBA)
#undef CHECK_INPUT_MODE
@ -59,8 +57,7 @@ struct LibmvFrameAccessor : public FrameAccessor {
return LIBMV_IMAGE_MODE_MONO;
}
void get_libmv_region(const Region& region,
libmv_Region* libmv_region) {
void get_libmv_region(const Region& region, libmv_Region* libmv_region) {
libmv_region->min[0] = region.min(0);
libmv_region->min[1] = region.min(1);
libmv_region->max[0] = region.max(0);
@ -74,7 +71,7 @@ struct LibmvFrameAccessor : public FrameAccessor {
const Region* region,
const Transform* transform,
FloatImage* destination) {
float *float_buffer;
float* float_buffer;
int width, height, channels;
libmv_Region libmv_region;
if (region) {
@ -86,46 +83,41 @@ struct LibmvFrameAccessor : public FrameAccessor {
get_libmv_input_mode(input_mode),
downscale,
region != NULL ? &libmv_region : NULL,
(libmv_FrameTransform*) transform,
(libmv_FrameTransform*)transform,
&float_buffer,
&width,
&height,
&channels);
// TODO(sergey): Dumb code for until we can set data directly.
FloatImage temp_image(float_buffer,
height,
width,
channels);
FloatImage temp_image(float_buffer, height, width, channels);
destination->CopyFrom(temp_image);
return cache_key;
}
void ReleaseImage(Key cache_key) {
release_image_callback_(cache_key);
}
void ReleaseImage(Key cache_key) { release_image_callback_(cache_key); }
Key GetMaskForTrack(int clip,
int frame,
int track,
const Region* region,
FloatImage* destination) {
float *float_buffer;
float* float_buffer;
int width, height;
libmv_Region libmv_region;
if (region) {
get_libmv_region(*region, &libmv_region);
}
Key cache_key = get_mask_for_track_callback_(
user_data_,
clip,
frame,
track,
region != NULL ? &libmv_region : NULL,
&float_buffer,
&width,
&height);
Key cache_key =
get_mask_for_track_callback_(user_data_,
clip,
frame,
track,
region != NULL ? &libmv_region : NULL,
&float_buffer,
&width,
&height);
if (cache_key == NULL) {
// No mask for the given track.
@ -133,30 +125,21 @@ struct LibmvFrameAccessor : public FrameAccessor {
}
// TODO(sergey): Dumb code for until we can set data directly.
FloatImage temp_image(float_buffer,
height,
width,
1);
FloatImage temp_image(float_buffer, height, width, 1);
destination->CopyFrom(temp_image);
return cache_key;
}
void ReleaseMask(Key key) {
release_mask_callback_(key);
}
void ReleaseMask(Key key) { release_mask_callback_(key); }
bool GetClipDimensions(int /*clip*/, int * /*width*/, int * /*height*/) {
bool GetClipDimensions(int /*clip*/, int* /*width*/, int* /*height*/) {
return false;
}
int NumClips() {
return 1;
}
int NumClips() { return 1; }
int NumFrames(int /*clip*/) {
return 0;
}
int NumFrames(int /*clip*/) { return 0; }
libmv_FrameAccessorUserData* user_data_;
libmv_GetImageCallback get_image_callback_;
@ -173,35 +156,35 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_ReleaseImageCallback release_image_callback,
libmv_GetMaskForTrackCallback get_mask_for_track_callback,
libmv_ReleaseMaskCallback release_mask_callback) {
return (libmv_FrameAccessor*) LIBMV_OBJECT_NEW(LibmvFrameAccessor,
user_data,
get_image_callback,
release_image_callback,
get_mask_for_track_callback,
release_mask_callback);
return (libmv_FrameAccessor*)LIBMV_OBJECT_NEW(LibmvFrameAccessor,
user_data,
get_image_callback,
release_image_callback,
get_mask_for_track_callback,
release_mask_callback);
}
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor) {
LIBMV_OBJECT_DELETE(frame_accessor, LibmvFrameAccessor);
}
int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform) {
return ((FrameAccessor::Transform*) transform)->key();
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform* transform) {
return ((FrameAccessor::Transform*)transform)->key();
}
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
const libmv_FloatImage *input_image,
libmv_FloatImage *output_image) {
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* transform,
const libmv_FloatImage* input_image,
libmv_FloatImage* output_image) {
const FloatImage input(input_image->buffer,
input_image->height,
input_image->width,
input_image->channels);
FloatImage output;
((FrameAccessor::Transform*) transform)->run(input,
&output);
((FrameAccessor::Transform*)transform)->run(input, &output);
int num_pixels = output.Width() *output.Height() * output.Depth();
int num_pixels = output.Width() * output.Height() * output.Depth();
output_image->buffer = new float[num_pixels];
memcpy(output_image->buffer, output.Data(), num_pixels * sizeof(float));
output_image->width = output.Width();


@ -32,14 +32,14 @@ extern "C" {
typedef struct libmv_FrameAccessor libmv_FrameAccessor;
typedef struct libmv_FrameTransform libmv_FrameTransform;
typedef struct libmv_FrameAccessorUserData libmv_FrameAccessorUserData;
typedef void *libmv_CacheKey;
typedef void* libmv_CacheKey;
typedef enum {
LIBMV_IMAGE_MODE_MONO,
LIBMV_IMAGE_MODE_RGBA,
} libmv_InputMode;
typedef libmv_CacheKey (*libmv_GetImageCallback) (
typedef libmv_CacheKey (*libmv_GetImageCallback)(
libmv_FrameAccessorUserData* user_data,
int clip,
int frame,
@ -52,9 +52,9 @@ typedef libmv_CacheKey (*libmv_GetImageCallback) (
int* height,
int* channels);
typedef void (*libmv_ReleaseImageCallback) (libmv_CacheKey cache_key);
typedef void (*libmv_ReleaseImageCallback)(libmv_CacheKey cache_key);
typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback) (
typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback)(
libmv_FrameAccessorUserData* user_data,
int clip,
int frame,
@ -63,7 +63,7 @@ typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback) (
float** destination,
int* width,
int* height);
typedef void (*libmv_ReleaseMaskCallback) (libmv_CacheKey cache_key);
typedef void (*libmv_ReleaseMaskCallback)(libmv_CacheKey cache_key);
libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_FrameAccessorUserData* user_data,
@ -73,11 +73,12 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_ReleaseMaskCallback release_mask_callback);
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor);
int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform);
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform* transform);
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
const libmv_FloatImage *input_image,
libmv_FloatImage *output_image);
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* transform,
const libmv_FloatImage* input_image,
libmv_FloatImage* output_image);
#ifdef __cplusplus
}
#endif


@ -41,10 +41,8 @@ void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
LG << "x2: " << x2_mat;
libmv::EstimateHomographyOptions options;
libmv::EstimateHomography2DFromCorrespondences(x1_mat,
x2_mat,
options,
&H_mat);
libmv::EstimateHomography2DFromCorrespondences(
x1_mat, x2_mat, options, &H_mat);
LG << "H: " << H_mat;


@ -21,14 +21,14 @@
#include "intern/utildefines.h"
#include "libmv/tracking/track_region.h"
#include <cassert>
#include <png.h>
#include <cassert>
using libmv::FloatImage;
using libmv::SamplePlanarPatch;
void libmv_floatImageDestroy(libmv_FloatImage *image) {
delete [] image->buffer;
void libmv_floatImageDestroy(libmv_FloatImage* image) {
delete[] image->buffer;
}
/* Image <-> buffers conversion */
@ -63,8 +63,7 @@ void libmv_floatBufferToFloatImage(const float* buffer,
}
}
void libmv_floatImageToFloatBuffer(const FloatImage &image,
float* buffer) {
void libmv_floatImageToFloatBuffer(const FloatImage& image, float* buffer) {
for (int y = 0, a = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
@ -74,9 +73,9 @@ void libmv_floatImageToFloatBuffer(const FloatImage &image,
}
}
void libmv_floatImageToByteBuffer(const libmv::FloatImage &image,
void libmv_floatImageToByteBuffer(const libmv::FloatImage& image,
unsigned char* buffer) {
for (int y = 0, a= 0; y < image.Height(); y++) {
for (int y = 0, a = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
buffer[a++] = image(y, x, k) * 255.0f;
@ -93,7 +92,7 @@ static bool savePNGImage(png_bytep* row_pointers,
const char* file_name) {
png_infop info_ptr;
png_structp png_ptr;
FILE *fp = fopen(file_name, "wb");
FILE* fp = fopen(file_name, "wb");
if (fp == NULL) {
return false;
@ -153,7 +152,7 @@ bool libmv_saveImage(const FloatImage& image,
int x0,
int y0) {
int x, y;
png_bytep *row_pointers;
png_bytep* row_pointers;
assert(image.Depth() == 1);
@ -180,9 +179,8 @@ bool libmv_saveImage(const FloatImage& image,
static int image_counter = 0;
char file_name[128];
snprintf(file_name, sizeof(file_name),
"%s_%02d.png",
prefix, ++image_counter);
snprintf(
file_name, sizeof(file_name), "%s_%02d.png", prefix, ++image_counter);
bool result = savePNGImage(row_pointers,
image.Width(),
image.Height(),
@ -191,9 +189,9 @@ bool libmv_saveImage(const FloatImage& image,
file_name);
for (y = 0; y < image.Height(); y++) {
delete [] row_pointers[y];
delete[] row_pointers[y];
}
delete [] row_pointers;
delete[] row_pointers;
return result;
}
@ -211,7 +209,7 @@ void libmv_samplePlanarPatchFloat(const float* image,
double* warped_position_x,
double* warped_position_y) {
FloatImage libmv_image, libmv_patch, libmv_mask;
FloatImage *libmv_mask_for_sample = NULL;
FloatImage* libmv_mask_for_sample = NULL;
libmv_floatBufferToFloatImage(image, width, height, channels, &libmv_image);
@ -221,8 +219,10 @@ void libmv_samplePlanarPatchFloat(const float* image,
}
SamplePlanarPatch(libmv_image,
xs, ys,
num_samples_x, num_samples_y,
xs,
ys,
num_samples_x,
num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,
@ -232,19 +232,19 @@ void libmv_samplePlanarPatchFloat(const float* image,
}
void libmv_samplePlanarPatchByte(const unsigned char* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y) {
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y) {
libmv::FloatImage libmv_image, libmv_patch, libmv_mask;
libmv::FloatImage *libmv_mask_for_sample = NULL;
libmv::FloatImage* libmv_mask_for_sample = NULL;
libmv_byteBufferToFloatImage(image, width, height, channels, &libmv_image);
@ -254,8 +254,10 @@ void libmv_samplePlanarPatchByte(const unsigned char* image,
}
libmv::SamplePlanarPatch(libmv_image,
xs, ys,
num_samples_x, num_samples_y,
xs,
ys,
num_samples_x,
num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,


@ -35,7 +35,7 @@ void libmv_floatBufferToFloatImage(const float* buffer,
libmv::FloatImage* image);
void libmv_floatImageToFloatBuffer(const libmv::FloatImage& image,
float *buffer);
float* buffer);
void libmv_floatImageToByteBuffer(const libmv::FloatImage& image,
unsigned char* buffer);
@ -51,13 +51,13 @@ extern "C" {
#endif
typedef struct libmv_FloatImage {
float *buffer;
float* buffer;
int width;
int height;
int channels;
} libmv_FloatImage;
void libmv_floatImageDestroy(libmv_FloatImage *image);
void libmv_floatImageDestroy(libmv_FloatImage* image);
void libmv_samplePlanarPatchFloat(const float* image,
int width,
@ -72,18 +72,18 @@ void libmv_samplePlanarPatchFloat(const float* image,
double* warped_position_x,
double* warped_position_y);
void libmv_samplePlanarPatchByte(const unsigned char* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y);
void libmv_samplePlanarPatchByte(const unsigned char* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y);
#ifdef __cplusplus
}


@ -24,8 +24,8 @@
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/bundle.h"
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/simple_pipeline/initialize_reconstruction.h"
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/reconstruction_scale.h"
@ -39,19 +39,19 @@ using libmv::EuclideanScaleToUnity;
using libmv::Marker;
using libmv::ProgressUpdateCallback;
using libmv::PolynomialCameraIntrinsics;
using libmv::Tracks;
using libmv::EuclideanBundle;
using libmv::EuclideanCompleteReconstruction;
using libmv::EuclideanReconstructTwoFrames;
using libmv::EuclideanReprojectionError;
using libmv::PolynomialCameraIntrinsics;
using libmv::Tracks;
struct libmv_Reconstruction {
EuclideanReconstruction reconstruction;
/* Used for per-track average error calculation after reconstruction */
Tracks tracks;
CameraIntrinsics *intrinsics;
CameraIntrinsics* intrinsics;
double error;
bool is_valid;
@ -63,7 +63,7 @@ class ReconstructUpdateCallback : public ProgressUpdateCallback {
public:
ReconstructUpdateCallback(
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata) {
void* callback_customdata) {
progress_update_callback_ = progress_update_callback;
callback_customdata_ = callback_customdata;
}
@ -73,13 +73,14 @@ class ReconstructUpdateCallback : public ProgressUpdateCallback {
progress_update_callback_(callback_customdata_, progress, message);
}
}
protected:
reconstruct_progress_update_cb progress_update_callback_;
void* callback_customdata_;
};
void libmv_solveRefineIntrinsics(
const Tracks &tracks,
const Tracks& tracks,
const int refine_intrinsics,
const int bundle_constraints,
reconstruct_progress_update_cb progress_update_callback,
@ -96,11 +97,11 @@ void libmv_solveRefineIntrinsics(
bundle_intrinsics |= libmv::BUNDLE_PRINCIPAL_POINT;
}
#define SET_DISTORTION_FLAG_CHECKED(type, coefficient) \
do { \
if (refine_intrinsics & LIBMV_REFINE_ ## type ##_DISTORTION_ ## coefficient) { \
bundle_intrinsics |= libmv::BUNDLE_ ## type ## _ ## coefficient; \
} \
#define SET_DISTORTION_FLAG_CHECKED(type, coefficient) \
do { \
if (refine_intrinsics & LIBMV_REFINE_##type##_DISTORTION_##coefficient) { \
bundle_intrinsics |= libmv::BUNDLE_##type##_##coefficient; \
} \
} while (0)
SET_DISTORTION_FLAG_CHECKED(RADIAL, K1);
@ -123,20 +124,19 @@ void libmv_solveRefineIntrinsics(
}
void finishReconstruction(
const Tracks &tracks,
const CameraIntrinsics &camera_intrinsics,
libmv_Reconstruction *libmv_reconstruction,
const Tracks& tracks,
const CameraIntrinsics& camera_intrinsics,
libmv_Reconstruction* libmv_reconstruction,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata) {
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
void* callback_customdata) {
EuclideanReconstruction& reconstruction =
libmv_reconstruction->reconstruction;
/* Reprojection error calculation. */
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = tracks;
libmv_reconstruction->error = EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
libmv_reconstruction->error =
EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
}
bool selectTwoKeyframesBasedOnGRICAndVariance(
@ -148,9 +148,8 @@ bool selectTwoKeyframesBasedOnGRICAndVariance(
libmv::vector<int> keyframes;
/* Get list of all keyframe candidates first. */
SelectKeyframesBasedOnGRICAndVariance(normalized_tracks,
camera_intrinsics,
keyframes);
SelectKeyframesBasedOnGRICAndVariance(
normalized_tracks, camera_intrinsics, keyframes);
if (keyframes.size() < 2) {
LG << "Not enough keyframes detected by GRIC";
@ -175,24 +174,20 @@ bool selectTwoKeyframesBasedOnGRICAndVariance(
EuclideanReconstruction reconstruction;
int current_keyframe = keyframes[i];
libmv::vector<Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
current_keyframe);
normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
current_keyframe);
Tracks keyframe_tracks(keyframe_markers);
/* get a solution from two keyframes only */
EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
EuclideanBundle(keyframe_tracks, &reconstruction);
EuclideanCompleteReconstruction(keyframe_tracks,
&reconstruction,
NULL);
EuclideanCompleteReconstruction(keyframe_tracks, &reconstruction, NULL);
double current_error = EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
double current_error =
EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
LG << "Error between " << previous_keyframe
<< " and " << current_keyframe
LG << "Error between " << previous_keyframe << " and " << current_keyframe
<< ": " << current_error;
if (current_error < best_error) {
@ -214,53 +209,49 @@ Marker libmv_projectMarker(const EuclideanPoint& point,
projected /= projected(2);
libmv::Marker reprojected_marker;
intrinsics.ApplyIntrinsics(projected(0), projected(1),
&reprojected_marker.x,
&reprojected_marker.y);
intrinsics.ApplyIntrinsics(
projected(0), projected(1), &reprojected_marker.x, &reprojected_marker.y);
reprojected_marker.image = camera.image;
reprojected_marker.track = point.track;
return reprojected_marker;
}
void libmv_getNormalizedTracks(const Tracks &tracks,
const CameraIntrinsics &camera_intrinsics,
Tracks *normalized_tracks) {
void libmv_getNormalizedTracks(const Tracks& tracks,
const CameraIntrinsics& camera_intrinsics,
Tracks* normalized_tracks) {
libmv::vector<Marker> markers = tracks.AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
Marker &marker = markers[i];
camera_intrinsics.InvertIntrinsics(marker.x, marker.y,
&marker.x, &marker.y);
normalized_tracks->Insert(marker.image,
marker.track,
marker.x, marker.y,
marker.weight);
Marker& marker = markers[i];
camera_intrinsics.InvertIntrinsics(
marker.x, marker.y, &marker.x, &marker.y);
normalized_tracks->Insert(
marker.image, marker.track, marker.x, marker.y, marker.weight);
}
}
} // namespace
libmv_Reconstruction *libmv_solveReconstruction(
libmv_Reconstruction* libmv_solveReconstruction(
const libmv_Tracks* libmv_tracks,
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_ReconstructionOptions* libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata) {
libmv_Reconstruction *libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
libmv_Reconstruction* libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
Tracks &tracks = *((Tracks *) libmv_tracks);
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
Tracks& tracks = *((Tracks*)libmv_tracks);
EuclideanReconstruction& reconstruction =
libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback,
callback_customdata);
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
CameraIntrinsics *camera_intrinsics;
CameraIntrinsics* camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
/* Invert the camera intrinsics/ */
Tracks normalized_tracks;
@ -276,10 +267,10 @@ libmv_Reconstruction *libmv_solveReconstruction(
update_callback.invoke(0, "Selecting keyframes");
if (selectTwoKeyframesBasedOnGRICAndVariance(tracks,
normalized_tracks,
*camera_intrinsics,
keyframe1,
keyframe2)) {
normalized_tracks,
*camera_intrinsics,
keyframe1,
keyframe2)) {
/* so keyframes in the interface would be updated */
libmv_reconstruction_options->keyframe1 = keyframe1;
libmv_reconstruction_options->keyframe2 = keyframe2;
@ -290,7 +281,7 @@ libmv_Reconstruction *libmv_solveReconstruction(
LG << "frames to init from: " << keyframe1 << " " << keyframe2;
libmv::vector<Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
LG << "number of markers for init: " << keyframe_markers.size();
@ -309,14 +300,12 @@ libmv_Reconstruction *libmv_solveReconstruction(
}
EuclideanBundle(normalized_tracks, &reconstruction);
EuclideanCompleteReconstruction(normalized_tracks,
&reconstruction,
&update_callback);
EuclideanCompleteReconstruction(
normalized_tracks, &reconstruction, &update_callback);
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(
tracks,
libmv_solveRefineIntrinsics(tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_CONSTRAINTS,
progress_update_callback,
@ -336,31 +325,29 @@ libmv_Reconstruction *libmv_solveReconstruction(
callback_customdata);
libmv_reconstruction->is_valid = true;
return (libmv_Reconstruction *) libmv_reconstruction;
return (libmv_Reconstruction*)libmv_reconstruction;
}
libmv_Reconstruction *libmv_solveModal(
const libmv_Tracks *libmv_tracks,
const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
const libmv_ReconstructionOptions *libmv_reconstruction_options,
libmv_Reconstruction* libmv_solveModal(
const libmv_Tracks* libmv_tracks,
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
const libmv_ReconstructionOptions* libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata) {
libmv_Reconstruction *libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
void* callback_customdata) {
libmv_Reconstruction* libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
Tracks &tracks = *((Tracks *) libmv_tracks);
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
Tracks& tracks = *((Tracks*)libmv_tracks);
EuclideanReconstruction& reconstruction =
libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback,
callback_customdata);
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
CameraIntrinsics *camera_intrinsics;
CameraIntrinsics* camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(
libmv_camera_intrinsics_options);
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
/* Invert the camera intrinsics. */
Tracks normalized_tracks;
@ -378,11 +365,11 @@ libmv_Reconstruction *libmv_solveModal(
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(
tracks,
libmv_solveRefineIntrinsics(tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_TRANSLATION,
progress_update_callback, callback_customdata,
progress_update_callback,
callback_customdata,
&reconstruction,
camera_intrinsics);
}
@ -395,26 +382,25 @@ libmv_Reconstruction *libmv_solveModal(
callback_customdata);
libmv_reconstruction->is_valid = true;
return (libmv_Reconstruction *) libmv_reconstruction;
return (libmv_Reconstruction*)libmv_reconstruction;
}
int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction) {
int libmv_reconstructionIsValid(libmv_Reconstruction* libmv_reconstruction) {
return libmv_reconstruction->is_valid;
}
void libmv_reconstructionDestroy(libmv_Reconstruction *libmv_reconstruction) {
void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction) {
LIBMV_OBJECT_DELETE(libmv_reconstruction->intrinsics, CameraIntrinsics);
LIBMV_OBJECT_DELETE(libmv_reconstruction, libmv_Reconstruction);
}
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction *libmv_reconstruction,
const libmv_Reconstruction* libmv_reconstruction,
int track,
double pos[3]) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanPoint *point =
reconstruction->PointForTrack(track);
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanPoint* point = reconstruction->PointForTrack(track);
if (point) {
pos[0] = point->X[0];
pos[1] = point->X[2];
@ -425,23 +411,22 @@ int libmv_reprojectionPointForTrack(
}
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction *libmv_reconstruction,
int track) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
const libmv_Reconstruction* libmv_reconstruction, int track) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics* intrinsics = libmv_reconstruction->intrinsics;
libmv::vector<Marker> markers =
libmv_reconstruction->tracks.MarkersForTrack(track);
libmv_reconstruction->tracks.MarkersForTrack(track);
int num_reprojected = 0;
double total_error = 0.0;
for (int i = 0; i < markers.size(); ++i) {
double weight = markers[i].weight;
const EuclideanCamera *camera =
reconstruction->CameraForImage(markers[i].image);
const EuclideanPoint *point =
reconstruction->PointForTrack(markers[i].track);
const EuclideanCamera* camera =
reconstruction->CameraForImage(markers[i].image);
const EuclideanPoint* point =
reconstruction->PointForTrack(markers[i].track);
if (!camera || !point || weight == 0.0) {
continue;
@ -450,7 +435,7 @@ double libmv_reprojectionErrorForTrack(
num_reprojected++;
Marker reprojected_marker =
libmv_projectMarker(*point, *camera, *intrinsics);
libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * weight;
double ey = (reprojected_marker.y - markers[i].y) * weight;
@ -461,14 +446,13 @@ double libmv_reprojectionErrorForTrack(
}
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction *libmv_reconstruction,
int image) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
const libmv_Reconstruction* libmv_reconstruction, int image) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics* intrinsics = libmv_reconstruction->intrinsics;
libmv::vector<Marker> markers =
libmv_reconstruction->tracks.MarkersInImage(image);
const EuclideanCamera *camera = reconstruction->CameraForImage(image);
libmv_reconstruction->tracks.MarkersInImage(image);
const EuclideanCamera* camera = reconstruction->CameraForImage(image);
int num_reprojected = 0;
double total_error = 0.0;
@ -477,8 +461,8 @@ double libmv_reprojectionErrorForImage(
}
for (int i = 0; i < markers.size(); ++i) {
const EuclideanPoint *point =
reconstruction->PointForTrack(markers[i].track);
const EuclideanPoint* point =
reconstruction->PointForTrack(markers[i].track);
if (!point) {
continue;
@ -487,7 +471,7 @@ double libmv_reprojectionErrorForImage(
num_reprojected++;
Marker reprojected_marker =
libmv_projectMarker(*point, *camera, *intrinsics);
libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * markers[i].weight;
double ey = (reprojected_marker.y - markers[i].y) * markers[i].weight;
@ -498,13 +482,12 @@ double libmv_reprojectionErrorForImage(
}
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction *libmv_reconstruction,
const libmv_Reconstruction* libmv_reconstruction,
int image,
double mat[4][4]) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanCamera *camera =
reconstruction->CameraForImage(image);
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanCamera* camera = reconstruction->CameraForImage(image);
if (camera) {
for (int j = 0; j < 3; ++j) {
@ -541,11 +524,11 @@ int libmv_reprojectionCameraForImage(
}
double libmv_reprojectionError(
const libmv_Reconstruction *libmv_reconstruction) {
const libmv_Reconstruction* libmv_reconstruction) {
return libmv_reconstruction->error;
}
libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction *libmv_reconstruction) {
return (libmv_CameraIntrinsics *) libmv_reconstruction->intrinsics;
libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction* libmv_reconstruction) {
return (libmv_CameraIntrinsics*)libmv_reconstruction->intrinsics;
}
View File
@ -31,17 +31,16 @@ struct libmv_CameraIntrinsicsOptions;
typedef struct libmv_Reconstruction libmv_Reconstruction;
enum {
LIBMV_REFINE_FOCAL_LENGTH = (1 << 0),
LIBMV_REFINE_PRINCIPAL_POINT = (1 << 1),
LIBMV_REFINE_FOCAL_LENGTH = (1 << 0),
LIBMV_REFINE_PRINCIPAL_POINT = (1 << 1),
LIBMV_REFINE_RADIAL_DISTORTION_K1 = (1 << 2),
LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 3),
LIBMV_REFINE_RADIAL_DISTORTION_K3 = (1 << 4),
LIBMV_REFINE_RADIAL_DISTORTION_K4 = (1 << 5),
LIBMV_REFINE_RADIAL_DISTORTION = (LIBMV_REFINE_RADIAL_DISTORTION_K1 |
LIBMV_REFINE_RADIAL_DISTORTION_K2 |
LIBMV_REFINE_RADIAL_DISTORTION_K3 |
LIBMV_REFINE_RADIAL_DISTORTION_K4),
LIBMV_REFINE_RADIAL_DISTORTION_K1 = (1 << 2),
LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 3),
LIBMV_REFINE_RADIAL_DISTORTION_K3 = (1 << 4),
LIBMV_REFINE_RADIAL_DISTORTION_K4 = (1 << 5),
LIBMV_REFINE_RADIAL_DISTORTION =
(LIBMV_REFINE_RADIAL_DISTORTION_K1 | LIBMV_REFINE_RADIAL_DISTORTION_K2 |
LIBMV_REFINE_RADIAL_DISTORTION_K3 | LIBMV_REFINE_RADIAL_DISTORTION_K4),
LIBMV_REFINE_TANGENTIAL_DISTORTION_P1 = (1 << 6),
LIBMV_REFINE_TANGENTIAL_DISTORTION_P2 = (1 << 7),
@ -55,9 +54,9 @@ typedef struct libmv_ReconstructionOptions {
int refine_intrinsics;
} libmv_ReconstructionOptions;
typedef void (*reconstruct_progress_update_cb) (void* customdata,
double progress,
const char* message);
typedef void (*reconstruct_progress_update_cb)(void* customdata,
double progress,
const char* message);
libmv_Reconstruction* libmv_solveReconstruction(
const struct libmv_Tracks* libmv_tracks,
@ -73,35 +72,32 @@ libmv_Reconstruction* libmv_solveModal(
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata);
int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction);
int libmv_reconstructionIsValid(libmv_Reconstruction* libmv_reconstruction);
void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction);
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction* libmv_reconstruction,
int track,
double pos[3]);
const libmv_Reconstruction* libmv_reconstruction, int track, double pos[3]);
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction* libmv_reconstruction,
int track);
const libmv_Reconstruction* libmv_reconstruction, int track);
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction* libmv_reconstruction,
int image);
const libmv_Reconstruction* libmv_reconstruction, int image);
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction* libmv_reconstruction,
int image,
double mat[4][4]);
double libmv_reprojectionError(const libmv_Reconstruction* libmv_reconstruction);
double libmv_reprojectionError(
const libmv_Reconstruction* libmv_reconstruction);
struct libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction *libmv_Reconstruction);
libmv_Reconstruction* libmv_Reconstruction);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_RECONSTRUCTION_H_
#endif // LIBMV_C_API_RECONSTRUCTION_H_
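Taken together, the declarations above imply a simple caller-side lifecycle: solve, validate, query the error, then destroy. A minimal sketch, assuming the tracks and both option structs are filled in elsewhere and the C-API header declaring these functions is included; the helper name and its parameters are mine, not part of the diff.

#include <stdio.h>

static void print_progress(void* customdata, double progress, const char* message) {
  (void)customdata;
  printf("%s: %d%%\n", message, (int)(progress * 100.0));
}

/* Hypothetical helper; `tracks` and the option structs are assumed inputs. */
static double solve_and_report(libmv_Tracks* tracks,
                               const libmv_CameraIntrinsicsOptions* camera_options,
                               libmv_ReconstructionOptions* reconstruction_options) {
  libmv_Reconstruction* reconstruction = libmv_solveReconstruction(
      tracks, camera_options, reconstruction_options, print_progress, NULL);
  double error = -1.0;
  if (libmv_reconstructionIsValid(reconstruction)) {
    error = libmv_reprojectionError(reconstruction);
  }
  libmv_reconstructionDestroy(reconstruction);
  return error;
}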
View File
@ -24,7 +24,7 @@
/* ************ Logging ************ */
void libmv_initLogging(const char * /*argv0*/) {
void libmv_initLogging(const char* /*argv0*/) {
}
void libmv_startDebugLogging(void) {
@ -36,18 +36,18 @@ void libmv_setLoggingVerbosity(int /*verbosity*/) {
/* ************ Planar tracker ************ */
/* TrackRegion (new planar tracker) */
int libmv_trackRegion(const libmv_TrackRegionOptions * /*options*/,
const float * /*image1*/,
int libmv_trackRegion(const libmv_TrackRegionOptions* /*options*/,
const float* /*image1*/,
int /*image1_width*/,
int /*image1_height*/,
const float * /*image2*/,
const float* /*image2*/,
int /*image2_width*/,
int /*image2_height*/,
const double *x1,
const double *y1,
libmv_TrackRegionResult *result,
double *x2,
double *y2) {
const double* x1,
const double* y1,
libmv_TrackRegionResult* result,
double* x2,
double* y2) {
/* Convert to doubles for the libmv api. The four corners and the center. */
for (int i = 0; i < 5; ++i) {
x2[i] = x1[i];
@ -61,46 +61,46 @@ int libmv_trackRegion(const libmv_TrackRegionOptions * /*options*/,
return false;
}
void libmv_samplePlanarPatchFloat(const float * /*image*/,
void libmv_samplePlanarPatchFloat(const float* /*image*/,
int /*width*/,
int /*height*/,
int /*channels*/,
const double * /*xs*/,
const double * /*ys*/,
const double* /*xs*/,
const double* /*ys*/,
int /*num_samples_x*/,
int /*num_samples_y*/,
const float * /*mask*/,
float * /*patch*/,
double * /*warped_position_x*/,
double * /*warped_position_y*/) {
const float* /*mask*/,
float* /*patch*/,
double* /*warped_position_x*/,
double* /*warped_position_y*/) {
/* TODO(sergey): implement */
}
void libmv_samplePlanarPatchByte(const unsigned char * /*image*/,
void libmv_samplePlanarPatchByte(const unsigned char* /*image*/,
int /*width*/,
int /*height*/,
int /*channels*/,
const double * /*xs*/,
const double * /*ys*/,
int /*num_samples_x*/, int /*num_samples_y*/,
const float * /*mask*/,
unsigned char * /*patch*/,
double * /*warped_position_x*/,
double * /*warped_position_y*/) {
const double* /*xs*/,
const double* /*ys*/,
int /*num_samples_x*/,
int /*num_samples_y*/,
const float* /*mask*/,
unsigned char* /*patch*/,
double* /*warped_position_x*/,
double* /*warped_position_y*/) {
/* TODO(sergey): implement */
}
void libmv_floatImageDestroy(libmv_FloatImage* /*image*/)
{
void libmv_floatImageDestroy(libmv_FloatImage* /*image*/) {
}
/* ************ Tracks ************ */
libmv_Tracks *libmv_tracksNew(void) {
libmv_Tracks* libmv_tracksNew(void) {
return NULL;
}
void libmv_tracksInsert(libmv_Tracks * /*libmv_tracks*/,
void libmv_tracksInsert(libmv_Tracks* /*libmv_tracks*/,
int /*image*/,
int /*track*/,
double /*x*/,
@ -108,152 +108,152 @@ void libmv_tracksInsert(libmv_Tracks * /*libmv_tracks*/,
double /*weight*/) {
}
void libmv_tracksDestroy(libmv_Tracks * /*libmv_tracks*/) {
void libmv_tracksDestroy(libmv_Tracks* /*libmv_tracks*/) {
}
/* ************ Reconstruction solver ************ */
libmv_Reconstruction *libmv_solveReconstruction(
const libmv_Tracks * /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
libmv_Reconstruction* libmv_solveReconstruction(
const libmv_Tracks* /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
libmv_ReconstructionOptions* /*libmv_reconstruction_options*/,
reconstruct_progress_update_cb /*progress_update_callback*/,
void * /*callback_customdata*/) {
void* /*callback_customdata*/) {
return NULL;
}
libmv_Reconstruction *libmv_solveModal(
const libmv_Tracks * /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
const libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
libmv_Reconstruction* libmv_solveModal(
const libmv_Tracks* /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
const libmv_ReconstructionOptions* /*libmv_reconstruction_options*/,
reconstruct_progress_update_cb /*progress_update_callback*/,
void * /*callback_customdata*/) {
void* /*callback_customdata*/) {
return NULL;
}
int libmv_reconstructionIsValid(libmv_Reconstruction * /*libmv_reconstruction*/) {
int libmv_reconstructionIsValid(
libmv_Reconstruction* /*libmv_reconstruction*/) {
return 0;
}
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction * /*libmv_reconstruction*/,
const libmv_Reconstruction* /*libmv_reconstruction*/,
int /*track*/,
double /*pos*/[3]) {
return 0;
}
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*track*/) {
const libmv_Reconstruction* /*libmv_reconstruction*/, int /*track*/) {
return 0.0;
}
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*image*/) {
const libmv_Reconstruction* /*libmv_reconstruction*/, int /*image*/) {
return 0.0;
}
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction * /*libmv_reconstruction*/,
const libmv_Reconstruction* /*libmv_reconstruction*/,
int /*image*/,
double /*mat*/[4][4]) {
return 0;
}
double libmv_reprojectionError(
const libmv_Reconstruction * /*libmv_reconstruction*/) {
const libmv_Reconstruction* /*libmv_reconstruction*/) {
return 0.0;
}
void libmv_reconstructionDestroy(
struct libmv_Reconstruction * /*libmv_reconstruction*/) {
struct libmv_Reconstruction* /*libmv_reconstruction*/) {
}
/* ************ Feature detector ************ */
libmv_Features *libmv_detectFeaturesByte(const unsigned char * /*image_buffer*/,
libmv_Features* libmv_detectFeaturesByte(const unsigned char* /*image_buffer*/,
int /*width*/,
int /*height*/,
int /*channels*/,
libmv_DetectOptions * /*options*/) {
libmv_DetectOptions* /*options*/) {
return NULL;
}
struct libmv_Features *libmv_detectFeaturesFloat(
const float * /*image_buffer*/,
struct libmv_Features* libmv_detectFeaturesFloat(
const float* /*image_buffer*/,
int /*width*/,
int /*height*/,
int /*channels*/,
libmv_DetectOptions * /*options*/) {
libmv_DetectOptions* /*options*/) {
return NULL;
}
int libmv_countFeatures(const libmv_Features * /*libmv_features*/) {
int libmv_countFeatures(const libmv_Features* /*libmv_features*/) {
return 0;
}
void libmv_getFeature(const libmv_Features * /*libmv_features*/,
void libmv_getFeature(const libmv_Features* /*libmv_features*/,
int /*number*/,
double *x,
double *y,
double *score,
double *size) {
double* x,
double* y,
double* score,
double* size) {
*x = 0.0;
*y = 0.0;
*score = 0.0;
*size = 0.0;
}
void libmv_featuresDestroy(struct libmv_Features * /*libmv_features*/) {
void libmv_featuresDestroy(struct libmv_Features* /*libmv_features*/) {
}
/* ************ Camera intrinsics ************ */
libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction * /*libmv_reconstruction*/) {
libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction* /*libmv_reconstruction*/) {
return NULL;
}
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/) {
libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/) {
return NULL;
}
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* /*libmvIntrinsics*/) {
return NULL;
}
void libmv_cameraIntrinsicsDestroy(
libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
libmv_CameraIntrinsics* /*libmvIntrinsics*/) {
}
void libmv_cameraIntrinsicsUpdate(
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
libmv_CameraIntrinsics * /*libmv_intrinsics*/) {
const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
libmv_CameraIntrinsics* /*libmv_intrinsics*/) {
}
void libmv_cameraIntrinsicsSetThreads(
libmv_CameraIntrinsics * /*libmv_intrinsics*/,
int /*threads*/) {
libmv_CameraIntrinsics* /*libmv_intrinsics*/, int /*threads*/) {
}
void libmv_cameraIntrinsicsExtractOptions(
const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
libmv_CameraIntrinsicsOptions *camera_intrinsics_options) {
const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
memset(camera_intrinsics_options, 0, sizeof(libmv_CameraIntrinsicsOptions));
camera_intrinsics_options->focal_length = 1.0;
}
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
const unsigned char *source_image,
int width, int height,
const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
const unsigned char* source_image,
int width,
int height,
float /*overscan*/,
int channels,
unsigned char *destination_image) {
memcpy(destination_image, source_image,
unsigned char* destination_image) {
memcpy(destination_image,
source_image,
channels * width * height * sizeof(unsigned char));
}
@ -265,19 +265,21 @@ void libmv_cameraIntrinsicsUndistortFloat(
float /*overscan*/,
int channels,
float* destination_image) {
memcpy(destination_image, source_image,
memcpy(destination_image,
source_image,
channels * width * height * sizeof(float));
}
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* /*libmv_intrinsics*/,
const unsigned char *source_image,
const unsigned char* source_image,
int width,
int height,
float /*overscan*/,
int channels,
unsigned char *destination_image) {
memcpy(destination_image, source_image,
unsigned char* destination_image) {
memcpy(destination_image,
source_image,
channels * width * height * sizeof(unsigned char));
}
@ -289,7 +291,8 @@ void libmv_cameraIntrinsicsDistortFloat(
float /*overscan*/,
int channels,
float* destination_image) {
memcpy(destination_image, source_image,
memcpy(destination_image,
source_image,
channels * width * height * sizeof(float));
}
@ -315,8 +318,8 @@ void libmv_cameraIntrinsicsInvert(
*y1 = 0.0;
}
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (* /*x1*/)[2],
/* const */ double (* /*x2*/)[2],
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*/*x1*/)[2],
/* const */ double (*/*x2*/)[2],
int /*num_points*/,
double H[3][3]) {
memset(H, 0, sizeof(double[3][3]));
@ -327,45 +330,38 @@ void libmv_homography2DFromCorrespondencesEuc(/* const */ double (* /*x1*/)[2],
/* ************ autotrack ************ */
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/)
{
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/) {
return NULL;
}
void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/)
{
void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/) {
}
void libmv_autoTrackSetOptions(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_AutoTrackOptions* /*options*/)
{
const libmv_AutoTrackOptions* /*options*/) {
}
int libmv_autoTrackMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_TrackRegionOptions* /*libmv_options*/,
libmv_Marker * /*libmv_tracker_marker*/,
libmv_TrackRegionResult* /*libmv_result*/)
{
libmv_Marker* /*libmv_tracker_marker*/,
libmv_TrackRegionResult* /*libmv_result*/) {
return 0;
}
void libmv_autoTrackAddMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_Marker* /*libmv_marker*/)
{
const libmv_Marker* /*libmv_marker*/) {
}
void libmv_autoTrackSetMarkers(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_Marker* /*libmv_marker-*/,
size_t /*num_markers*/)
{
size_t /*num_markers*/) {
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* /*libmv_autotrack*/,
int /*clip*/,
int /*frame*/,
int /*track*/,
libmv_Marker* /*libmv_marker*/)
{
libmv_Marker* /*libmv_marker*/) {
return 0;
}
@ -376,24 +372,20 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_GetImageCallback /*get_image_callback*/,
libmv_ReleaseImageCallback /*release_image_callback*/,
libmv_GetMaskForTrackCallback /*get_mask_for_track_callback*/,
libmv_ReleaseMaskCallback /*release_mask_callback*/)
{
libmv_ReleaseMaskCallback /*release_mask_callback*/) {
return NULL;
}
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/)
{
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/) {
}
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform * /*transform*/)
{
const libmv_FrameTransform* /*transform*/) {
return 0;
}
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* /*transform*/,
const libmv_FloatImage* /*input_image*/,
libmv_FloatImage* /*output_image*/)
{
void libmv_frameAccessorgetTransformRun(
const libmv_FrameTransform* /*transform*/,
const libmv_FloatImage* /*input_image*/,
libmv_FloatImage* /*output_image*/) {
}
View File
@ -32,17 +32,17 @@
#undef DUMP_ALWAYS
using libmv::FloatImage;
using libmv::TrackRegion;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
using libmv::TrackRegion;
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
TrackRegionOptions* track_region_options) {
switch (options.motion_model) {
#define LIBMV_CONVERT(the_model) \
case TrackRegionOptions::the_model: \
track_region_options->mode = TrackRegionOptions::the_model; \
#define LIBMV_CONVERT(the_model) \
case TrackRegionOptions::the_model: \
track_region_options->mode = TrackRegionOptions::the_model; \
break;
LIBMV_CONVERT(TRANSLATION)
LIBMV_CONVERT(TRANSLATION_ROTATION)
@ -66,7 +66,8 @@ void libmv_configureTrackRegionOptions(
* so disabling for now for until proper prediction model is landed.
*
* The thing is, currently blender sends input coordinates as the guess to
* region tracker and in case of fast motion such an early out ruins the track.
* region tracker and in case of fast motion such an early out ruins the
* track.
*/
track_region_options->attempt_refine_before_brute = false;
track_region_options->use_normalized_intensities = options.use_normalization;
@ -74,7 +75,7 @@ void libmv_configureTrackRegionOptions(
void libmv_regionTrackergetResult(const TrackRegionResult& track_region_result,
libmv_TrackRegionResult* result) {
result->termination = (int) track_region_result.termination;
result->termination = (int)track_region_result.termination;
result->termination_reason = "";
result->correlation = track_region_result.correlation;
}
@ -108,33 +109,27 @@ int libmv_trackRegion(const libmv_TrackRegionOptions* options,
libmv_configureTrackRegionOptions(*options, &track_region_options);
if (options->image1_mask) {
libmv_floatBufferToFloatImage(options->image1_mask,
image1_width,
image1_height,
1,
&image1_mask);
libmv_floatBufferToFloatImage(
options->image1_mask, image1_width, image1_height, 1, &image1_mask);
track_region_options.image1_mask = &image1_mask;
}
// Convert from raw float buffers to libmv's FloatImage.
FloatImage old_patch, new_patch;
libmv_floatBufferToFloatImage(image1,
image1_width,
image1_height,
1,
&old_patch);
libmv_floatBufferToFloatImage(image2,
image2_width,
image2_height,
1,
&new_patch);
libmv_floatBufferToFloatImage(
image1, image1_width, image1_height, 1, &old_patch);
libmv_floatBufferToFloatImage(
image2, image2_width, image2_height, 1, &new_patch);
TrackRegionResult track_region_result;
TrackRegion(old_patch, new_patch,
xx1, yy1,
TrackRegion(old_patch,
new_patch,
xx1,
yy1,
track_region_options,
xx2, yy2,
xx2,
yy2,
&track_region_result);
// Convert to floats for the blender api.
View File
@ -31,7 +31,7 @@ typedef struct libmv_TrackRegionOptions {
int use_normalization;
double minimum_correlation;
double sigma;
float *image1_mask;
float* image1_mask;
} libmv_TrackRegionOptions;
typedef struct libmv_TrackRegionResult {
@ -42,9 +42,9 @@ typedef struct libmv_TrackRegionResult {
#ifdef __cplusplus
namespace libmv {
struct TrackRegionOptions;
struct TrackRegionResult;
}
struct TrackRegionOptions;
struct TrackRegionResult;
} // namespace libmv
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
libmv::TrackRegionOptions* track_region_options);
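For reference, a hypothetical call into libmv_trackRegion as declared above; the two float buffers, their dimensions, and the filled-in options struct are assumed to exist already. The coordinate arrays carry five points, the four patch corners followed by the center, as noted earlier in this diff.

double x1[5], y1[5];  /* corners + center of the patch in image1, filled by the caller */
double x2[5], y2[5];  /* receives the tracked corners + center in image2 */
libmv_TrackRegionResult result;
int ok = libmv_trackRegion(&options,
                           image1, image1_width, image1_height,
                           image2, image2_width, image2_height,
                           x1, y1,
                           &result,
                           x2, y2);
/* On success, result.correlation and result.termination describe how the track went. */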
View File
@ -28,18 +28,18 @@ using libmv::Tracks;
libmv_Tracks* libmv_tracksNew(void) {
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
return (libmv_Tracks*) tracks;
return (libmv_Tracks*)tracks;
}
void libmv_tracksDestroy(libmv_Tracks* libmv_tracks) {
LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
}
void libmv_tracksInsert(libmv_Tracks *libmv_tracks,
void libmv_tracksInsert(libmv_Tracks* libmv_tracks,
int image,
int track,
double x,
double y,
double weight) {
((Tracks *) libmv_tracks)->Insert(image, track, x, y, weight);
((Tracks*)libmv_tracks)->Insert(image, track, x, y, weight);
}
View File
@ -25,8 +25,7 @@
using mv::Marker;
using mv::Tracks;
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
Marker *marker) {
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker, Marker* marker) {
marker->clip = libmv_marker.clip;
marker->frame = libmv_marker.frame;
marker->track = libmv_marker.track;
@ -41,17 +40,16 @@ void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
marker->search_region.max(0) = libmv_marker.search_region_max[0];
marker->search_region.max(1) = libmv_marker.search_region_max[1];
marker->weight = libmv_marker.weight;
marker->source = (Marker::Source) libmv_marker.source;
marker->status = (Marker::Status) libmv_marker.status;
marker->source = (Marker::Source)libmv_marker.source;
marker->status = (Marker::Status)libmv_marker.status;
marker->reference_clip = libmv_marker.reference_clip;
marker->reference_frame = libmv_marker.reference_frame;
marker->model_type = (Marker::ModelType) libmv_marker.model_type;
marker->model_type = (Marker::ModelType)libmv_marker.model_type;
marker->model_id = libmv_marker.model_id;
marker->disabled_channels = libmv_marker.disabled_channels;
}
void libmv_markerToApiMarker(const Marker& marker,
libmv_Marker *libmv_marker) {
void libmv_markerToApiMarker(const Marker& marker, libmv_Marker* libmv_marker) {
libmv_marker->clip = marker.clip;
libmv_marker->frame = marker.frame;
libmv_marker->track = marker.track;
@ -66,11 +64,11 @@ void libmv_markerToApiMarker(const Marker& marker,
libmv_marker->search_region_max[0] = marker.search_region.max(0);
libmv_marker->search_region_max[1] = marker.search_region.max(1);
libmv_marker->weight = marker.weight;
libmv_marker->source = (libmv_MarkerSource) marker.source;
libmv_marker->status = (libmv_MarkerStatus) marker.status;
libmv_marker->source = (libmv_MarkerSource)marker.source;
libmv_marker->status = (libmv_MarkerStatus)marker.status;
libmv_marker->reference_clip = marker.reference_clip;
libmv_marker->reference_frame = marker.reference_frame;
libmv_marker->model_type = (libmv_MarkerModelType) marker.model_type;
libmv_marker->model_type = (libmv_MarkerModelType)marker.model_type;
libmv_marker->model_id = marker.model_id;
libmv_marker->disabled_channels = marker.disabled_channels;
}
@ -78,7 +76,7 @@ void libmv_markerToApiMarker(const Marker& marker,
libmv_TracksN* libmv_tracksNewN(void) {
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
return (libmv_TracksN*) tracks;
return (libmv_TracksN*)tracks;
}
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks) {
@ -89,7 +87,7 @@ void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker) {
Marker marker;
libmv_apiMarkerToMarker(*libmv_marker, &marker);
((Tracks*) libmv_tracks)->AddMarker(marker);
((Tracks*)libmv_tracks)->AddMarker(marker);
}
void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
@ -98,7 +96,7 @@ void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
int track,
libmv_Marker* libmv_marker) {
Marker marker;
((Tracks*) libmv_tracks)->GetMarker(clip, frame, track, &marker);
((Tracks*)libmv_tracks)->GetMarker(clip, frame, track, &marker);
libmv_markerToApiMarker(marker, libmv_marker);
}
@ -106,26 +104,25 @@ void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int clip,
int frame,
int track) {
((Tracks *) libmv_tracks)->RemoveMarker(clip, frame, track);
((Tracks*)libmv_tracks)->RemoveMarker(clip, frame, track);
}
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
int track) {
((Tracks *) libmv_tracks)->RemoveMarkersForTrack(track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks, int track) {
((Tracks*)libmv_tracks)->RemoveMarkersForTrack(track);
}
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks) {
return ((Tracks*) libmv_tracks)->MaxClip();
return ((Tracks*)libmv_tracks)->MaxClip();
}
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip) {
return ((Tracks*) libmv_tracks)->MaxFrame(clip);
return ((Tracks*)libmv_tracks)->MaxFrame(clip);
}
int libmv_tracksMaxTrackN(libmv_TracksN* libmv_tracks) {
return ((Tracks*) libmv_tracks)->MaxTrack();
return ((Tracks*)libmv_tracks)->MaxTrack();
}
int libmv_tracksNumMarkersN(libmv_TracksN* libmv_tracks) {
return ((Tracks*) libmv_tracks)->NumMarkers();
return ((Tracks*)libmv_tracks)->NumMarkers();
}
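A small, hypothetical round-trip through the multi-clip tracks API wrapped above; only fields that appear in the conversion helpers are touched, and the values are illustrative.

libmv_TracksN* tracks = libmv_tracksNewN();

libmv_Marker marker = {0};  /* zero-init, then set the addressing fields */
marker.clip = 0;
marker.frame = 12;
marker.track = 3;
marker.weight = 1.0f;
libmv_tracksAddMarkerN(tracks, &marker);

libmv_Marker fetched;
libmv_tracksGetMarkerN(tracks, /*clip=*/0, /*frame=*/12, /*track=*/3, &fetched);

libmv_tracksDestroyN(tracks);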
View File
@ -79,20 +79,19 @@ typedef struct libmv_Marker {
#ifdef __cplusplus
namespace mv {
struct Marker;
struct Marker;
}
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
mv::Marker *marker);
mv::Marker* marker);
void libmv_markerToApiMarker(const mv::Marker& marker,
libmv_Marker *libmv_marker);
libmv_Marker* libmv_marker);
#endif
libmv_TracksN* libmv_tracksNewN(void);
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks);
void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker);
@ -107,8 +106,7 @@ void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int frame,
int track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
int track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks, int track);
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks);
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip);
View File
@ -30,27 +30,33 @@
# define LIBMV_OBJECT_NEW OBJECT_GUARDED_NEW
# define LIBMV_OBJECT_DELETE OBJECT_GUARDED_DELETE
# define LIBMV_OBJECT_DELETE OBJECT_GUARDED_DELETE
# define LIBMV_STRUCT_NEW(type, count) \
(type*)MEM_mallocN(sizeof(type) * count, __func__)
# define LIBMV_STRUCT_NEW(type, count) \
(type*)MEM_mallocN(sizeof(type) * count, __func__)
# define LIBMV_STRUCT_DELETE(what) MEM_freeN(what)
#else
// Need this to keep libmv-capi potentially standalone.
# if defined __GNUC__ || defined __sun
# define LIBMV_OBJECT_NEW(type, args ...) \
new(malloc(sizeof(type))) type(args)
# define LIBMV_OBJECT_NEW(type, args...) \
new (malloc(sizeof(type))) type(args)
# else
# define LIBMV_OBJECT_NEW(type, ...) \
new(malloc(sizeof(type))) type(__VA_ARGS__)
#endif
# define LIBMV_OBJECT_DELETE(what, type) \
{ \
if (what) { \
((type*)(what))->~type(); \
free(what); \
} \
} (void)0
# define LIBMV_OBJECT_NEW(type, ...) \
new (malloc(sizeof(type))) type(__VA_ARGS__)
# endif
# define LIBMV_OBJECT_DELETE(what, type) \
{ \
if (what) { \
((type*)(what))->~type(); \
free(what); \
} \
} \
(void)0
# define LIBMV_STRUCT_NEW(type, count) (type*)malloc(sizeof(type) * count)
# define LIBMV_STRUCT_DELETE(what) { if (what) free(what); } (void)0
# define LIBMV_STRUCT_DELETE(what) \
{ \
if (what) \
free(what); \
} \
(void)0
#endif
#endif // LIBMV_C_API_UTILDEFINES_H_
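In the standalone (non-Blender) branch above, the object macros pair placement new with an explicit destructor call. A minimal sketch of what a call site expands to, using the Tracks wrappers from the C-API as the example type:

// LIBMV_OBJECT_NEW(Tracks) becomes, roughly:
//   new (malloc(sizeof(Tracks))) Tracks()
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);

// LIBMV_OBJECT_DELETE(tracks, Tracks) runs the destructor in place and then
// frees the raw storage:
//   if (tracks) { ((Tracks*)(tracks))->~Tracks(); free(tracks); }
LIBMV_OBJECT_DELETE(tracks, Tracks);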
View File
@ -21,9 +21,9 @@
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/autotrack.h"
#include "libmv/autotrack/quad.h"
#include "libmv/autotrack/frame_accessor.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/quad.h"
#include "libmv/base/scoped_ptr.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
@ -35,34 +35,30 @@ namespace {
class DisableChannelsTransform : public FrameAccessor::Transform {
public:
DisableChannelsTransform(int disabled_channels)
: disabled_channels_(disabled_channels) { }
: disabled_channels_(disabled_channels) {}
int64_t key() const {
return disabled_channels_;
}
int64_t key() const { return disabled_channels_; }
void run(const FloatImage& input, FloatImage* output) const {
bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
disable_green = (disabled_channels_ & Marker::CHANNEL_G) != 0,
disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
LG << "Disabling channels: "
<< (disable_red ? "R " : "")
<< (disable_green ? "G " : "")
<< (disable_blue ? "B" : "");
LG << "Disabling channels: " << (disable_red ? "R " : "")
<< (disable_green ? "G " : "") << (disable_blue ? "B" : "");
// It's important to rescale the result appropriately so that e.g. if only
// blue is selected, it's not zeroed out.
float scale = (disable_red ? 0.0f : 0.2126f) +
float scale = (disable_red ? 0.0f : 0.2126f) +
(disable_green ? 0.0f : 0.7152f) +
(disable_blue ? 0.0f : 0.0722f);
(disable_blue ? 0.0f : 0.0722f);
output->Resize(input.Height(), input.Width(), 1);
for (int y = 0; y < input.Height(); y++) {
for (int x = 0; x < input.Width(); x++) {
float r = disable_red ? 0.0f : input(y, x, 0);
float r = disable_red ? 0.0f : input(y, x, 0);
float g = disable_green ? 0.0f : input(y, x, 1);
float b = disable_blue ? 0.0f : input(y, x, 2);
float b = disable_blue ? 0.0f : input(y, x, 2);
(*output)(y, x, 0) = (0.2126f * r + 0.7152f * g + 0.0722f * b) / scale;
}
}
@ -73,7 +69,7 @@ class DisableChannelsTransform : public FrameAccessor::Transform {
int disabled_channels_;
};
template<typename QuadT, typename ArrayT>
template <typename QuadT, typename ArrayT>
void QuadToArrays(const QuadT& quad, ArrayT* x, ArrayT* y) {
for (int i = 0; i < 4; ++i) {
x[i] = quad.coordinates(i, 0);
@ -115,11 +111,8 @@ FrameAccessor::Key GetMaskForMarker(const Marker& marker,
FrameAccessor* frame_accessor,
FloatImage* mask) {
Region region = marker.search_region.Rounded();
return frame_accessor->GetMaskForTrack(marker.clip,
marker.frame,
marker.track,
&region,
mask);
return frame_accessor->GetMaskForTrack(
marker.clip, marker.frame, marker.track, &region, mask);
}
} // namespace
@ -152,23 +145,20 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
// TODO(keir): Technically this could take a smaller slice from the source
// image instead of taking one the size of the search window.
FloatImage reference_image;
FrameAccessor::Key reference_key = GetImageForMarker(reference_marker,
frame_accessor_,
&reference_image);
FrameAccessor::Key reference_key =
GetImageForMarker(reference_marker, frame_accessor_, &reference_image);
if (!reference_key) {
LG << "Couldn't get frame for reference marker: " << reference_marker;
return false;
}
FloatImage reference_mask;
FrameAccessor::Key reference_mask_key = GetMaskForMarker(reference_marker,
frame_accessor_,
&reference_mask);
FrameAccessor::Key reference_mask_key =
GetMaskForMarker(reference_marker, frame_accessor_, &reference_mask);
FloatImage tracked_image;
FrameAccessor::Key tracked_key = GetImageForMarker(*tracked_marker,
frame_accessor_,
&tracked_image);
FrameAccessor::Key tracked_key =
GetImageForMarker(*tracked_marker, frame_accessor_, &tracked_image);
if (!tracked_key) {
frame_accessor_->ReleaseImage(reference_key);
LG << "Couldn't get frame for tracked marker: " << tracked_marker;
@ -191,9 +181,11 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
local_track_region_options.attempt_refine_before_brute = predicted_position;
TrackRegion(reference_image,
tracked_image,
x1, y1,
x1,
y1,
local_track_region_options,
x2, y2,
x2,
y2,
result);
// Copy results over the tracked marker.
@ -208,7 +200,7 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
tracked_marker->search_region.Offset(delta);
tracked_marker->source = Marker::TRACKED;
tracked_marker->status = Marker::UNKNOWN;
tracked_marker->reference_clip = reference_marker.clip;
tracked_marker->reference_clip = reference_marker.clip;
tracked_marker->reference_frame = reference_marker.frame;
// Release the images and masks from the accessor cache.
@ -230,7 +222,9 @@ void AutoTrack::SetMarkers(vector<Marker>* markers) {
tracks_.SetMarkers(markers);
}
bool AutoTrack::GetMarker(int clip, int frame, int track,
bool AutoTrack::GetMarker(int clip,
int frame,
int track,
Marker* markers) const {
return tracks_.GetMarker(clip, frame, track, markers);
}
@ -242,7 +236,8 @@ void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
vector<Marker> previous_frame_markers;
// Q: How to decide track #s when detecting?
// Q: How to match markers from previous frame? set of prev frame tracks?
// Q: How to decide what markers should get tracked and which ones should not?
// Q: How to decide what markers should get tracked and which ones should
// not?
for (int frame = 0; frame < num_frames; ++frame) {
if (Cancelled()) {
LG << "Got cancel message while detecting and tracking...";
@ -271,8 +266,7 @@ void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
for (int i = 0; i < this_frame_markers.size(); ++i) {
tracks_in_this_frame.push_back(this_frame_markers[i].track);
}
std::sort(tracks_in_this_frame.begin(),
tracks_in_this_frame.end());
std::sort(tracks_in_this_frame.begin(), tracks_in_this_frame.end());
// Find tracks in the previous frame that are not in this one.
vector<Marker*> previous_frame_markers_to_track;

View File
#ifndef LIBMV_AUTOTRACK_AUTOTRACK_H_
#define LIBMV_AUTOTRACK_AUTOTRACK_H_
#include "libmv/autotrack/tracks.h"
#include "libmv/autotrack/region.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/tracking/track_region.h"
namespace libmv {
@ -74,15 +74,14 @@ class AutoTrack {
Region search_region;
};
AutoTrack(FrameAccessor* frame_accessor)
: frame_accessor_(frame_accessor) {}
AutoTrack(FrameAccessor* frame_accessor) : frame_accessor_(frame_accessor) {}
// Marker manipulation.
// Clip manipulation.
// Set the number of clips. These clips will get accessed from the frame
// accessor, matches between frames found, and a reconstruction created.
//void SetNumFrames(int clip, int num_frames);
// void SetNumFrames(int clip, int num_frames);
// Tracking & Matching
@ -90,7 +89,7 @@ class AutoTrack {
// Caller maintains ownership of *result and *tracked_marker.
bool TrackMarker(Marker* tracked_marker,
TrackRegionResult* result,
const TrackRegionOptions* track_options=NULL);
const TrackRegionOptions* track_options = NULL);
// Wrapper around Tracks API; however these may add additional processing.
void AddMarker(const Marker& tracked_marker);
@ -99,36 +98,36 @@ class AutoTrack {
// TODO(keir): Implement frame matching! This could be very cool for loop
// closing and connecting across clips.
//void MatchFrames(int clip1, int frame1, int clip2, int frame2) {}
// void MatchFrames(int clip1, int frame1, int clip2, int frame2) {}
// Wrapper around the Reconstruction API.
// Returns the new ID.
int AddCameraIntrinsics(CameraIntrinsics* intrinsics) {
(void) intrinsics;
(void)intrinsics;
return 0;
} // XXX
int SetClipIntrinsics(int clip, int intrinsics) {
(void) clip;
(void) intrinsics;
(void)clip;
(void)intrinsics;
return 0;
} // XXX
} // XXX
enum Motion {
GENERAL_CAMERA_MOTION,
TRIPOD_CAMERA_MOTION,
};
int SetClipMotion(int clip, Motion motion) {
(void) clip;
(void) motion;
(void)clip;
(void)motion;
return 0;
} // XXX
} // XXX
// Decide what to refine for the given intrinsics. bundle_options is from
// bundle.h (e.g. BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL_K1).
void SetIntrinsicsRefine(int intrinsics, int bundle_options) {
(void) intrinsics;
(void) bundle_options;
} // XXX
(void)intrinsics;
(void)bundle_options;
} // XXX
// Keyframe read/write.
struct ClipFrame {
@ -150,20 +149,19 @@ class AutoTrack {
};
void DetectAndTrack(const DetectAndTrackOptions& options);
struct DetectFeaturesInFrameOptions {
};
void DetectFeaturesInFrame(int clip, int frame,
const DetectFeaturesInFrameOptions* options=NULL) {
(void) clip;
(void) frame;
(void) options;
} // XXX
struct DetectFeaturesInFrameOptions {};
void DetectFeaturesInFrame(
int clip, int frame, const DetectFeaturesInFrameOptions* options = NULL) {
(void)clip;
(void)frame;
(void)options;
} // XXX
// Does not take ownership of the given listener, but keeps a reference to it.
void AddListener(OperationListener* listener) {(void) listener;} // XXX
void AddListener(OperationListener* listener) { (void)listener; } // XXX
// Create the initial reconstruction,
//void FindInitialReconstruction();
// void FindInitialReconstruction();
// State machine
//
@ -202,17 +200,17 @@ class AutoTrack {
bool Cancelled() { return false; }
Tracks tracks_; // May be normalized camera coordinates or raw pixels.
//Reconstruction reconstruction_;
// Reconstruction reconstruction_;
// TODO(keir): Add the motion models here.
//vector<MotionModel> motion_models_;
// vector<MotionModel> motion_models_;
// TODO(keir): Should num_clips and num_frames get moved to FrameAccessor?
// TODO(keir): What about masking for clips and frames to prevent various
// things like reconstruction or tracking from happening on certain frames?
FrameAccessor* frame_accessor_;
//int num_clips_;
//vector<int> num_frames_; // Indexed by clip.
// int num_clips_;
// vector<int> num_frames_; // Indexed by clip.
// The intrinsics for each clip, assuming each clip has fixed intrinsics.
// TODO(keir): Decide what the semantics should be for varying focal length.
View File
@ -41,7 +41,7 @@ using libmv::FloatImage;
// implementations to cache filtered image pieces).
struct FrameAccessor {
struct Transform {
virtual ~Transform() { }
virtual ~Transform() {}
// The key should depend on the transform arguments. Must be non-zero.
virtual int64_t key() const = 0;
@ -50,10 +50,7 @@ struct FrameAccessor {
virtual void run(const FloatImage& input, FloatImage* output) const = 0;
};
enum InputMode {
MONO,
RGBA
};
enum InputMode { MONO, RGBA };
typedef void* Key;
@ -100,6 +97,6 @@ struct FrameAccessor {
virtual int NumFrames(int clip) = 0;
};
} // namespace libmv
} // namespace mv
#endif // LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_
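A minimal sketch of a custom Transform against the interface above, modeled on DisableChannelsTransform from autotrack.cc; the "invert" behaviour itself is made up for illustration.

struct InvertTransform : public mv::FrameAccessor::Transform {
  // A constant is fine here because this transform has no arguments; the key
  // only has to be non-zero and distinct per argument combination.
  int64_t key() const { return 42; }
  void run(const libmv::FloatImage& input, libmv::FloatImage* output) const {
    output->Resize(input.Height(), input.Width(), 1);
    for (int y = 0; y < input.Height(); y++) {
      for (int x = 0; x < input.Width(); x++) {
        (*output)(y, x, 0) = 1.0f - input(y, x, 0);
      }
    }
  }
};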
View File
@ -57,23 +57,19 @@ struct Marker {
float weight;
enum Source {
MANUAL, // The user placed this marker manually.
DETECTED, // A keypoint detector found this point.
TRACKED, // The tracking algorithm placed this marker.
MATCHED, // A matching algorithm (e.g. SIFT or SURF or ORB) found this.
PREDICTED, // A motion model predicted this marker. This is needed for
// handling occlusions in some cases where an imaginary marker
// is placed to keep camera motion smooth.
MANUAL, // The user placed this marker manually.
DETECTED, // A keypoint detector found this point.
TRACKED, // The tracking algorithm placed this marker.
MATCHED, // A matching algorithm (e.g. SIFT or SURF or ORB) found this.
PREDICTED, // A motion model predicted this marker. This is needed for
// handling occlusions in some cases where an imaginary marker
// is placed to keep camera motion smooth.
};
Source source;
// Markers may be inliers or outliers if the tracking fails; this allows
// visualizing the markers in the image.
enum Status {
UNKNOWN,
INLIER,
OUTLIER
};
enum Status { UNKNOWN, INLIER, OUTLIER };
Status status;
// When doing correlation tracking, where to search in the current frame for
@ -90,12 +86,7 @@ struct Marker {
// another primitive (a rectangular prism). This captures the information
// needed to say that for example a collection of markers belongs to model #2
// (and model #2 is a plane).
enum ModelType {
POINT,
PLANE,
LINE,
CUBE
};
enum ModelType { POINT, PLANE, LINE, CUBE };
ModelType model_type;
// The model ID this track (e.g. the second model, which is a plane).
@ -114,7 +105,7 @@ struct Marker {
int disabled_channels;
// Offset everything (center, patch, search) by the given delta.
template<typename T>
template <typename T>
void Offset(const T& offset) {
center += offset.template cast<float>();
patch.coordinates.rowwise() += offset.template cast<int>();
@ -122,19 +113,15 @@ struct Marker {
}
// Shift the center to the given new position (and patch, search).
template<typename T>
template <typename T>
void SetPosition(const T& new_center) {
Offset(new_center - center);
}
};
inline std::ostream& operator<<(std::ostream& out, const Marker& marker) {
out << "{"
<< marker.clip << ", "
<< marker.frame << ", "
<< marker.track << ", ("
<< marker.center.x() << ", "
<< marker.center.y() << ")"
out << "{" << marker.clip << ", " << marker.frame << ", " << marker.track
<< ", (" << marker.center.x() << ", " << marker.center.y() << ")"
<< "}";
return out;
}
View File
@ -23,18 +23,13 @@
#ifndef LIBMV_AUTOTRACK_MODEL_H_
#define LIBMV_AUTOTRACK_MODEL_H_
#include "libmv/numeric/numeric.h"
#include "libmv/autotrack/quad.h"
#include "libmv/numeric/numeric.h"
namespace mv {
struct Model {
enum ModelType {
POINT,
PLANE,
LINE,
CUBE
};
enum ModelType { POINT, PLANE, LINE, CUBE };
// ???
};
View File
@ -20,8 +20,8 @@
//
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
@ -31,8 +31,8 @@ namespace mv {
namespace {
using libmv::vector;
using libmv::Vec2;
using libmv::vector;
// Implied time delta between steps. Set empirically by tweaking and seeing
// what numbers did best at prediction.
@ -57,6 +57,8 @@ const double dt = 3.8;
// For a typical system having constant velocity. This gives smooth-appearing
// predictions, but they are not always as accurate.
//
// clang-format off
const double velocity_state_transition_data[] = {
1, dt, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
@ -65,10 +67,13 @@ const double velocity_state_transition_data[] = {
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
// clang-format on
#if 0
// This 3rd-order system also models acceleration. This makes for "jerky"
// predictions, but they tend to be more accurate.
//
// clang-format off
const double acceleration_state_transition_data[] = {
1, dt, dt*dt/2, 0, 0, 0,
0, 1, dt, 0, 0, 0,
@ -77,9 +82,12 @@ const double acceleration_state_transition_data[] = {
0, 0, 0, 0, 1, dt,
0, 0, 0, 0, 0, 1
};
// clang-format on
// This system (attempts) to add an angular velocity component. However, it's
// total junk.
//
// clang-format off
const double angular_state_transition_data[] = {
1, dt, -dt, 0, 0, 0, // Position x
0, 1, 0, 0, 0, 0, // Velocity x
@ -88,17 +96,22 @@ const double angular_state_transition_data[] = {
0, 0, 0, 0, 1, 0, // Velocity y
0, 0, 0, 0, 0, 1 // Ignored
};
// clang-format on
#endif
const double* state_transition_data = velocity_state_transition_data;
// Observation matrix.
// clang-format off
const double observation_data[] = {
1., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 0., 0.
};
// clang-format on
// Process covariance.
//
// clang-format off
const double process_covariance_data[] = {
35, 0, 0, 0, 0, 0,
0, 5, 0, 0, 0, 0,
@ -107,14 +120,19 @@ const double process_covariance_data[] = {
0, 0, 0, 0, 5, 0,
0, 0, 0, 0, 0, 5
};
// clang-format on
// Measurement covariance.
const double measurement_covariance_data[] = {
0.01, 0.00,
0.00, 0.01,
0.01,
0.00,
0.00,
0.01,
};
// Initial covariance.
//
// clang-format off
const double initial_covariance_data[] = {
10, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
@ -123,6 +141,7 @@ const double initial_covariance_data[] = {
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
// clang-format on
typedef mv::KalmanFilter<double, 6, 2> TrackerKalman;
@ -138,7 +157,7 @@ bool OrderByFrameLessThan(const Marker* a, const Marker* b) {
}
return a->clip < b->clip;
}
return a->frame < b-> frame;
return a->frame < b->frame;
}
// Predicted must be after the previous markers (in the frame numbering sense).
@ -146,9 +165,9 @@ void RunPrediction(const vector<Marker*> previous_markers,
Marker* predicted_marker) {
TrackerKalman::State state;
state.mean << previous_markers[0]->center.x(), 0, 0,
previous_markers[0]->center.y(), 0, 0;
state.covariance = Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(
initial_covariance_data);
previous_markers[0]->center.y(), 0, 0;
state.covariance =
Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(initial_covariance_data);
int current_frame = previous_markers[0]->frame;
int target_frame = predicted_marker->frame;
@ -159,19 +178,18 @@ void RunPrediction(const vector<Marker*> previous_markers,
for (int i = 1; i < previous_markers.size(); ++i) {
// Step forward predicting the state until it is on the current marker.
int predictions = 0;
for (;
current_frame != previous_markers[i]->frame;
for (; current_frame != previous_markers[i]->frame;
current_frame += frame_delta) {
filter.Step(&state);
predictions++;
LG << "Predicted point (frame " << current_frame << "): "
<< state.mean(0) << ", " << state.mean(3);
LG << "Predicted point (frame " << current_frame << "): " << state.mean(0)
<< ", " << state.mean(3);
}
// Log the error -- not actually used, but interesting.
Vec2 error = previous_markers[i]->center.cast<double>() -
Vec2(state.mean(0), state.mean(3));
LG << "Prediction error for " << predictions << " steps: ("
<< error.x() << ", " << error.y() << "); norm: " << error.norm();
LG << "Prediction error for " << predictions << " steps: (" << error.x()
<< ", " << error.y() << "); norm: " << error.norm();
// Now that the state is predicted in the current frame, update the state
// based on the measurement from the current frame.
filter.Update(previous_markers[i]->center.cast<double>(),
@ -184,8 +202,8 @@ void RunPrediction(const vector<Marker*> previous_markers,
// predict until the target frame.
for (; current_frame != target_frame; current_frame += frame_delta) {
filter.Step(&state);
LG << "Final predicted point (frame " << current_frame << "): "
<< state.mean(0) << ", " << state.mean(3);
LG << "Final predicted point (frame " << current_frame
<< "): " << state.mean(0) << ", " << state.mean(3);
}
// The x and y positions are at 0 and 3; ignore acceleration and velocity.
@ -253,13 +271,13 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
} else if (insert_at != -1) {
// Found existing marker; scan before and after it.
forward_scan_begin = insert_at + 1;
forward_scan_end = markers.size() - 1;;
forward_scan_end = markers.size() - 1;
backward_scan_begin = insert_at - 1;
backward_scan_end = 0;
} else {
// Didn't find existing marker but found an insertion point.
forward_scan_begin = insert_before;
forward_scan_end = markers.size() - 1;;
forward_scan_end = markers.size() - 1;
backward_scan_begin = insert_before - 1;
backward_scan_end = 0;
}
@ -301,9 +319,8 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
return false;
}
LG << "Predicting backward";
int predict_begin =
std::min(forward_scan_begin + max_frames_to_predict_from,
forward_scan_end);
int predict_begin = std::min(
forward_scan_begin + max_frames_to_predict_from, forward_scan_end);
int predict_end = forward_scan_begin;
vector<Marker*> previous_markers;
for (int i = predict_begin; i >= predict_end; --i) {
@ -312,7 +329,6 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
RunPrediction(previous_markers, marker);
return false;
}
}
} // namespace mv
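For intuition, a hand-applied constant-velocity step matching velocity_state_transition_data above with dt = 3.8; the state layout is [x, vx, (unused), y, vy, (unused)] (the third and sixth entries carry acceleration only in the 3rd-order variant), and the numbers are made up.

double dt = 3.8;
double state[6] = {1.0, 0.5, 0.0, 2.0, 1.0, 0.0};
double next_x = state[0] + dt * state[1];  // 1.0 + 3.8 * 0.5 = 2.9
double next_y = state[3] + dt * state[4];  // 2.0 + 3.8 * 1.0 = 5.8
// Velocities and the unused third components carry over unchanged.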
View File
@ -35,17 +35,15 @@ static void AddMarker(int frame, float x, float y, Tracks* tracks) {
marker.frame = frame;
marker.center.x() = x;
marker.center.y() = y;
marker.patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
marker.patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
tracks->AddMarker(marker);
}
TEST(PredictMarkerPosition, EasyLinearMotion) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@ -66,10 +64,8 @@ TEST(PredictMarkerPosition, EasyLinearMotion) {
// Check the patch coordinates as well.
double x = 9, y = 40.0;
Quad2Df expected_patch;
expected_patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
expected_patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
@ -78,8 +74,8 @@ TEST(PredictMarkerPosition, EasyLinearMotion) {
TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
Tracks tracks;
AddMarker(8, 1.0, 0.0, &tracks);
AddMarker(7, 2.0, 5.0, &tracks);
AddMarker(8, 1.0, 0.0, &tracks);
AddMarker(7, 2.0, 5.0, &tracks);
AddMarker(6, 3.0, 10.0, &tracks);
AddMarker(5, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@ -101,10 +97,8 @@ TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
// Check the patch coordinates as well.
double x = 9.0, y = 40.0;
Quad2Df expected_patch;
expected_patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
expected_patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
@ -113,8 +107,8 @@ TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
TEST(PredictMarkerPosition, TwoFrameGap) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@ -135,8 +129,8 @@ TEST(PredictMarkerPosition, TwoFrameGap) {
TEST(PredictMarkerPosition, FourFrameGap) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
// Missing frames 4, 5, 6, 7.
@ -154,13 +148,13 @@ TEST(PredictMarkerPosition, FourFrameGap) {
TEST(PredictMarkerPosition, MultipleGaps) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
// AddMarker(3, 4.0, 15.0, &tracks); // Note the 3-frame gap.
// AddMarker(4, 5.0, 20.0, &tracks);
// AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks); // Intermediate measurement.
AddMarker(6, 7.0, 30.0, &tracks); // Intermediate measurement.
// AddMarker(7, 8.0, 35.0, &tracks);
Marker predicted;
@ -178,14 +172,14 @@ TEST(PredictMarkerPosition, MarkersInRandomOrder) {
Tracks tracks;
// This is the same as the easy, except that the tracks are randomly ordered.
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(7, 8.0, 35.0, &tracks);
AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
Marker predicted;
predicted.clip = 0;
View File
@ -27,7 +27,7 @@
namespace mv {
template<typename T, int D>
template <typename T, int D>
struct Quad {
// A quad is 4 points; generally in 2D or 3D.
//
@ -35,7 +35,7 @@ struct Quad {
// |\.
// | \.
// | z (z goes into screen)
// |
// |
// | r0----->r1
// | ^ |
// | | . |
@ -44,7 +44,7 @@ struct Quad {
// | \.
// | \.
// v normal goes away (right handed).
// y
// y
//
// Each row is one of the corners coordinates; either (x, y) or (x, y, z).
Eigen::Matrix<T, 4, D> coordinates;
View File
@ -57,17 +57,17 @@ class Reconstruction {
public:
// All methods copy their input reference or take ownership of the pointer.
void AddCameraPose(const CameraPose& pose);
int AddCameraIntrinsics(CameraIntrinsics* intrinsics);
int AddPoint(const Point& point);
int AddModel(Model* model);
int AddCameraIntrinsics(CameraIntrinsics* intrinsics);
int AddPoint(const Point& point);
int AddModel(Model* model);
// Returns the corresponding pose or point or NULL if missing.
CameraPose* CameraPoseForFrame(int clip, int frame);
CameraPose* CameraPoseForFrame(int clip, int frame);
const CameraPose* CameraPoseForFrame(int clip, int frame) const;
Point* PointForTrack(int track);
Point* PointForTrack(int track);
const Point* PointForTrack(int track) const;
const vector<vector<CameraPose> >& camera_poses() const {
const vector<vector<CameraPose>>& camera_poses() const {
return camera_poses_;
}
View File
@ -46,7 +46,7 @@ struct Region {
Vec2f min;
Vec2f max;
template<typename T>
template <typename T>
void Offset(const T& offset) {
min += offset.template cast<float>();
max += offset.template cast<float>();
View File
@ -23,8 +23,8 @@
#include "libmv/autotrack/tracks.h"
#include <algorithm>
#include <vector>
#include <iterator>
#include <vector>
#include "libmv/numeric/numeric.h"
@ -34,12 +34,12 @@ Tracks::Tracks(const Tracks& other) {
markers_ = other.markers_;
}
Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {}
Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {
}
bool Tracks::GetMarker(int clip, int frame, int track, Marker* marker) const {
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame &&
if (markers_[i].clip == clip && markers_[i].frame == frame &&
markers_[i].track == track) {
*marker = markers_[i];
return true;
@ -60,8 +60,7 @@ void Tracks::GetMarkersForTrackInClip(int clip,
int track,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (clip == markers_[i].clip &&
track == markers_[i].track) {
if (clip == markers_[i].clip && track == markers_[i].track) {
markers->push_back(markers_[i]);
}
}
@ -71,15 +70,16 @@ void Tracks::GetMarkersInFrame(int clip,
int frame,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame) {
if (markers_[i].clip == clip && markers_[i].frame == frame) {
markers->push_back(markers_[i]);
}
}
}
void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
int clip2, int frame2,
void Tracks::GetMarkersForTracksInBothImages(int clip1,
int frame1,
int clip2,
int frame2,
vector<Marker>* markers) const {
std::vector<int> image1_tracks;
std::vector<int> image2_tracks;
@ -99,20 +99,19 @@ void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
std::sort(image1_tracks.begin(), image1_tracks.end());
std::sort(image2_tracks.begin(), image2_tracks.end());
std::vector<int> intersection;
std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
image2_tracks.begin(), image2_tracks.end(),
std::set_intersection(image1_tracks.begin(),
image1_tracks.end(),
image2_tracks.begin(),
image2_tracks.end(),
std::back_inserter(intersection));
// Scan through and get the relevant tracks from the two images.
for (int i = 0; i < markers_.size(); ++i) {
// Save markers that are in either frame and are in our candidate set.
if (((markers_[i].clip == clip1 &&
markers_[i].frame == frame1) ||
(markers_[i].clip == clip2 &&
markers_[i].frame == frame2)) &&
std::binary_search(intersection.begin(),
intersection.end(),
markers_[i].track)) {
if (((markers_[i].clip == clip1 && markers_[i].frame == frame1) ||
(markers_[i].clip == clip2 && markers_[i].frame == frame2)) &&
std::binary_search(
intersection.begin(), intersection.end(), markers_[i].track)) {
markers->push_back(markers_[i]);
}
}
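A standalone illustration of the intersection step used above, with toy track ids in plain std::vector form; both inputs must be sorted, as they are after the std::sort calls above.

#include <algorithm>
#include <iterator>
#include <vector>

std::vector<int> image1_tracks = {1, 3, 5, 7};  // tracks seen in (clip1, frame1)
std::vector<int> image2_tracks = {3, 4, 7, 9};  // tracks seen in (clip2, frame2)
std::vector<int> intersection;
std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
                      image2_tracks.begin(), image2_tracks.end(),
                      std::back_inserter(intersection));
// intersection == {3, 7}; only markers on these tracks end up in *markers.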
@ -122,8 +121,7 @@ void Tracks::AddMarker(const Marker& marker) {
// TODO(keir): This is quadratic for repeated insertions. Fix this by adding
// a smarter data structure like a set<>.
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == marker.clip &&
markers_[i].frame == marker.frame &&
if (markers_[i].clip == marker.clip && markers_[i].frame == marker.frame &&
markers_[i].track == marker.track) {
markers_[i] = marker;
return;
@ -139,8 +137,7 @@ void Tracks::SetMarkers(vector<Marker>* markers) {
bool Tracks::RemoveMarker(int clip, int frame, int track) {
int size = markers_.size();
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame &&
if (markers_[i].clip == clip && markers_[i].frame == frame &&
markers_[i].track == track) {
markers_[i] = markers_[size - 1];
markers_.resize(size - 1);
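
The two-image query above is a standard sorted-intersection pattern: collect the track ids seen in each image, sort both lists, intersect them, and keep only markers whose track id survives a binary search. A minimal self-contained sketch of that pattern using plain std::vector<int> (illustration only, not the mv::Tracks API itself):

#include <algorithm>
#include <iterator>
#include <vector>

int main() {
  // Track ids observed in image 1 and image 2, in scan order.
  std::vector<int> image1_tracks = {7, 3, 5, 1};
  std::vector<int> image2_tracks = {5, 9, 3};

  // Sort so that set_intersection and binary_search apply.
  std::sort(image1_tracks.begin(), image1_tracks.end());
  std::sort(image2_tracks.begin(), image2_tracks.end());

  std::vector<int> intersection;
  std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
                        image2_tracks.begin(), image2_tracks.end(),
                        std::back_inserter(intersection));  // {3, 5}

  // The later marker scan keeps only tracks present in both images.
  const int candidate_track = 5;
  const bool in_both = std::binary_search(
      intersection.begin(), intersection.end(), candidate_track);
  return in_both ? 0 : 1;
}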

View File

@ -23,8 +23,8 @@
#ifndef LIBMV_AUTOTRACK_TRACKS_H_
#define LIBMV_AUTOTRACK_TRACKS_H_
#include "libmv/base/vector.h"
#include "libmv/autotrack/marker.h"
#include "libmv/base/vector.h"
namespace mv {
@ -33,8 +33,8 @@ using libmv::vector;
// The Tracks container stores correspondences between frames.
class Tracks {
public:
Tracks() { }
Tracks(const Tracks &other);
Tracks() {}
Tracks(const Tracks& other);
// Create a tracks object with markers already initialized. Copies markers.
explicit Tracks(const vector<Marker>& markers);
@ -51,8 +51,10 @@ class Tracks {
//
// This is not the same as the union of the markers in frame1 and
// frame2; each marker is for a track that appears in both images.
void GetMarkersForTracksInBothImages(int clip1, int frame1,
int clip2, int frame2,
void GetMarkersForTracksInBothImages(int clip1,
int frame1,
int clip2,
int frame2,
vector<Marker>* markers) const;
void AddMarker(const Marker& marker);

View File

@ -22,8 +22,8 @@
#include "libmv/autotrack/tracks.h"
#include "testing/testing.h"
#include "libmv/logging/logging.h"
#include "testing/testing.h"
namespace mv {

View File

@ -41,11 +41,11 @@
namespace libmv {
void *aligned_malloc(int size, int alignment) {
void* aligned_malloc(int size, int alignment) {
#ifdef _WIN32
return _aligned_malloc(size, alignment);
#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__)
void *result;
void* result;
if (posix_memalign(&result, alignment, size)) {
// non-zero means allocation error
@ -58,7 +58,7 @@ void *aligned_malloc(int size, int alignment) {
#endif
}
void aligned_free(void *ptr) {
void aligned_free(void* ptr) {
#ifdef _WIN32
_aligned_free(ptr);
#else
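
A hedged usage sketch for the allocator above, assuming the declarations live in a header at libmv/base/aligned_malloc.h (the file path is not shown on this page): allocate a 64-byte-aligned block, check the alignment, and release it with the matching free.

#include <cassert>
#include <cstdint>

#include "libmv/base/aligned_malloc.h"  // Assumed header location.

int main() {
  const int kAlignment = 64;
  void* buffer = libmv::aligned_malloc(1024, kAlignment);

  // The returned pointer honours the requested alignment regardless of
  // which platform branch above was taken.
  assert(reinterpret_cast<std::uintptr_t>(buffer) % kAlignment == 0);

  // Memory from aligned_malloc must go back through aligned_free,
  // never through plain free() or delete.
  libmv::aligned_free(buffer);
  return 0;
}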

View File

@ -24,10 +24,10 @@
namespace libmv {
// Allocate block of size bytes at least aligned to a given value.
void *aligned_malloc(int size, int alignment);
void* aligned_malloc(int size, int alignment);
// Free memory allocated by aligned_malloc.
void aligned_free(void *ptr);
void aligned_free(void* ptr);
} // namespace libmv

View File

@ -28,6 +28,7 @@ class IdGenerator {
public:
IdGenerator() : next_(0) {}
ID Generate() { return next_++; }
private:
ID next_;
};

View File

@ -26,8 +26,8 @@
namespace libmv {
using std::map;
using std::make_pair;
using std::map;
} // namespace libmv

View File

@ -30,44 +30,44 @@ namespace libmv {
* A handle for a heap-allocated resource that should be freed when it goes out
* of scope. This looks similar to the one found in TR1.
*/
template<typename T>
template <typename T>
class scoped_ptr {
public:
scoped_ptr(T *resource) : resource_(resource) {}
scoped_ptr(T* resource) : resource_(resource) {}
~scoped_ptr() { reset(0); }
T *get() const { return resource_; }
T *operator->() const { return resource_; }
T &operator*() const { return *resource_; }
T* get() const { return resource_; }
T* operator->() const { return resource_; }
T& operator*() const { return *resource_; }
void reset(T *new_resource) {
void reset(T* new_resource) {
if (sizeof(T)) {
delete resource_;
}
resource_ = new_resource;
}
T *release() {
T *released_resource = resource_;
T* release() {
T* released_resource = resource_;
resource_ = 0;
return released_resource;
}
private:
// No copying allowed.
T *resource_;
T* resource_;
};
// Same as scoped_ptr but caller must allocate the data
// with new[] and the destructor will free the memory
// using delete[].
template<typename T>
template <typename T>
class scoped_array {
public:
scoped_array(T *array) : array_(array) {}
scoped_array(T* array) : array_(array) {}
~scoped_array() { reset(NULL); }
T *get() const { return array_; }
T* get() const { return array_; }
T& operator[](std::ptrdiff_t i) const {
assert(i >= 0);
@ -75,25 +75,27 @@ class scoped_array {
return array_[i];
}
void reset(T *new_array) {
void reset(T* new_array) {
if (sizeof(T)) {
delete[] array_;
}
array_ = new_array;
}
T *release() {
T *released_array = array_;
T* release() {
T* released_array = array_;
array_ = NULL;
return released_array;
}
private:
T *array_;
T* array_;
// Forbid comparison of different scoped_array types.
template <typename T2> bool operator==(scoped_array<T2> const& p2) const;
template <typename T2> bool operator!=(scoped_array<T2> const& p2) const;
template <typename T2>
bool operator==(scoped_array<T2> const& p2) const;
template <typename T2>
bool operator!=(scoped_array<T2> const& p2) const;
// Disallow evil constructors
scoped_array(const scoped_array&);
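
A short usage sketch for the two wrappers above (the header path is assumed to be libmv/base/scoped_ptr.h, matching the libmv/base/ layout used elsewhere in this commit; Blob is a made-up type for illustration): the destructor frees the resource unless ownership was handed back with release().

#include <cstdio>

#include "libmv/base/scoped_ptr.h"  // Assumed header location.

struct Blob {
  Blob() : value(42) {}
  int value;
};

int main() {
  {
    libmv::scoped_ptr<Blob> blob(new Blob);
    std::printf("%d\n", blob->value);  // Access through operator->.
  }  // blob leaves scope here and the Blob is deleted.

  libmv::scoped_ptr<Blob> owner(new Blob);
  Blob* raw = owner.release();  // Ownership handed back; owner now holds 0.
  delete raw;                   // The caller frees it manually.

  libmv::scoped_array<int> values(new int[8]);  // Array wrapper, freed at scope exit.
  values[0] = 1;                                // operator[] indexes the array.
  return 0;
}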

View File

@ -25,9 +25,9 @@ namespace libmv {
namespace {
struct FreeMe {
FreeMe(int *freed) : freed(freed) {}
FreeMe(int* freed) : freed(freed) {}
~FreeMe() { (*freed)++; }
int *freed;
int* freed;
};
TEST(ScopedPtr, NullDoesNothing) {
@ -61,8 +61,8 @@ TEST(ScopedPtr, Reset) {
TEST(ScopedPtr, ReleaseAndGet) {
int frees = 0;
FreeMe *allocated = new FreeMe(&frees);
FreeMe *released = NULL;
FreeMe* allocated = new FreeMe(&frees);
FreeMe* released = NULL;
{
scoped_ptr<FreeMe> scoped(allocated);
EXPECT_EQ(0, frees);

View File

@ -19,9 +19,9 @@
// IN THE SOFTWARE.
#include "libmv/base/vector.h"
#include <algorithm>
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
#include <algorithm>
namespace {
using namespace libmv;
@ -62,7 +62,7 @@ int foo_destruct_calls = 0;
struct Foo {
public:
Foo() : value(5) { foo_construct_calls++; }
~Foo() { foo_destruct_calls++; }
~Foo() { foo_destruct_calls++; }
int value;
};
@ -150,7 +150,7 @@ TEST_F(VectorTest, CopyConstructor) {
a.push_back(3);
vector<int> b(a);
EXPECT_EQ(a.size(), b.size());
EXPECT_EQ(a.size(), b.size());
for (int i = 0; i < a.size(); ++i) {
EXPECT_EQ(a[i], b[i]);
}
@ -164,7 +164,7 @@ TEST_F(VectorTest, OperatorEquals) {
b = a;
EXPECT_EQ(a.size(), b.size());
EXPECT_EQ(a.size(), b.size());
for (int i = 0; i < a.size(); ++i) {
EXPECT_EQ(a[i], b[i]);
}

View File

@ -18,14 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_BASE_VECTOR_UTILS_H_
#define LIBMV_BASE_VECTOR_UTILS_H_
/// Delete the contents of a container.
template <class Array>
void DeleteElements(Array *array) {
for (int i = 0; i < array->size(); ++i) {
void DeleteElements(Array* array) {
for (int i = 0; i < array->size(); ++i) {
delete (*array)[i];
}
array->clear();

View File

@ -18,18 +18,17 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/image/image.h"
#include <iostream>
#include <cmath>
#include <iostream>
#include "libmv/image/image.h"
namespace libmv {
void FloatArrayToScaledByteArray(const Array3Df &float_array,
Array3Du *byte_array,
bool automatic_range_detection
) {
void FloatArrayToScaledByteArray(const Array3Df& float_array,
Array3Du* byte_array,
bool automatic_range_detection) {
byte_array->ResizeLike(float_array);
float minval = HUGE_VAL;
float minval = HUGE_VAL;
float maxval = -HUGE_VAL;
if (automatic_range_detection) {
for (int i = 0; i < float_array.Height(); ++i) {
@ -54,8 +53,8 @@ void FloatArrayToScaledByteArray(const Array3Df &float_array,
}
}
void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
Array3Df *float_array) {
void ByteArrayToScaledFloatArray(const Array3Du& byte_array,
Array3Df* float_array) {
float_array->ResizeLike(byte_array);
for (int i = 0; i < byte_array.Height(); ++i) {
for (int j = 0; j < byte_array.Width(); ++j) {
@ -66,10 +65,10 @@ void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
}
}
void SplitChannels(const Array3Df &input,
Array3Df *channel0,
Array3Df *channel1,
Array3Df *channel2) {
void SplitChannels(const Array3Df& input,
Array3Df* channel0,
Array3Df* channel1,
Array3Df* channel2) {
assert(input.Depth() >= 3);
channel0->Resize(input.Height(), input.Width());
channel1->Resize(input.Height(), input.Width());
@ -83,7 +82,7 @@ void SplitChannels(const Array3Df &input,
}
}
void PrintArray(const Array3Df &array) {
void PrintArray(const Array3Df& array) {
using namespace std;
printf("[\n");

View File

@ -44,13 +44,13 @@ class ArrayND : public BaseArray {
ArrayND() : data_(NULL), own_data_(true) { Resize(Index(0)); }
/// Create an array with the specified shape.
ArrayND(const Index &shape) : data_(NULL), own_data_(true) { Resize(shape); }
ArrayND(const Index& shape) : data_(NULL), own_data_(true) { Resize(shape); }
/// Create an array with the specified shape.
ArrayND(int *shape) : data_(NULL), own_data_(true) { Resize(shape); }
ArrayND(int* shape) : data_(NULL), own_data_(true) { Resize(shape); }
/// Copy constructor.
ArrayND(const ArrayND<T, N> &b) : data_(NULL), own_data_(true) {
ArrayND(const ArrayND<T, N>& b) : data_(NULL), own_data_(true) {
ResizeLike(b);
std::memcpy(Data(), b.Data(), sizeof(T) * Size());
}
@ -58,7 +58,7 @@ class ArrayND : public BaseArray {
ArrayND(int s0) : data_(NULL), own_data_(true) { Resize(s0); }
ArrayND(int s0, int s1) : data_(NULL), own_data_(true) { Resize(s0, s1); }
ArrayND(int s0, int s1, int s2) : data_(NULL), own_data_(true) {
Resize(s0, s1, s2);
Resize(s0, s1, s2);
}
ArrayND(T* data, int s0, int s1, int s2)
@ -69,28 +69,24 @@ class ArrayND : public BaseArray {
/// Destructor deletes pixel data.
~ArrayND() {
if (own_data_) {
delete [] data_;
delete[] data_;
}
}
/// Assignment copies pixel data.
ArrayND &operator=(const ArrayND<T, N> &b) {
ArrayND& operator=(const ArrayND<T, N>& b) {
assert(this != &b);
ResizeLike(b);
std::memcpy(Data(), b.Data(), sizeof(T) * Size());
return *this;
}
const Index &Shapes() const {
return shape_;
}
const Index& Shapes() const { return shape_; }
const Index &Strides() const {
return strides_;
}
const Index& Strides() const { return strides_; }
/// Create an array of shape s.
void Resize(const Index &new_shape) {
void Resize(const Index& new_shape) {
if (data_ != NULL && shape_ == new_shape) {
// Don't bother reallocating if the shapes match.
return;
@ -101,7 +97,7 @@ class ArrayND : public BaseArray {
strides_(i - 1) = strides_(i) * shape_(i);
}
if (own_data_) {
delete [] data_;
delete[] data_;
data_ = NULL;
if (Size() > 0) {
data_ = new T[Size()];
@ -109,15 +105,13 @@ class ArrayND : public BaseArray {
}
}
template<typename D>
void ResizeLike(const ArrayND<D, N> &other) {
template <typename D>
void ResizeLike(const ArrayND<D, N>& other) {
Resize(other.Shape());
}
/// Resizes the array to shape s. All data is lost.
void Resize(const int *new_shape_array) {
Resize(Index(new_shape_array));
}
void Resize(const int* new_shape_array) { Resize(Index(new_shape_array)); }
/// Resize a 1D array to length s0.
void Resize(int s0) {
@ -136,9 +130,7 @@ class ArrayND : public BaseArray {
}
// Match Eigen2's API.
void resize(int rows, int cols) {
Resize(rows, cols);
}
void resize(int rows, int cols) { Resize(rows, cols); }
/// Resize a 3D array to shape (s0,s1,s2).
void Resize(int s0, int s1, int s2) {
@ -147,11 +139,11 @@ class ArrayND : public BaseArray {
Resize(shape);
}
template<typename D>
void CopyFrom(const ArrayND<D, N> &other) {
template <typename D>
void CopyFrom(const ArrayND<D, N>& other) {
ResizeLike(other);
T *data = Data();
const D *other_data = other.Data();
T* data = Data();
const D* other_data = other.Data();
for (int i = 0; i < Size(); ++i) {
data[i] = T(other_data[i]);
}
@ -171,19 +163,13 @@ class ArrayND : public BaseArray {
}
/// Return a tuple containing the length of each axis.
const Index &Shape() const {
return shape_;
}
const Index& Shape() const { return shape_; }
/// Return the length of an axis.
int Shape(int axis) const {
return shape_(axis);
}
int Shape(int axis) const { return shape_(axis); }
/// Return the distance between neighboring elements along axis.
int Stride(int axis) const {
return strides_(axis);
}
int Stride(int axis) const { return strides_(axis); }
/// Return the number of elements of the array.
int Size() const {
@ -194,18 +180,16 @@ class ArrayND : public BaseArray {
}
/// Return the total amount of memory used by the array.
int MemorySizeInBytes() const {
return sizeof(*this) + Size() * sizeof(T);
}
int MemorySizeInBytes() const { return sizeof(*this) + Size() * sizeof(T); }
/// Pointer to the first element of the array.
T *Data() { return data_; }
T* Data() { return data_; }
/// Constant pointer to the first element of the array.
const T *Data() const { return data_; }
const T* Data() const { return data_; }
/// Distance between the first element and the element at position index.
int Offset(const Index &index) const {
int Offset(const Index& index) const {
int offset = 0;
for (int i = 0; i < N; ++i)
offset += index(i) * Stride(i);
@ -231,25 +215,23 @@ class ArrayND : public BaseArray {
}
/// Return a reference to the element at position index.
T &operator()(const Index &index) {
T& operator()(const Index& index) {
// TODO(pau) Boundary checking in debug mode.
return *( Data() + Offset(index) );
return *(Data() + Offset(index));
}
/// 1D specialization.
T &operator()(int i0) {
return *( Data() + Offset(i0) );
}
T& operator()(int i0) { return *(Data() + Offset(i0)); }
/// 2D specialization.
T &operator()(int i0, int i1) {
T& operator()(int i0, int i1) {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
return *(Data() + Offset(i0, i1));
}
/// 3D specialization.
T &operator()(int i0, int i1, int i2) {
T& operator()(int i0, int i1, int i2) {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
assert(0 <= i2 && i2 < Shape(2));
@ -257,29 +239,27 @@ class ArrayND : public BaseArray {
}
/// Return a constant reference to the element at position index.
const T &operator()(const Index &index) const {
const T& operator()(const Index& index) const {
return *(Data() + Offset(index));
}
/// 1D specialization.
const T &operator()(int i0) const {
return *(Data() + Offset(i0));
}
const T& operator()(int i0) const { return *(Data() + Offset(i0)); }
/// 2D specialization.
const T &operator()(int i0, int i1) const {
const T& operator()(int i0, int i1) const {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
return *(Data() + Offset(i0, i1));
}
/// 3D specialization.
const T &operator()(int i0, int i1, int i2) const {
const T& operator()(int i0, int i1, int i2) const {
return *(Data() + Offset(i0, i1, i2));
}
/// True if index is inside array.
bool Contains(const Index &index) const {
bool Contains(const Index& index) const {
for (int i = 0; i < N; ++i)
if (index(i) < 0 || index(i) >= Shape(i))
return false;
@ -287,26 +267,24 @@ class ArrayND : public BaseArray {
}
/// 1D specialization.
bool Contains(int i0) const {
return 0 <= i0 && i0 < Shape(0);
}
bool Contains(int i0) const { return 0 <= i0 && i0 < Shape(0); }
/// 2D specialization.
bool Contains(int i0, int i1) const {
return 0 <= i0 && i0 < Shape(0)
&& 0 <= i1 && i1 < Shape(1);
return 0 <= i0 && i0 < Shape(0) && 0 <= i1 && i1 < Shape(1);
}
/// 3D specialization.
bool Contains(int i0, int i1, int i2) const {
return 0 <= i0 && i0 < Shape(0)
&& 0 <= i1 && i1 < Shape(1)
&& 0 <= i2 && i2 < Shape(2);
return 0 <= i0 && i0 < Shape(0) && 0 <= i1 && i1 < Shape(1) && 0 <= i2 &&
i2 < Shape(2);
}
bool operator==(const ArrayND<T, N> &other) const {
if (shape_ != other.shape_) return false;
if (strides_ != other.strides_) return false;
bool operator==(const ArrayND<T, N>& other) const {
if (shape_ != other.shape_)
return false;
if (strides_ != other.strides_)
return false;
for (int i = 0; i < Size(); ++i) {
if (this->Data()[i] != other.Data()[i])
return false;
@ -314,11 +292,11 @@ class ArrayND : public BaseArray {
return true;
}
bool operator!=(const ArrayND<T, N> &other) const {
bool operator!=(const ArrayND<T, N>& other) const {
return !(*this == other);
}
ArrayND<T, N> operator*(const ArrayND<T, N> &other) const {
ArrayND<T, N> operator*(const ArrayND<T, N>& other) const {
assert(Shape() == other.Shape());
ArrayND<T, N> res;
res.ResizeLike(*this);
@ -336,7 +314,7 @@ class ArrayND : public BaseArray {
Index strides_;
/// Pointer to the first element of the array.
T *data_;
T* data_;
/// Flag for whether this Array owns or references the data
bool own_data_;
@ -346,30 +324,20 @@ class ArrayND : public BaseArray {
template <typename T>
class Array3D : public ArrayND<T, 3> {
typedef ArrayND<T, 3> Base;
public:
Array3D()
: Base() {
}
Array3D(int height, int width, int depth = 1)
: Base(height, width, depth) {
}
Array3D() : Base() {}
Array3D(int height, int width, int depth = 1) : Base(height, width, depth) {}
Array3D(T* data, int height, int width, int depth = 1)
: Base(data, height, width, depth) {
}
: Base(data, height, width, depth) {}
void Resize(int height, int width, int depth = 1) {
Base::Resize(height, width, depth);
}
int Height() const {
return Base::Shape(0);
}
int Width() const {
return Base::Shape(1);
}
int Depth() const {
return Base::Shape(2);
}
int Height() const { return Base::Shape(0); }
int Width() const { return Base::Shape(1); }
int Depth() const { return Base::Shape(2); }
// Match Eigen2's API so that Array3D's and Mat*'s can work together via
// template magic.
@ -377,15 +345,15 @@ class Array3D : public ArrayND<T, 3> {
int cols() const { return Width(); }
int depth() const { return Depth(); }
int Get_Step() const { return Width()*Depth(); }
int Get_Step() const { return Width() * Depth(); }
/// Enable accessing with 2 indices for grayscale images.
T &operator()(int i0, int i1, int i2 = 0) {
T& operator()(int i0, int i1, int i2 = 0) {
assert(0 <= i0 && i0 < Height());
assert(0 <= i1 && i1 < Width());
return Base::operator()(i0, i1, i2);
}
const T &operator()(int i0, int i1, int i2 = 0) const {
const T& operator()(int i0, int i1, int i2 = 0) const {
assert(0 <= i0 && i0 < Height());
assert(0 <= i1 && i1 < Width());
return Base::operator()(i0, i1, i2);
@ -398,31 +366,29 @@ typedef Array3D<int> Array3Di;
typedef Array3D<float> Array3Df;
typedef Array3D<short> Array3Ds;
void SplitChannels(const Array3Df &input,
Array3Df *channel0,
Array3Df *channel1,
Array3Df *channel2);
void SplitChannels(const Array3Df& input,
Array3Df* channel0,
Array3Df* channel1,
Array3Df* channel2);
void PrintArray(const Array3Df &array);
void PrintArray(const Array3Df& array);
/** Convert a float array into a byte array by scaling values by 255* (max-min).
* where max and min are automatically detected
* where max and min are automatically detected
* (if automatic_range_detection = true)
* \note and TODO this automatic detection only works when the image contains
* at least one pixel of both bounds.
**/
void FloatArrayToScaledByteArray(const Array3Df &float_array,
Array3Du *byte_array,
void FloatArrayToScaledByteArray(const Array3Df& float_array,
Array3Du* byte_array,
bool automatic_range_detection = false);
//! Convert a byte array into a float array by dividing values by 255.
void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
Array3Df *float_array);
void ByteArrayToScaledFloatArray(const Array3Du& byte_array,
Array3Df* float_array);
template <typename AArrayType, typename BArrayType, typename CArrayType>
void MultiplyElements(const AArrayType &a,
const BArrayType &b,
CArrayType *c) {
void MultiplyElements(const AArrayType& a, const BArrayType& b, CArrayType* c) {
// This function does an element-wise multiply between
// the two Arrays A and B, and stores the result in C.
// A and B must have the same dimensions.
@ -435,7 +401,7 @@ void MultiplyElements(const AArrayType &a,
// The index starts at the maximum value for each dimension
const typename CArrayType::Index& cShape = c->Shape();
for ( int i = 0; i < CArrayType::Index::SIZE; ++i )
for (int i = 0; i < CArrayType::Index::SIZE; ++i)
index(i) = cShape(i) - 1;
// After each multiplication, the highest-dimensional index is reduced.
@ -443,12 +409,12 @@ void MultiplyElements(const AArrayType &a,
// and decrements the index of the next lower dimension.
// This ripple-action continues until the entire new array has been
// calculated, indicated by dimension zero having a negative index.
while ( index(0) >= 0 ) {
while (index(0) >= 0) {
(*c)(index) = a(index) * b(index);
int dimension = CArrayType::Index::SIZE - 1;
index(dimension) = index(dimension) - 1;
while ( dimension > 0 && index(dimension) < 0 ) {
while (dimension > 0 && index(dimension) < 0) {
index(dimension) = cShape(dimension) - 1;
index(dimension - 1) = index(dimension - 1) - 1;
--dimension;
@ -457,9 +423,9 @@ void MultiplyElements(const AArrayType &a,
}
template <typename TA, typename TB, typename TC>
void MultiplyElements(const ArrayND<TA, 3> &a,
const ArrayND<TB, 3> &b,
ArrayND<TC, 3> *c) {
void MultiplyElements(const ArrayND<TA, 3>& a,
const ArrayND<TB, 3>& b,
ArrayND<TC, 3>* c) {
// Specialization for N==3
c->ResizeLike(a);
assert(a.Shape(0) == b.Shape(0));
@ -475,9 +441,9 @@ void MultiplyElements(const ArrayND<TA, 3> &a,
}
template <typename TA, typename TB, typename TC>
void MultiplyElements(const Array3D<TA> &a,
const Array3D<TB> &b,
Array3D<TC> *c) {
void MultiplyElements(const Array3D<TA>& a,
const Array3D<TB>& b,
Array3D<TC>* c) {
// Specialization for N==3
c->ResizeLike(a);
assert(a.Shape(0) == b.Shape(0));
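
ArrayND stores one dense row-major block: Resize() gives the last axis stride 1 and each earlier axis the product of the later extents, and Offset() is the dot product of an index with those strides. A condensed stand-alone sketch of the same bookkeeping for a 3-D shape:

#include <cassert>

int main() {
  const int N = 3;
  const int shape[N] = {4, 5, 3};  // e.g. height, width, depth.

  // Row-major strides: the last axis is contiguous.
  int strides[N];
  strides[N - 1] = 1;
  for (int i = N - 1; i > 0; --i) {
    strides[i - 1] = strides[i] * shape[i];
  }
  // strides is now {15, 3, 1}.

  // Offset of element (2, 4, 1) = 2*15 + 4*3 + 1*1 = 43.
  const int index[N] = {2, 4, 1};
  int offset = 0;
  for (int i = 0; i < N; ++i) {
    offset += index[i] * strides[i];
  }
  assert(offset == 43);
  return 0;
}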

View File

@ -21,9 +21,9 @@
#include "libmv/image/array_nd.h"
#include "testing/testing.h"
using libmv::ArrayND;
using libmv::Array3D;
using libmv::Array3Df;
using libmv::ArrayND;
namespace {
@ -100,7 +100,7 @@ TEST(ArrayND, Size) {
int l[] = {0, 1, 2};
ArrayND<int, 3>::Index last(l);
EXPECT_EQ(a.Size(), a.Offset(last)+1);
EXPECT_EQ(a.Size(), a.Offset(last) + 1);
EXPECT_TRUE(a.Contains(last));
EXPECT_FALSE(a.Contains(shape));
}
@ -120,8 +120,8 @@ TEST(ArrayND, Parenthesis) {
int s[] = {3, 3};
ArrayND<int, 2> a(s);
*(a.Data()+0) = 0;
*(a.Data()+5) = 5;
*(a.Data() + 0) = 0;
*(a.Data() + 5) = 5;
int i1[] = {0, 0};
EXPECT_EQ(0, a(Index(i1)));
@ -210,7 +210,7 @@ TEST(ArrayND, MultiplyElements) {
b(1, 1, 0) = 3;
ArrayND<int, 3> c;
MultiplyElements(a, b, &c);
EXPECT_FLOAT_EQ(6, c(0, 0, 0));
EXPECT_FLOAT_EQ(6, c(0, 0, 0));
EXPECT_FLOAT_EQ(10, c(0, 1, 0));
EXPECT_FLOAT_EQ(12, c(1, 0, 0));
EXPECT_FLOAT_EQ(12, c(1, 1, 0));

View File

@ -29,7 +29,7 @@ namespace libmv {
// Compute a Gaussian kernel and derivative, such that you can take the
// derivative of an image by convolving with the kernel horizontally then the
// derivative vertically to get (eg) the y derivative.
void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative) {
assert(sigma >= 0.0);
// 0.004 implies a 3 pixel kernel with 1 pixel sigma.
@ -37,7 +37,7 @@ void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
// Calculate the kernel size based on sigma such that it is odd.
float precisehalfwidth = GaussianInversePositive(truncation_factor, sigma);
int width = lround(2*precisehalfwidth);
int width = lround(2 * precisehalfwidth);
if (width % 2 == 0) {
width++;
}
@ -47,7 +47,7 @@ void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
kernel->setZero();
derivative->setZero();
int halfwidth = width / 2;
for (int i = -halfwidth; i <= halfwidth; ++i) {
for (int i = -halfwidth; i <= halfwidth; ++i) {
(*kernel)(i + halfwidth) = Gaussian(i, sigma);
(*derivative)(i + halfwidth) = GaussianDerivative(i, sigma);
}
@ -57,16 +57,21 @@ void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
// Normalize the derivative differently. See
// www.cs.duke.edu/courses/spring03/cps296.1/handouts/Image%20Processing.pdf
double factor = 0.;
for (int i = -halfwidth; i <= halfwidth; ++i) {
factor -= i*(*derivative)(i+halfwidth);
for (int i = -halfwidth; i <= halfwidth; ++i) {
factor -= i * (*derivative)(i + halfwidth);
}
*derivative /= factor;
}
template <int size, bool vertical>
void FastConvolve(const Vec &kernel, int width, int height,
const float* src, int src_stride, int src_line_stride,
float* dst, int dst_stride) {
void FastConvolve(const Vec& kernel,
int width,
int height,
const float* src,
int src_stride,
int src_line_stride,
float* dst,
int dst_stride) {
double coefficients[2 * size + 1];
for (int k = 0; k < 2 * size + 1; ++k) {
coefficients[k] = kernel(2 * size - k);
@ -93,14 +98,14 @@ void FastConvolve(const Vec &kernel, int width, int height,
}
}
template<bool vertical>
void Convolve(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
template <bool vertical>
void Convolve(const Array3Df& in,
const Vec& kernel,
Array3Df* out_pointer,
int plane) {
int width = in.Width();
int height = in.Height();
Array3Df &out = *out_pointer;
Array3Df& out = *out_pointer;
if (plane == -1) {
out.ResizeLike(in);
plane = 0;
@ -119,61 +124,62 @@ void Convolve(const Array3Df &in,
// fast path.
int half_width = kernel.size() / 2;
switch (half_width) {
#define static_convolution(size) case size: \
FastConvolve<size, vertical>(kernel, width, height, src, src_stride, \
src_line_stride, dst, dst_stride); break;
static_convolution(1)
static_convolution(2)
static_convolution(3)
static_convolution(4)
static_convolution(5)
static_convolution(6)
static_convolution(7)
#define static_convolution(size) \
case size: \
FastConvolve<size, vertical>(kernel, \
width, \
height, \
src, \
src_stride, \
src_line_stride, \
dst, \
dst_stride); \
break;
static_convolution(1) static_convolution(2) static_convolution(3)
static_convolution(4) static_convolution(5) static_convolution(6)
static_convolution(7)
#undef static_convolution
default:
int dynamic_size = kernel.size() / 2;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double sum = 0;
// Slow path: this loop cannot be unrolled.
for (int k = -dynamic_size; k <= dynamic_size; ++k) {
if (vertical) {
if (y + k >= 0 && y + k < height) {
sum += src[k * src_line_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
} else {
if (x + k >= 0 && x + k < width) {
sum += src[k * src_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
default : int dynamic_size = kernel.size() / 2;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double sum = 0;
// Slow path: this loop cannot be unrolled.
for (int k = -dynamic_size; k <= dynamic_size; ++k) {
if (vertical) {
if (y + k >= 0 && y + k < height) {
sum += src[k * src_line_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
} else {
if (x + k >= 0 && x + k < width) {
sum += src[k * src_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
}
dst[0] = static_cast<float>(sum);
src += src_stride;
dst += dst_stride;
}
dst[0] = static_cast<float>(sum);
src += src_stride;
dst += dst_stride;
}
}
}
}
void ConvolveHorizontal(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
void ConvolveHorizontal(const Array3Df& in,
const Vec& kernel,
Array3Df* out_pointer,
int plane) {
Convolve<false>(in, kernel, out_pointer, plane);
}
void ConvolveVertical(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
void ConvolveVertical(const Array3Df& in,
const Vec& kernel,
Array3Df* out_pointer,
int plane) {
Convolve<true>(in, kernel, out_pointer, plane);
}
void ConvolveGaussian(const Array3Df &in,
double sigma,
Array3Df *out_pointer) {
void ConvolveGaussian(const Array3Df& in, double sigma, Array3Df* out_pointer) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
@ -182,10 +188,10 @@ void ConvolveGaussian(const Array3Df &in,
ConvolveHorizontal(tmp, kernel, out_pointer);
}
void ImageDerivatives(const Array3Df &in,
void ImageDerivatives(const Array3Df& in,
double sigma,
Array3Df *gradient_x,
Array3Df *gradient_y) {
Array3Df* gradient_x,
Array3Df* gradient_y) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
@ -199,11 +205,11 @@ void ImageDerivatives(const Array3Df &in,
ConvolveVertical(tmp, derivative, gradient_y);
}
void BlurredImageAndDerivatives(const Array3Df &in,
void BlurredImageAndDerivatives(const Array3Df& in,
double sigma,
Array3Df *blurred_image,
Array3Df *gradient_x,
Array3Df *gradient_y) {
Array3Df* blurred_image,
Array3Df* gradient_x,
Array3Df* gradient_y) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
@ -224,9 +230,9 @@ void BlurredImageAndDerivatives(const Array3Df &in,
// image, and store the results in three channels. Since the blurred value and
// gradients are closer in memory, this leads to better performance if all
// three values are needed at the same time.
void BlurredImageAndDerivativesChannels(const Array3Df &in,
void BlurredImageAndDerivativesChannels(const Array3Df& in,
double sigma,
Array3Df *blurred_and_gradxy) {
Array3Df* blurred_and_gradxy) {
assert(in.Depth() == 1);
Vec kernel, derivative;
@ -246,10 +252,10 @@ void BlurredImageAndDerivativesChannels(const Array3Df &in,
ConvolveVertical(tmp, derivative, blurred_and_gradxy, 2);
}
void BoxFilterHorizontal(const Array3Df &in,
void BoxFilterHorizontal(const Array3Df& in,
int window_size,
Array3Df *out_pointer) {
Array3Df &out = *out_pointer;
Array3Df* out_pointer) {
Array3Df& out = *out_pointer;
out.ResizeLike(in);
int half_width = (window_size - 1) / 2;
@ -266,7 +272,7 @@ void BoxFilterHorizontal(const Array3Df &in,
out(i, j, k) = sum;
}
// Fill interior.
for (int j = half_width + 1; j < in.Width()-half_width; ++j) {
for (int j = half_width + 1; j < in.Width() - half_width; ++j) {
sum -= in(i, j - half_width - 1, k);
sum += in(i, j + half_width, k);
out(i, j, k) = sum;
@ -280,10 +286,10 @@ void BoxFilterHorizontal(const Array3Df &in,
}
}
void BoxFilterVertical(const Array3Df &in,
void BoxFilterVertical(const Array3Df& in,
int window_size,
Array3Df *out_pointer) {
Array3Df &out = *out_pointer;
Array3Df* out_pointer) {
Array3Df& out = *out_pointer;
out.ResizeLike(in);
int half_width = (window_size - 1) / 2;
@ -300,7 +306,7 @@ void BoxFilterVertical(const Array3Df &in,
out(i, j, k) = sum;
}
// Fill interior.
for (int i = half_width + 1; i < in.Height()-half_width; ++i) {
for (int i = half_width + 1; i < in.Height() - half_width; ++i) {
sum -= in(i - half_width - 1, j, k);
sum += in(i + half_width, j, k);
out(i, j, k) = sum;
@ -314,9 +320,7 @@ void BoxFilterVertical(const Array3Df &in,
}
}
void BoxFilter(const Array3Df &in,
int box_width,
Array3Df *out) {
void BoxFilter(const Array3Df& in, int box_width, Array3Df* out) {
Array3Df tmp;
BoxFilterHorizontal(in, box_width, &tmp);
BoxFilterVertical(tmp, box_width, out);
@ -327,17 +331,17 @@ void LaplaceFilter(unsigned char* src,
int width,
int height,
int strength) {
for (int y = 1; y < height-1; y++)
for (int x = 1; x < width-1; x++) {
const unsigned char* s = &src[y*width+x];
int l = 128 +
s[-width-1] + s[-width] + s[-width+1] +
s[1] - 8*s[0] + s[1] +
s[ width-1] + s[ width] + s[ width+1];
int d = ((256-strength)*s[0] + strength*l) / 256;
if (d < 0) d=0;
if (d > 255) d=255;
dst[y*width+x] = d;
for (int y = 1; y < height - 1; y++)
for (int x = 1; x < width - 1; x++) {
const unsigned char* s = &src[y * width + x];
int l = 128 + s[-width - 1] + s[-width] + s[-width + 1] + s[1] -
8 * s[0] + s[1] + s[width - 1] + s[width] + s[width + 1];
int d = ((256 - strength) * s[0] + strength * l) / 256;
if (d < 0)
d = 0;
if (d > 255)
d = 255;
dst[y * width + x] = d;
}
}
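
A condensed sketch of what ComputeGaussianKernel above sets up: choose an odd width from the 0.004 truncation threshold, fill the Gaussian and its derivative, normalize the smoothing kernel to unit sum, and rescale the derivative kernel by factor = -sum_i i*d(i) so that convolving a unit ramp yields slope one. The kernel-sum normalization line is elided on this page, so that step is an assumption here; plain std::vector stands in for Vec, and GaussianKernels is an illustrative name.

#include <cmath>
#include <vector>

void GaussianKernels(double sigma,
                     std::vector<double>* kernel,
                     std::vector<double>* derivative) {
  // Odd width chosen so the tails fall below the truncation factor.
  const double truncation = 0.004;
  const double half_width_f =
      std::sqrt(-2.0 * sigma * sigma *
                std::log(truncation * sigma * std::sqrt(2.0 * M_PI)));
  int width = static_cast<int>(std::lround(2.0 * half_width_f));
  if (width % 2 == 0) {
    width++;
  }
  const int halfwidth = width / 2;

  kernel->assign(width, 0.0);
  derivative->assign(width, 0.0);
  double sum = 0.0;
  double factor = 0.0;
  for (int i = -halfwidth; i <= halfwidth; ++i) {
    const double g = std::exp(-i * i / (2.0 * sigma * sigma)) /
                     std::sqrt(2.0 * M_PI * sigma * sigma);
    (*kernel)[i + halfwidth] = g;
    (*derivative)[i + halfwidth] = -i / (sigma * sigma) * g;
    sum += g;
    factor -= i * (*derivative)[i + halfwidth];
  }
  for (int i = 0; i < width; ++i) {
    (*kernel)[i] /= sum;         // Smoothing kernel sums to one (assumed step).
    (*derivative)[i] /= factor;  // A unit ramp now differentiates to exactly one.
  }
}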

View File

@ -30,70 +30,71 @@ namespace libmv {
// Zero mean Gaussian.
inline double Gaussian(double x, double sigma) {
return 1/sqrt(2*M_PI*sigma*sigma) * exp(-(x*x/2/sigma/sigma));
return 1 / sqrt(2 * M_PI * sigma * sigma) * exp(-(x * x / 2 / sigma / sigma));
}
// 2D gaussian (zero mean)
// (9) in http://mathworld.wolfram.com/GaussianFunction.html
inline double Gaussian2D(double x, double y, double sigma) {
return 1.0/(2.0*M_PI*sigma*sigma) * exp( -(x*x+y*y)/(2.0*sigma*sigma));
return 1.0 / (2.0 * M_PI * sigma * sigma) *
exp(-(x * x + y * y) / (2.0 * sigma * sigma));
}
inline double GaussianDerivative(double x, double sigma) {
return -x / sigma / sigma * Gaussian(x, sigma);
}
// Solve the inverse of the Gaussian for positive x.
inline double GaussianInversePositive(double y, double sigma) {
return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2*M_PI)));
return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2 * M_PI)));
}
void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative);
void ConvolveHorizontal(const FloatImage &in,
const Vec &kernel,
FloatImage *out_pointer,
void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative);
void ConvolveHorizontal(const FloatImage& in,
const Vec& kernel,
FloatImage* out_pointer,
int plane = -1);
void ConvolveVertical(const FloatImage &in,
const Vec &kernel,
FloatImage *out_pointer,
void ConvolveVertical(const FloatImage& in,
const Vec& kernel,
FloatImage* out_pointer,
int plane = -1);
void ConvolveGaussian(const FloatImage &in,
void ConvolveGaussian(const FloatImage& in,
double sigma,
FloatImage *out_pointer);
FloatImage* out_pointer);
void ImageDerivatives(const FloatImage &in,
void ImageDerivatives(const FloatImage& in,
double sigma,
FloatImage *gradient_x,
FloatImage *gradient_y);
FloatImage* gradient_x,
FloatImage* gradient_y);
void BlurredImageAndDerivatives(const FloatImage &in,
void BlurredImageAndDerivatives(const FloatImage& in,
double sigma,
FloatImage *blurred_image,
FloatImage *gradient_x,
FloatImage *gradient_y);
FloatImage* blurred_image,
FloatImage* gradient_x,
FloatImage* gradient_y);
// Blur and take the gradients of an image, storing the results inside the
// three channels of blurred_and_gradxy.
void BlurredImageAndDerivativesChannels(const FloatImage &in,
void BlurredImageAndDerivativesChannels(const FloatImage& in,
double sigma,
FloatImage *blurred_and_gradxy);
FloatImage* blurred_and_gradxy);
void BoxFilterHorizontal(const FloatImage &in,
void BoxFilterHorizontal(const FloatImage& in,
int window_size,
FloatImage *out_pointer);
FloatImage* out_pointer);
void BoxFilterVertical(const FloatImage &in,
void BoxFilterVertical(const FloatImage& in,
int window_size,
FloatImage *out_pointer);
FloatImage* out_pointer);
void BoxFilter(const FloatImage &in,
int box_width,
FloatImage *out);
void BoxFilter(const FloatImage& in, int box_width, FloatImage* out);
/*!
Convolve \a src into \a dst with the discrete laplacian operator.
\a src and \a dst should be \a width x \a height images.
\a strength is an interpolation coefficient (0-256) between original image and the laplacian.
\a strength is an interpolation coefficient (0-256) between original image
and the laplacian.
\note Make sure the search region is filtered with the same strength as the pattern.
\note Make sure the search region is filtered with the same strength as the
pattern.
*/
void LaplaceFilter(unsigned char* src,
unsigned char* dst,
@ -104,4 +105,3 @@ void LaplaceFilter(unsigned char* src,
} // namespace libmv
#endif // LIBMV_IMAGE_CONVOLVE_H_
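
The inline helpers in this header are mutually consistent: GaussianInversePositive is the exact inverse of Gaussian for positive x, which is what the kernel-sizing code relies on when it turns the truncation threshold into a half-width. A small numeric round-trip check, re-deriving both formulas locally so the snippet stays self-contained:

#include <cassert>
#include <cmath>

static double Gaussian(double x, double sigma) {
  return 1.0 / std::sqrt(2.0 * M_PI * sigma * sigma) *
         std::exp(-(x * x) / (2.0 * sigma * sigma));
}

static double GaussianInversePositive(double y, double sigma) {
  return std::sqrt(-2.0 * sigma * sigma *
                   std::log(y * sigma * std::sqrt(2.0 * M_PI)));
}

int main() {
  const double sigma = 1.5;
  const double y = 0.004;  // The truncation factor used for kernel sizing.
  const double x = GaussianInversePositive(y, sigma);
  // Round trip: evaluating the Gaussian at the recovered x gives y back.
  assert(std::fabs(Gaussian(x, sigma) - y) < 1e-9);
  return 0;
}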

View File

@ -85,26 +85,26 @@ TEST(Convolve, BlurredImageAndDerivativesChannelsHorizontalSlope) {
FloatImage image(10, 10), blurred_and_derivatives;
for (int j = 0; j < 10; ++j) {
for (int i = 0; i < 10; ++i) {
image(j, i) = 2*i;
image(j, i) = 2 * i;
}
}
BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 2.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 2.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
}
TEST(Convolve, BlurredImageAndDerivativesChannelsVerticalSlope) {
FloatImage image(10, 10), blurred_and_derivatives;
for (int j = 0; j < 10; ++j) {
for (int i = 0; i < 10; ++i) {
image(j, i) = 2*j;
image(j, i) = 2 * j;
}
}
BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 2.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 2.0, 1e-7);
}
} // namespace

View File

@ -21,14 +21,14 @@
#ifndef LIBMV_IMAGE_CORRELATION_H
#define LIBMV_IMAGE_CORRELATION_H
#include "libmv/logging/logging.h"
#include "libmv/image/image.h"
#include "libmv/logging/logging.h"
namespace libmv {
inline double PearsonProductMomentCorrelation(
const FloatImage &image_and_gradient1_sampled,
const FloatImage &image_and_gradient2_sampled) {
const FloatImage& image_and_gradient1_sampled,
const FloatImage& image_and_gradient2_sampled) {
assert(image_and_gradient1_sampled.Width() ==
image_and_gradient2_sampled.Width());
assert(image_and_gradient1_sampled.Height() ==
@ -63,9 +63,8 @@ inline double PearsonProductMomentCorrelation(
double covariance_xy = sXY - sX * sY;
double correlation = covariance_xy / sqrt(var_x * var_y);
LG << "Covariance xy: " << covariance_xy
<< ", var 1: " << var_x << ", var 2: " << var_y
<< ", correlation: " << correlation;
LG << "Covariance xy: " << covariance_xy << ", var 1: " << var_x
<< ", var 2: " << var_y << ", correlation: " << correlation;
return correlation;
}
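
The value above is the textbook Pearson product-moment coefficient computed from running sums: r = (E[XY] - E[X]E[Y]) / sqrt(Var(X) * Var(Y)). A stand-alone version of the same arithmetic over two equally sized float buffers (plain vectors instead of the sampled FloatImage patches; PearsonCorrelation is an illustrative name):

#include <cmath>
#include <vector>

double PearsonCorrelation(const std::vector<float>& x,
                          const std::vector<float>& y) {
  // Assumes equal, non-zero lengths and non-constant signals
  // (a zero variance would divide by zero).
  const int n = static_cast<int>(x.size());
  double sX = 0.0, sY = 0.0, sXX = 0.0, sYY = 0.0, sXY = 0.0;
  for (int i = 0; i < n; ++i) {
    sX += x[i];
    sY += y[i];
    sXX += static_cast<double>(x[i]) * x[i];
    sYY += static_cast<double>(y[i]) * y[i];
    sXY += static_cast<double>(x[i]) * y[i];
  }
  // Turn the sums into means, then into (co)variances.
  sX /= n;
  sY /= n;
  sXX /= n;
  sYY /= n;
  sXY /= n;
  const double var_x = sXX - sX * sX;
  const double var_y = sYY - sY * sY;
  const double covariance_xy = sXY - sX * sY;
  return covariance_xy / std::sqrt(var_x * var_y);
}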

View File

@ -39,14 +39,11 @@ typedef Array3Ds ShortImage;
// is the best solution after all.
class Image {
public:
// Create an image from an array. The image takes ownership of the array.
Image(Array3Du *array) : array_type_(BYTE), array_(array) {}
Image(Array3Df *array) : array_type_(FLOAT), array_(array) {}
Image(Array3Du* array) : array_type_(BYTE), array_(array) {}
Image(Array3Df* array) : array_type_(FLOAT), array_(array) {}
Image(const Image &img): array_type_(NONE), array_(NULL) {
*this = img;
}
Image(const Image& img) : array_type_(NONE), array_(NULL) { *this = img; }
// Underlying data type.
enum DataType {
@ -62,20 +59,18 @@ class Image {
int size;
switch (array_type_) {
case BYTE:
size = reinterpret_cast<Array3Du *>(array_)->MemorySizeInBytes();
break;
size = reinterpret_cast<Array3Du*>(array_)->MemorySizeInBytes();
break;
case FLOAT:
size = reinterpret_cast<Array3Df *>(array_)->MemorySizeInBytes();
break;
size = reinterpret_cast<Array3Df*>(array_)->MemorySizeInBytes();
break;
case INT:
size = reinterpret_cast<Array3Di *>(array_)->MemorySizeInBytes();
break;
size = reinterpret_cast<Array3Di*>(array_)->MemorySizeInBytes();
break;
case SHORT:
size = reinterpret_cast<Array3Ds *>(array_)->MemorySizeInBytes();
break;
default :
size = 0;
assert(0);
size = reinterpret_cast<Array3Ds*>(array_)->MemorySizeInBytes();
break;
default: size = 0; assert(0);
}
size += sizeof(*this);
return size;
@ -83,71 +78,57 @@ class Image {
~Image() {
switch (array_type_) {
case BYTE:
delete reinterpret_cast<Array3Du *>(array_);
break;
case FLOAT:
delete reinterpret_cast<Array3Df *>(array_);
break;
case INT:
delete reinterpret_cast<Array3Di *>(array_);
break;
case SHORT:
delete reinterpret_cast<Array3Ds *>(array_);
break;
default:
assert(0);
}
case BYTE: delete reinterpret_cast<Array3Du*>(array_); break;
case FLOAT: delete reinterpret_cast<Array3Df*>(array_); break;
case INT: delete reinterpret_cast<Array3Di*>(array_); break;
case SHORT: delete reinterpret_cast<Array3Ds*>(array_); break;
default: assert(0);
}
}
Image& operator= (const Image& f) {
Image& operator=(const Image& f) {
if (this != &f) {
array_type_ = f.array_type_;
switch (array_type_) {
case BYTE:
delete reinterpret_cast<Array3Du *>(array_);
array_ = new Array3Du(*(Array3Du *)f.array_);
break;
delete reinterpret_cast<Array3Du*>(array_);
array_ = new Array3Du(*(Array3Du*)f.array_);
break;
case FLOAT:
delete reinterpret_cast<Array3Df *>(array_);
array_ = new Array3Df(*(Array3Df *)f.array_);
break;
delete reinterpret_cast<Array3Df*>(array_);
array_ = new Array3Df(*(Array3Df*)f.array_);
break;
case INT:
delete reinterpret_cast<Array3Di *>(array_);
array_ = new Array3Di(*(Array3Di *)f.array_);
break;
delete reinterpret_cast<Array3Di*>(array_);
array_ = new Array3Di(*(Array3Di*)f.array_);
break;
case SHORT:
delete reinterpret_cast<Array3Ds *>(array_);
array_ = new Array3Ds(*(Array3Ds *)f.array_);
break;
default:
assert(0);
delete reinterpret_cast<Array3Ds*>(array_);
array_ = new Array3Ds(*(Array3Ds*)f.array_);
break;
default: assert(0);
}
}
return *this;
}
Array3Du *AsArray3Du() const {
Array3Du* AsArray3Du() const {
if (array_type_ == BYTE) {
return reinterpret_cast<Array3Du *>(array_);
return reinterpret_cast<Array3Du*>(array_);
}
return NULL;
}
Array3Df *AsArray3Df() const {
Array3Df* AsArray3Df() const {
if (array_type_ == FLOAT) {
return reinterpret_cast<Array3Df *>(array_);
return reinterpret_cast<Array3Df*>(array_);
}
return NULL;
}
private:
DataType array_type_;
BaseArray *array_;
BaseArray* array_;
};
} // namespace libmv

View File

@ -28,7 +28,7 @@ namespace libmv {
// The factor comes from http://www.easyrgb.com/
// RGB to XYZ : Y is the luminance channel
// var_R * 0.2126 + var_G * 0.7152 + var_B * 0.0722
template<typename T>
template <typename T>
inline T RGB2GRAY(const T r, const T g, const T b) {
return static_cast<T>(r * 0.2126 + g * 0.7152 + b * 0.0722);
}
@ -42,8 +42,8 @@ inline unsigned char RGB2GRAY<unsigned char>(const unsigned char r,
return (unsigned char)(r * 0.2126 + g * 0.7152 + b * 0.0722 +0.5);
}*/
template<class ImageIn, class ImageOut>
void Rgb2Gray(const ImageIn &imaIn, ImageOut *imaOut) {
template <class ImageIn, class ImageOut>
void Rgb2Gray(const ImageIn& imaIn, ImageOut* imaOut) {
// It is all fine to convert an RGBA image here as well,
// all the additional channels will be nicely ignored.
assert(imaIn.Depth() >= 3);
@ -52,21 +52,22 @@ void Rgb2Gray(const ImageIn &imaIn, ImageOut *imaOut) {
// Convert each RGB pixel into Gray value (luminance)
for (int j = 0; j < imaIn.Height(); ++j) {
for (int i = 0; i < imaIn.Width(); ++i) {
(*imaOut)(j, i) = RGB2GRAY(imaIn(j, i, 0) , imaIn(j, i, 1), imaIn(j, i, 2));
for (int i = 0; i < imaIn.Width(); ++i) {
(*imaOut)(j, i) =
RGB2GRAY(imaIn(j, i, 0), imaIn(j, i, 1), imaIn(j, i, 2));
}
}
}
// Convert given float image to an unsigned char array of pixels.
template<class Image>
unsigned char *FloatImageToUCharArray(const Image &image) {
unsigned char *buffer =
template <class Image>
unsigned char* FloatImageToUCharArray(const Image& image) {
unsigned char* buffer =
new unsigned char[image.Width() * image.Height() * image.Depth()];
for (int y = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int d = 0; d < image.Depth(); d++) {
for (int x = 0; x < image.Width(); x++) {
for (int d = 0; d < image.Depth(); d++) {
int index = (y * image.Width() + x) * image.Depth() + d;
buffer[index] = 255.0 * image(y, x, d);
}
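
The 0.2126 / 0.7152 / 0.0722 weights in RGB2GRAY are the Rec. 709 (sRGB) luminance coefficients, so two quick sanity checks are that pure white maps to 1 (the weights sum to one) and that green contributes more than red, which contributes more than blue. A tiny worked example, re-declaring the helper locally so it compiles on its own:

#include <cassert>
#include <cmath>

template <typename T>
static T RGB2GRAY(const T r, const T g, const T b) {
  return static_cast<T>(r * 0.2126 + g * 0.7152 + b * 0.0722);
}

int main() {
  // White: 0.2126 + 0.7152 + 0.0722 == 1.0.
  assert(std::fabs(RGB2GRAY(1.0, 1.0, 1.0) - 1.0) < 1e-12);
  // Green dominates perceived brightness, blue contributes the least.
  assert(RGB2GRAY(0.0, 1.0, 0.0) > RGB2GRAY(1.0, 0.0, 0.0));
  assert(RGB2GRAY(1.0, 0.0, 0.0) > RGB2GRAY(0.0, 0.0, 1.0));
  return 0;
}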

View File

@ -34,9 +34,9 @@ namespace libmv {
/// Put the pixel in the image to the given color only if the point (xc,yc)
/// is inside the image.
template <class Image, class Color>
inline void safePutPixel(int yc, int xc, const Color & col, Image *pim) {
inline void safePutPixel(int yc, int xc, const Color& col, Image* pim) {
if (!pim)
return;
return;
if (pim->Contains(yc, xc)) {
(*pim)(yc, xc) = col;
}
@ -45,9 +45,9 @@ inline void safePutPixel(int yc, int xc, const Color & col, Image *pim) {
/// is inside the image. This function support multi-channel color
/// \note The color pointer col must have size as the image depth
template <class Image, class Color>
inline void safePutPixel(int yc, int xc, const Color *col, Image *pim) {
inline void safePutPixel(int yc, int xc, const Color* col, Image* pim) {
if (!pim)
return;
return;
if (pim->Contains(yc, xc)) {
for (int i = 0; i < pim->Depth(); ++i)
(*pim)(yc, xc, i) = *(col + i);
@ -59,19 +59,23 @@ inline void safePutPixel(int yc, int xc, const Color *col, Image *pim) {
// Add the rotation of the ellipse.
// As the algo. uses symmetry we must use 4 rotations.
template <class Image, class Color>
void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
const Color &col, Image *pim, double angle = 0.0) {
void DrawEllipse(int xc,
int yc,
int radiusA,
int radiusB,
const Color& col,
Image* pim,
double angle = 0.0) {
int a = radiusA;
int b = radiusB;
// Counter Clockwise rotation matrix.
double matXY[4] = { cos(angle), sin(angle),
-sin(angle), cos(angle)};
double matXY[4] = {cos(angle), sin(angle), -sin(angle), cos(angle)};
int x, y;
double d1, d2;
x = 0;
y = b;
d1 = b*b - a*a*b + a*a/4;
d1 = b * b - a * a * b + a * a / 4;
float rotX = (matXY[0] * x + matXY[1] * y);
float rotY = (matXY[2] * x + matXY[3] * y);
@ -86,12 +90,12 @@ void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
while (a*a*(y-.5) > b*b*(x+1)) {
while (a * a * (y - .5) > b * b * (x + 1)) {
if (d1 < 0) {
d1 += b*b*(2*x+3);
d1 += b * b * (2 * x + 3);
++x;
} else {
d1 += b*b*(2*x+3) + a*a*(-2*y+2);
d1 += b * b * (2 * x + 3) + a * a * (-2 * y + 2);
++x;
--y;
}
@ -108,14 +112,14 @@ void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
}
d2 = b*b*(x+.5)*(x+.5) + a*a*(y-1)*(y-1) - a*a*b*b;
d2 = b * b * (x + .5) * (x + .5) + a * a * (y - 1) * (y - 1) - a * a * b * b;
while (y > 0) {
if (d2 < 0) {
d2 += b*b*(2*x+2) + a*a*(-2*y+3);
d2 += b * b * (2 * x + 2) + a * a * (-2 * y + 3);
--y;
++x;
} else {
d2 += a*a*(-2*y+3);
d2 += a * a * (-2 * y + 3);
--y;
}
rotX = (matXY[0] * x + matXY[1] * y);
@ -137,23 +141,23 @@ void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
// So it's better to use the Andres method.
// http://fr.wikipedia.org/wiki/Algorithme_de_tracé_de_cercle_d'Andres.
template <class Image, class Color>
void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
Image &im = *pim;
if ( im.Contains(y + radius, x + radius)
|| im.Contains(y + radius, x - radius)
|| im.Contains(y - radius, x + radius)
|| im.Contains(y - radius, x - radius)) {
void DrawCircle(int x, int y, int radius, const Color& col, Image* pim) {
Image& im = *pim;
if (im.Contains(y + radius, x + radius) ||
im.Contains(y + radius, x - radius) ||
im.Contains(y - radius, x + radius) ||
im.Contains(y - radius, x - radius)) {
int x1 = 0;
int y1 = radius;
int d = radius - 1;
while (y1 >= x1) {
// Draw the point for each octant.
safePutPixel( y1 + y, x1 + x, col, pim);
safePutPixel( x1 + y, y1 + x, col, pim);
safePutPixel( y1 + y, -x1 + x, col, pim);
safePutPixel( x1 + y, -y1 + x, col, pim);
safePutPixel(-y1 + y, x1 + x, col, pim);
safePutPixel(-x1 + y, y1 + x, col, pim);
safePutPixel(y1 + y, x1 + x, col, pim);
safePutPixel(x1 + y, y1 + x, col, pim);
safePutPixel(y1 + y, -x1 + x, col, pim);
safePutPixel(x1 + y, -y1 + x, col, pim);
safePutPixel(-y1 + y, x1 + x, col, pim);
safePutPixel(-x1 + y, y1 + x, col, pim);
safePutPixel(-y1 + y, -x1 + x, col, pim);
safePutPixel(-x1 + y, -y1 + x, col, pim);
if (d >= 2 * x1) {
@ -163,7 +167,7 @@ void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
if (d <= 2 * (radius - y1)) {
d = d + 2 * y1 - 1;
y1 -= 1;
} else {
} else {
d = d + 2 * (y1 - x1 - 1);
y1 -= 1;
x1 += 1;
@ -175,8 +179,8 @@ void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
// Bresenham algorithm
template <class Image, class Color>
void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
Image &im = *pim;
void DrawLine(int xa, int ya, int xb, int yb, const Color& col, Image* pim) {
Image& im = *pim;
// If one point is outside the image
// Replace the outside point by the intersection of the line and
@ -185,35 +189,37 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
int width = pim->Width();
int height = pim->Height();
const bool xdir = xa < xb, ydir = ya < yb;
float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb,
&xleft = xdir?nx0:nx1, &yleft = xdir?ny0:ny1,
&xright = xdir?nx1:nx0, &yright = xdir?ny1:ny0,
&xup = ydir?nx0:nx1, &yup = ydir?ny0:ny1,
&xdown = ydir?nx1:nx0, &ydown = ydir?ny1:ny0;
float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb, &xleft = xdir ? nx0 : nx1,
&yleft = xdir ? ny0 : ny1, &xright = xdir ? nx1 : nx0,
&yright = xdir ? ny1 : ny0, &xup = ydir ? nx0 : nx1,
&yup = ydir ? ny0 : ny1, &xdown = ydir ? nx1 : nx0,
&ydown = ydir ? ny1 : ny0;
if (xright < 0 || xleft >= width) return;
if (xright < 0 || xleft >= width)
return;
if (xleft < 0) {
yleft -= xleft*(yright - yleft)/(xright - xleft);
xleft = 0;
yleft -= xleft * (yright - yleft) / (xright - xleft);
xleft = 0;
}
if (xright >= width) {
yright -= (xright - width)*(yright - yleft)/(xright - xleft);
xright = width - 1;
yright -= (xright - width) * (yright - yleft) / (xright - xleft);
xright = width - 1;
}
if (ydown < 0 || yup >= height) return;
if (ydown < 0 || yup >= height)
return;
if (yup < 0) {
xup -= yup*(xdown - xup)/(ydown - yup);
yup = 0;
xup -= yup * (xdown - xup) / (ydown - yup);
yup = 0;
}
if (ydown >= height) {
xdown -= (ydown - height)*(xdown - xup)/(ydown - yup);
ydown = height - 1;
xdown -= (ydown - height) * (xdown - xup) / (ydown - yup);
ydown = height - 1;
}
xa = (int) xleft;
xb = (int) xright;
ya = (int) yleft;
yb = (int) yright;
xa = (int)xleft;
xb = (int)xright;
ya = (int)yleft;
yb = (int)yright;
}
int xbas, xhaut, ybas, yhaut;
@ -241,7 +247,7 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
}
if (dy > 0) { // Positive slope will increment X.
incrmY = 1;
} else { // Negative slope.
} else { // Negative slope.
incrmY = -1;
}
if (dx >= dy) {
@ -255,9 +261,9 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
x += incrmX;
if (dp <= 0) { // Go in direction of the South Pixel.
dp += S;
} else { // Go to the North.
} else { // Go to the North.
dp += N;
y+=incrmY;
y += incrmY;
}
}
} else {
@ -271,7 +277,7 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
y += incrmY;
if (dp <= 0) { // Go in direction of the South Pixel.
dp += S;
} else { // Go to the North.
} else { // Go to the North.
dp += N;
x += incrmX;
}
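
DrawLine above is the classic Bresenham walk: step along the major axis, carry an integer decision variable, and occasionally step along the minor axis, with extra code for clipping and for the other octants. A minimal self-contained sketch of the core idea for the first octant only (0 <= slope <= 1), drawn onto a character grid rather than an Image:

#include <cstdio>
#include <vector>

int main() {
  const int width = 16, height = 8;
  std::vector<char> canvas(width * height, '.');

  // Bresenham line from (x0, y0) to (x1, y1), assuming 0 <= slope <= 1.
  const int x0 = 1, y0 = 1, x1 = 14, y1 = 6;
  const int dx = x1 - x0, dy = y1 - y0;
  int d = 2 * dy - dx;  // Integer decision variable.
  for (int x = x0, y = y0; x <= x1; ++x) {
    canvas[y * width + x] = '#';
    if (d > 0) {
      ++y;  // Step along the minor axis.
      d -= 2 * dx;
    }
    d += 2 * dy;
  }

  for (int row = 0; row < height; ++row) {
    std::printf("%.*s\n", width, &canvas[row * width]);
  }
  return 0;
}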

View File

@ -23,20 +23,20 @@
#include "libmv/image/image.h"
#include "testing/testing.h"
using libmv::Image;
using libmv::Array3Df;
using libmv::Image;
namespace {
TEST(Image, SimpleImageAccessors) {
Array3Df *array = new Array3Df(2, 3);
Array3Df* array = new Array3Df(2, 3);
Image image(array);
EXPECT_EQ(array, image.AsArray3Df());
EXPECT_TRUE(NULL == image.AsArray3Du());
}
TEST(Image, MemorySizeInBytes) {
Array3Df *array = new Array3Df(2, 3);
Array3Df* array = new Array3Df(2, 3);
Image image(array);
int size = sizeof(image) + array->MemorySizeInBytes();
EXPECT_EQ(size, image.MemorySizeInBytes());

View File

@ -26,17 +26,14 @@
namespace libmv {
/// Nearest neighbor interpolation.
template<typename T>
inline T SampleNearest(const Array3D<T> &image,
float y, float x, int v = 0) {
template <typename T>
inline T SampleNearest(const Array3D<T>& image, float y, float x, int v = 0) {
const int i = int(round(y));
const int j = int(round(x));
return image(i, j, v);
}
inline void LinearInitAxis(float x, int size,
int *x1, int *x2,
float *dx) {
inline void LinearInitAxis(float x, int size, int* x1, int* x2, float* dx) {
const int ix = static_cast<int>(x);
if (ix < 0) {
*x1 = 0;
@ -54,32 +51,32 @@ inline void LinearInitAxis(float x, int size,
}
/// Linear interpolation.
template<typename T>
inline T SampleLinear(const Array3D<T> &image, float y, float x, int v = 0) {
template <typename T>
inline T SampleLinear(const Array3D<T>& image, float y, float x, int v = 0) {
int x1, y1, x2, y2;
float dx, dy;
LinearInitAxis(y, image.Height(), &y1, &y2, &dy);
LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
const T im11 = image(y1, x1, v);
const T im12 = image(y1, x2, v);
const T im21 = image(y2, x1, v);
const T im22 = image(y2, x2, v);
return T( dy * (dx * im11 + (1.0 - dx) * im12) +
return T(dy * (dx * im11 + (1.0 - dx) * im12) +
(1 - dy) * (dx * im21 + (1.0 - dx) * im22));
}
/// Linear interpolation, of all channels. The sample is assumed to have the
/// same size as the number of channels in image.
template<typename T>
inline void SampleLinear(const Array3D<T> &image, float y, float x, T *sample) {
template <typename T>
inline void SampleLinear(const Array3D<T>& image, float y, float x, T* sample) {
int x1, y1, x2, y2;
float dx, dy;
LinearInitAxis(y, image.Height(), &y1, &y2, &dy);
LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
for (int i = 0; i < image.Depth(); ++i) {
const T im11 = image(y1, x1, i);
@ -87,7 +84,7 @@ inline void SampleLinear(const Array3D<T> &image, float y, float x, T *sample) {
const T im21 = image(y2, x1, i);
const T im22 = image(y2, x2, i);
sample[i] = T( dy * (dx * im11 + (1.0 - dx) * im12) +
sample[i] = T(dy * (dx * im11 + (1.0 - dx) * im12) +
(1 - dy) * (dx * im21 + (1.0 - dx) * im22));
}
}
@ -95,7 +92,7 @@ inline void SampleLinear(const Array3D<T> &image, float y, float x, T *sample) {
// Downsample all channels by 2. If the image has odd width or height, the last
// row or column is ignored.
// FIXME(MatthiasF): this implementation shouldn't be in an interface file
inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
inline void DownsampleChannelsBy2(const Array3Df& in, Array3Df* out) {
int height = in.Height() / 2;
int width = in.Width() / 2;
int depth = in.Depth();
@ -106,10 +103,12 @@ inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
for (int r = 0; r < height; ++r) {
for (int c = 0; c < width; ++c) {
for (int k = 0; k < depth; ++k) {
// clang-format off
(*out)(r, c, k) = (in(2 * r, 2 * c, k) +
in(2 * r + 1, 2 * c, k) +
in(2 * r, 2 * c + 1, k) +
in(2 * r + 1, 2 * c + 1, k)) / 4.0f;
// clang-format on
}
}
}
@ -117,11 +116,12 @@ inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
// Sample a region centered at x,y in image with size extending by half_width
// from x,y. Channels specifies the number of channels to sample from.
inline void SamplePattern(const FloatImage &image,
double x, double y,
int half_width,
int channels,
FloatImage *sampled) {
inline void SamplePattern(const FloatImage& image,
double x,
double y,
int half_width,
int channels,
FloatImage* sampled) {
sampled->Resize(2 * half_width + 1, 2 * half_width + 1, channels);
for (int r = -half_width; r <= half_width; ++r) {
for (int c = -half_width; c <= half_width; ++c) {
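
SampleLinear above is plain bilinear interpolation: clamp to the border, fetch the four neighbouring texels, and blend them with the fractional offsets, where dx and dy weight the lower coordinate as the blending formula above implies. A condensed sketch on a single-channel row-major float buffer, with the border clamping of LinearInitAxis left out for brevity; BilinearSample is an illustrative name.

#include <cassert>
#include <cmath>
#include <vector>

float BilinearSample(const std::vector<float>& image,
                     int width, int height, float y, float x) {
  // Assumes 0 <= x < width - 1 and 0 <= y < height - 1, so no clamping.
  const int x1 = static_cast<int>(x), y1 = static_cast<int>(y);
  const int x2 = x1 + 1, y2 = y1 + 1;
  assert(x2 < width && y2 < height);
  const float dx = 1.0f - (x - x1);  // Weight of the left column (x1).
  const float dy = 1.0f - (y - y1);  // Weight of the top row (y1).

  const float im11 = image[y1 * width + x1];
  const float im12 = image[y1 * width + x2];
  const float im21 = image[y2 * width + x1];
  const float im22 = image[y2 * width + x2];
  return dy * (dx * im11 + (1.0f - dx) * im12) +
         (1.0f - dy) * (dx * im21 + (1.0f - dx) * im22);
}

int main() {
  // 2x2 image; sampling the exact centre averages all four texels.
  const std::vector<float> image = {0.0f, 1.0f, 2.0f, 3.0f};
  const float v = BilinearSample(image, 2, 2, 0.5f, 0.5f);
  assert(std::fabs(v - 1.5f) < 1e-6f);
  return 0;
}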

View File

@ -32,9 +32,9 @@ TEST(Image, Nearest) {
image(1, 0) = 2;
image(1, 1) = 3;
EXPECT_EQ(0, SampleNearest(image, -0.4f, -0.4f));
EXPECT_EQ(0, SampleNearest(image, 0.4f, 0.4f));
EXPECT_EQ(3, SampleNearest(image, 0.6f, 0.6f));
EXPECT_EQ(3, SampleNearest(image, 1.4f, 1.4f));
EXPECT_EQ(0, SampleNearest(image, 0.4f, 0.4f));
EXPECT_EQ(3, SampleNearest(image, 0.6f, 0.6f));
EXPECT_EQ(3, SampleNearest(image, 1.4f, 1.4f));
}
TEST(Image, Linear) {
@ -57,7 +57,7 @@ TEST(Image, DownsampleBy2) {
ASSERT_EQ(1, resampled_image.Height());
ASSERT_EQ(1, resampled_image.Width());
ASSERT_EQ(1, resampled_image.Depth());
EXPECT_FLOAT_EQ(6./4., resampled_image(0, 0));
EXPECT_FLOAT_EQ(6. / 4., resampled_image(0, 0));
}
TEST(Image, DownsampleBy2MultiChannel) {
@ -82,8 +82,8 @@ TEST(Image, DownsampleBy2MultiChannel) {
ASSERT_EQ(1, resampled_image.Height());
ASSERT_EQ(1, resampled_image.Width());
ASSERT_EQ(3, resampled_image.Depth());
EXPECT_FLOAT_EQ((0+1+2+3)/4., resampled_image(0, 0, 0));
EXPECT_FLOAT_EQ((5+6+7+8)/4., resampled_image(0, 0, 1));
EXPECT_FLOAT_EQ((9+10+11+12)/4., resampled_image(0, 0, 2));
EXPECT_FLOAT_EQ((0 + 1 + 2 + 3) / 4., resampled_image(0, 0, 0));
EXPECT_FLOAT_EQ((5 + 6 + 7 + 8) / 4., resampled_image(0, 0, 1));
EXPECT_FLOAT_EQ((9 + 10 + 11 + 12) / 4., resampled_image(0, 0, 2));
}
} // namespace

View File

@ -34,10 +34,14 @@ class Tuple {
Tuple(T initial_value) { Reset(initial_value); }
template <typename D>
Tuple(D *values) { Reset(values); }
Tuple(D* values) {
Reset(values);
}
template <typename D>
Tuple(const Tuple<D, N> &b) { Reset(b); }
Tuple(const Tuple<D, N>& b) {
Reset(b);
}
template <typename D>
Tuple& operator=(const Tuple<D, N>& b) {
@ -46,30 +50,32 @@ class Tuple {
}
template <typename D>
void Reset(const Tuple<D, N>& b) { Reset(b.Data()); }
void Reset(const Tuple<D, N>& b) {
Reset(b.Data());
}
template <typename D>
void Reset(D *values) {
for (int i = 0;i < N; i++) {
void Reset(D* values) {
for (int i = 0; i < N; i++) {
data_[i] = T(values[i]);
}
}
// Set all tuple values to the same thing.
void Reset(T value) {
for (int i = 0;i < N; i++) {
for (int i = 0; i < N; i++) {
data_[i] = value;
}
}
// Pointer to the first element.
T *Data() { return &data_[0]; }
const T *Data() const { return &data_[0]; }
T* Data() { return &data_[0]; }
const T* Data() const { return &data_[0]; }
T &operator()(int i) { return data_[i]; }
const T &operator()(int i) const { return data_[i]; }
T& operator()(int i) { return data_[i]; }
const T& operator()(int i) const { return data_[i]; }
bool operator==(const Tuple<T, N> &other) const {
bool operator==(const Tuple<T, N>& other) const {
for (int i = 0; i < N; ++i) {
if ((*this)(i) != other(i)) {
return false;
@ -77,9 +83,7 @@ class Tuple {
}
return true;
}
bool operator!=(const Tuple<T, N> &other) const {
return !(*this == other);
}
bool operator!=(const Tuple<T, N>& other) const { return !(*this == other); }
private:
T data_[N];

View File

@ -24,7 +24,7 @@
namespace libmv {
// HZ 4.4.4 pag.109: Point conditioning (non isotropic)
void PreconditionerFromPoints(const Mat &points, Mat3 *T) {
void PreconditionerFromPoints(const Mat& points, Mat3* T) {
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
@ -38,12 +38,14 @@ void PreconditionerFromPoints(const Mat &points, Mat3 *T) {
if (variance(1) < 1e-8)
yfactor = mean(1) = 1.0;
// clang-format off
*T << xfactor, 0, -xfactor * mean(0),
0, yfactor, -yfactor * mean(1),
0, 0, 1;
// clang-format on
}
// HZ 4.4.4 pag.107: Point conditioning (isotropic)
void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T) {
void IsotropicPreconditionerFromPoints(const Mat& points, Mat3* T) {
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
@ -57,14 +59,16 @@ void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T) {
mean.setOnes();
}
// clang-format off
*T << factor, 0, -factor * mean(0),
0, factor, -factor * mean(1),
0, 0, 1;
// clang-format on
}
void ApplyTransformationToPoints(const Mat &points,
const Mat3 &T,
Mat *transformed_points) {
void ApplyTransformationToPoints(const Mat& points,
const Mat3& T,
Mat* transformed_points) {
int n = points.cols();
transformed_points->resize(2, n);
Mat3X p(3, n);
@ -73,26 +77,24 @@ void ApplyTransformationToPoints(const Mat &points,
HomogeneousToEuclidean(p, transformed_points);
}
void NormalizePoints(const Mat &points,
Mat *normalized_points,
Mat3 *T) {
void NormalizePoints(const Mat& points, Mat* normalized_points, Mat3* T) {
PreconditionerFromPoints(points, T);
ApplyTransformationToPoints(points, *T, normalized_points);
}
void NormalizeIsotropicPoints(const Mat &points,
Mat *normalized_points,
Mat3 *T) {
void NormalizeIsotropicPoints(const Mat& points,
Mat* normalized_points,
Mat3* T) {
IsotropicPreconditionerFromPoints(points, T);
ApplyTransformationToPoints(points, *T, normalized_points);
}
// Denormalize the results. See HZ page 109.
void UnnormalizerT::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
void UnnormalizerT::Unnormalize(const Mat3& T1, const Mat3& T2, Mat3* H) {
*H = T2.transpose() * (*H) * T1;
}
void UnnormalizerI::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
void UnnormalizerI::Unnormalize(const Mat3& T1, const Mat3& T2, Mat3* H) {
*H = T2.inverse() * (*H) * T1;
}

View File

@ -26,32 +26,30 @@
namespace libmv {
// Point conditioning (non isotropic)
void PreconditionerFromPoints(const Mat &points, Mat3 *T);
void PreconditionerFromPoints(const Mat& points, Mat3* T);
// Point conditioning (isotropic)
void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T);
void IsotropicPreconditionerFromPoints(const Mat& points, Mat3* T);
void ApplyTransformationToPoints(const Mat &points,
const Mat3 &T,
Mat *transformed_points);
void ApplyTransformationToPoints(const Mat& points,
const Mat3& T,
Mat* transformed_points);
void NormalizePoints(const Mat &points,
Mat *normalized_points,
Mat3 *T);
void NormalizePoints(const Mat& points, Mat* normalized_points, Mat3* T);
void NormalizeIsotropicPoints(const Mat &points,
Mat *normalized_points,
Mat3 *T);
void NormalizeIsotropicPoints(const Mat& points,
Mat* normalized_points,
Mat3* T);
/// Use inverse for unnormalize
struct UnnormalizerI {
// Denormalize the results. See HZ page 109.
static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
static void Unnormalize(const Mat3& T1, const Mat3& T2, Mat3* H);
};
/// Use transpose for unnormalize
struct UnnormalizerT {
// Denormalize the results. See HZ page 109.
static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
static void Unnormalize(const Mat3& T1, const Mat3& T2, Mat3* H);
};
} // namespace libmv
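
A usage sketch (illustrative, not part of the patch): the conditioning API above can be driven on its own. The include matches the one used by fundamental.cc later in this change; Mat and Mat3 are the usual libmv typedefs from libmv/numeric/numeric.h.

#include "libmv/multiview/conditioning.h"
#include "libmv/numeric/numeric.h"

int main() {
  // Four 2D points, one per column, in pixel coordinates.
  libmv::Mat points(2, 4);
  // clang-format off
  points << 0,    0, 1600, 1600,
            0, 1200,    0, 1200;
  // clang-format on

  // Non-isotropic conditioning (HZ 4.4.4): recenters the points and rescales
  // each axis so that later linear solves are better conditioned.
  libmv::Mat normalized_points;
  libmv::Mat3 T;
  libmv::NormalizePoints(points, &normalized_points, &T);

  // Applying T explicitly reproduces the normalized points.
  libmv::Mat check;
  libmv::ApplyTransformationToPoints(points, T, &check);
  return (normalized_points - check).norm() < 1e-12 ? 0 : 1;
}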

View File

@ -23,8 +23,8 @@
#include <cmath>
#include <limits>
#include <Eigen/SVD>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
@ -35,9 +35,10 @@ namespace euclidean_resection {
typedef unsigned int uint;
bool EuclideanResection(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t,
bool EuclideanResection(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3* R,
Vec3* t,
ResectionMethod method) {
switch (method) {
case RESECTION_ANSAR_DANIILIDIS:
@ -49,20 +50,20 @@ bool EuclideanResection(const Mat2X &x_camera,
case RESECTION_PPNP:
return EuclideanResectionPPnP(x_camera, X_world, R, t);
break;
default:
LOG(FATAL) << "Unknown resection method.";
default: LOG(FATAL) << "Unknown resection method.";
}
return false;
}
bool EuclideanResection(const Mat &x_image,
const Mat3X &X_world,
const Mat3 &K,
Mat3 *R, Vec3 *t,
bool EuclideanResection(const Mat& x_image,
const Mat3X& X_world,
const Mat3& K,
Mat3* R,
Vec3* t,
ResectionMethod method) {
CHECK(x_image.rows() == 2 || x_image.rows() == 3)
<< "Invalid size for x_image: "
<< x_image.rows() << "x" << x_image.cols();
<< "Invalid size for x_image: " << x_image.rows() << "x"
<< x_image.cols();
Mat2X x_camera;
if (x_image.rows() == 2) {
@ -73,18 +74,15 @@ bool EuclideanResection(const Mat &x_image,
return EuclideanResection(x_camera, X_world, R, t, method);
}
void AbsoluteOrientation(const Mat3X &X,
const Mat3X &Xp,
Mat3 *R,
Vec3 *t) {
void AbsoluteOrientation(const Mat3X& X, const Mat3X& Xp, Mat3* R, Vec3* t) {
int num_points = X.cols();
Vec3 C = X.rowwise().sum() / num_points; // Centroid of X.
Vec3 C = X.rowwise().sum() / num_points; // Centroid of X.
Vec3 Cp = Xp.rowwise().sum() / num_points; // Centroid of Xp.
// Normalize the two point sets.
Mat3X Xn(3, num_points), Xpn(3, num_points);
for (int i = 0; i < num_points; ++i) {
Xn.col(i) = X.col(i) - C;
Xn.col(i) = X.col(i) - C;
Xpn.col(i) = Xp.col(i) - Cp;
}
@ -100,10 +98,12 @@ void AbsoluteOrientation(const Mat3X &X,
double Szy = Xn.row(2).dot(Xpn.row(1));
Mat4 N;
// clang-format off
N << Sxx + Syy + Szz, Syz - Szy, Szx - Sxz, Sxy - Syx,
Syz - Szy, Sxx - Syy - Szz, Sxy + Syx, Szx + Sxz,
Szx - Sxz, Sxy + Syx, -Sxx + Syy - Szz, Syz + Szy,
Sxy - Syx, Szx + Sxz, Syz + Szy, -Sxx - Syy + Szz;
// clang-format on
// Find the unit quaternion q that maximizes qNq. It is the eigenvector
// corresponding to the lagest eigenvalue.
@ -118,6 +118,7 @@ void AbsoluteOrientation(const Mat3X &X,
double q1q3 = q(1) * q(3);
double q2q3 = q(2) * q(3);
// clang-format off
(*R) << qq(0) + qq(1) - qq(2) - qq(3),
2 * (q1q2 - q0q3),
2 * (q1q3 + q0q2),
@ -127,6 +128,7 @@ void AbsoluteOrientation(const Mat3X &X,
2 * (q1q3 - q0q2),
2 * (q2q3 + q0q1),
qq(0) - qq(1) - qq(2) + qq(3);
// clang-format on
// Fix the handedness of the R matrix.
if (R->determinant() < 0) {
@ -176,9 +178,7 @@ static int Sign(double value) {
// Lambda to create the constraints in equation (5) in "Linear Pose Estimation
// from Points or Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no.
// 5.
static Vec MatrixToConstraint(const Mat &A,
int num_k_columns,
int num_lambda) {
static Vec MatrixToConstraint(const Mat& A, int num_k_columns, int num_lambda) {
Vec C(num_k_columns);
C.setZero();
int idx = 0;
@ -195,17 +195,17 @@ static Vec MatrixToConstraint(const Mat &A,
}
// Normalizes the columns of vectors.
static void NormalizeColumnVectors(Mat3X *vectors) {
static void NormalizeColumnVectors(Mat3X* vectors) {
int num_columns = vectors->cols();
for (int i = 0; i < num_columns; ++i) {
vectors->col(i).normalize();
}
}
void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R,
Vec3 *t) {
void EuclideanResectionAnsarDaniilidis(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3* R,
Vec3* t) {
CHECK(x_camera.cols() == X_world.cols());
CHECK(x_camera.cols() > 3);
@ -229,14 +229,14 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
// them into the M matrix (8). Also store the initial (i, j) indices.
int row = 0;
for (int i = 0; i < num_points; ++i) {
for (int j = i+1; j < num_points; ++j) {
for (int j = i + 1; j < num_points; ++j) {
M(row, row) = -2 * x_camera_unit.col(i).dot(x_camera_unit.col(j));
M(row, num_m_rows + i) = x_camera_unit.col(i).dot(x_camera_unit.col(i));
M(row, num_m_rows + j) = x_camera_unit.col(j).dot(x_camera_unit.col(j));
Vec3 Xdiff = X_world.col(i) - X_world.col(j);
double center_to_point_distance = Xdiff.norm();
M(row, num_m_columns - 1) =
- center_to_point_distance * center_to_point_distance;
-center_to_point_distance * center_to_point_distance;
ij_index(row, 0) = i;
ij_index(row, 1) = j;
++row;
@ -246,17 +246,17 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
int num_lambda = num_points + 1; // Dimension of the null space of M.
Mat V = M.jacobiSvd(Eigen::ComputeFullV).matrixV().block(0,
num_m_rows,
num_m_columns,
num_lambda);
Mat V = M.jacobiSvd(Eigen::ComputeFullV)
.matrixV()
.block(0, num_m_rows, num_m_columns, num_lambda);
// TODO(vess): The number of constraint equations in K (num_k_rows) must be
// (num_points + 1) * (num_points + 2)/2. This creates a performance issue
// for more than 4 points. It is fine for 4 points at the moment with 18
// instead of 15 equations.
int num_k_rows = num_m_rows + num_points *
(num_points*(num_points-1)/2 - num_points+1);
int num_k_rows =
num_m_rows +
num_points * (num_points * (num_points - 1) / 2 - num_points + 1);
int num_k_columns = num_lambda * (num_lambda + 1) / 2;
Mat K(num_k_rows, num_k_columns);
K.setZero();
@ -275,8 +275,8 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
int idx4 = IJToPointIndex(i, k, num_points);
K.row(counter_k_row) =
MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2)-
V.row(idx3).transpose() * V.row(idx4),
MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2) -
V.row(idx3).transpose() * V.row(idx4),
num_k_columns,
num_lambda);
++counter_k_row;
@ -296,8 +296,8 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
int idx4 = IJToPointIndex(i, k, num_points);
K.row(counter_k_row) =
MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2)-
V.row(idx3).transpose() * V.row(idx4),
MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2) -
V.row(idx3).transpose() * V.row(idx4),
num_k_columns,
num_lambda);
++counter_k_row;
@ -317,14 +317,12 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
}
// Ensure positiveness of the largest value corresponding to lambda_ii.
L_sq = L_sq * Sign(L_sq(IJToIndex(max_L_sq_index,
max_L_sq_index,
num_lambda)));
L_sq =
L_sq * Sign(L_sq(IJToIndex(max_L_sq_index, max_L_sq_index, num_lambda)));
Vec L(num_lambda);
L(max_L_sq_index) = sqrt(L_sq(IJToIndex(max_L_sq_index,
max_L_sq_index,
num_lambda)));
L(max_L_sq_index) =
sqrt(L_sq(IJToIndex(max_L_sq_index, max_L_sq_index, num_lambda)));
for (int i = 0; i < num_lambda; ++i) {
if (i != max_L_sq_index) {
@ -353,9 +351,9 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
// Selects 4 virtual control points using mean and PCA.
static void SelectControlPoints(const Mat3X &X_world,
Mat *X_centered,
Mat34 *X_control_points) {
static void SelectControlPoints(const Mat3X& X_world,
Mat* X_centered,
Mat34* X_control_points) {
size_t num_points = X_world.cols();
// The first virtual control point, C0, is the centroid.
@ -379,13 +377,13 @@ static void SelectControlPoints(const Mat3X &X_world,
}
// Computes the barycentric coordinates for all real points
static void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
const Mat34 &X_control_points,
Mat4X *alphas) {
static void ComputeBarycentricCoordinates(const Mat3X& X_world_centered,
const Mat34& X_control_points,
Mat4X* alphas) {
size_t num_points = X_world_centered.cols();
Mat3 C2;
for (size_t c = 1; c < 4; c++) {
C2.col(c-1) = X_control_points.col(c) - X_control_points.col(0);
C2.col(c - 1) = X_control_points.col(c) - X_control_points.col(0);
}
Mat3 C2inv = C2.inverse();
@ -401,14 +399,15 @@ static void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
// Estimates the coordinates of all real points in the camera coordinate frame
static void ComputePointsCoordinatesInCameraFrame(
const Mat4X &alphas,
const Vec4 &betas,
const Eigen::Matrix<double, 12, 12> &U,
Mat3X *X_camera) {
const Mat4X& alphas,
const Vec4& betas,
const Eigen::Matrix<double, 12, 12>& U,
Mat3X* X_camera) {
size_t num_points = alphas.cols();
// Estimates the control points in the camera reference frame.
Mat34 C2b; C2b.setZero();
Mat34 C2b;
C2b.setZero();
for (size_t cu = 0; cu < 4; cu++) {
for (size_t c = 0; c < 4; c++) {
C2b.col(c) += betas(cu) * U.block(11 - cu, c * 3, 1, 3).transpose();
@ -436,9 +435,10 @@ static void ComputePointsCoordinatesInCameraFrame(
}
}
bool EuclideanResectionEPnP(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t) {
bool EuclideanResectionEPnP(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3* R,
Vec3* t) {
CHECK(x_camera.cols() == X_world.cols());
CHECK(x_camera.cols() > 3);
size_t num_points = X_world.cols();
@ -462,6 +462,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
double a3 = alphas(3, c);
double ui = x_camera(0, c);
double vi = x_camera(1, c);
// clang-format off
M.block(2*c, 0, 2, 12) << a0, 0,
a0*(-ui), a1, 0,
a1*(-ui), a2, 0,
@ -471,10 +472,11 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
a1, a1*(-vi), 0,
a2, a2*(-vi), 0,
a3, a3*(-vi);
// clang-format on
}
// TODO(julien): Avoid the transpose by rewriting the u2.block() calls.
Eigen::JacobiSVD<Mat> MtMsvd(M.transpose()*M, Eigen::ComputeFullU);
Eigen::JacobiSVD<Mat> MtMsvd(M.transpose() * M, Eigen::ComputeFullU);
Eigen::Matrix<double, 12, 12> u2 = MtMsvd.matrixU().transpose();
// Estimate the L matrix.
@ -495,21 +497,22 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
dv2.row(3) = u2.block(10, 3, 1, 3) - u2.block(10, 6, 1, 3);
dv2.row(4) = u2.block(10, 3, 1, 3) - u2.block(10, 9, 1, 3);
dv2.row(5) = u2.block(10, 6, 1, 3) - u2.block(10, 9, 1, 3);
dv3.row(0) = u2.block(9, 0, 1, 3) - u2.block(9, 3, 1, 3);
dv3.row(1) = u2.block(9, 0, 1, 3) - u2.block(9, 6, 1, 3);
dv3.row(2) = u2.block(9, 0, 1, 3) - u2.block(9, 9, 1, 3);
dv3.row(3) = u2.block(9, 3, 1, 3) - u2.block(9, 6, 1, 3);
dv3.row(4) = u2.block(9, 3, 1, 3) - u2.block(9, 9, 1, 3);
dv3.row(5) = u2.block(9, 6, 1, 3) - u2.block(9, 9, 1, 3);
dv4.row(0) = u2.block(8, 0, 1, 3) - u2.block(8, 3, 1, 3);
dv4.row(1) = u2.block(8, 0, 1, 3) - u2.block(8, 6, 1, 3);
dv4.row(2) = u2.block(8, 0, 1, 3) - u2.block(8, 9, 1, 3);
dv4.row(3) = u2.block(8, 3, 1, 3) - u2.block(8, 6, 1, 3);
dv4.row(4) = u2.block(8, 3, 1, 3) - u2.block(8, 9, 1, 3);
dv4.row(5) = u2.block(8, 6, 1, 3) - u2.block(8, 9, 1, 3);
dv3.row(0) = u2.block(9, 0, 1, 3) - u2.block(9, 3, 1, 3);
dv3.row(1) = u2.block(9, 0, 1, 3) - u2.block(9, 6, 1, 3);
dv3.row(2) = u2.block(9, 0, 1, 3) - u2.block(9, 9, 1, 3);
dv3.row(3) = u2.block(9, 3, 1, 3) - u2.block(9, 6, 1, 3);
dv3.row(4) = u2.block(9, 3, 1, 3) - u2.block(9, 9, 1, 3);
dv3.row(5) = u2.block(9, 6, 1, 3) - u2.block(9, 9, 1, 3);
dv4.row(0) = u2.block(8, 0, 1, 3) - u2.block(8, 3, 1, 3);
dv4.row(1) = u2.block(8, 0, 1, 3) - u2.block(8, 6, 1, 3);
dv4.row(2) = u2.block(8, 0, 1, 3) - u2.block(8, 9, 1, 3);
dv4.row(3) = u2.block(8, 3, 1, 3) - u2.block(8, 6, 1, 3);
dv4.row(4) = u2.block(8, 3, 1, 3) - u2.block(8, 9, 1, 3);
dv4.row(5) = u2.block(8, 6, 1, 3) - u2.block(8, 9, 1, 3);
Eigen::Matrix<double, 6, 10> L;
for (size_t r = 0; r < 6; r++) {
// clang-format off
L.row(r) << dv1.row(r).dot(dv1.row(r)),
2.0 * dv1.row(r).dot(dv2.row(r)),
dv2.row(r).dot(dv2.row(r)),
@ -520,19 +523,23 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
2.0 * dv2.row(r).dot(dv4.row(r)),
2.0 * dv3.row(r).dot(dv4.row(r)),
dv4.row(r).dot(dv4.row(r));
// clang-format on
}
Vec6 rho;
// clang-format off
rho << (X_control_points.col(0) - X_control_points.col(1)).squaredNorm(),
(X_control_points.col(0) - X_control_points.col(2)).squaredNorm(),
(X_control_points.col(0) - X_control_points.col(3)).squaredNorm(),
(X_control_points.col(1) - X_control_points.col(2)).squaredNorm(),
(X_control_points.col(1) - X_control_points.col(3)).squaredNorm(),
(X_control_points.col(2) - X_control_points.col(3)).squaredNorm();
// clang-format on
// There are three possible solutions based on the three approximations of L
// (betas). Below, each one is solved for then the best one is chosen.
Mat3X X_camera;
Mat3 K; K.setIdentity();
Mat3 K;
K.setIdentity();
vector<Mat3> Rs(3);
vector<Vec3> ts(3);
Vec rmse(3);
@ -546,7 +553,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
//
// TODO(keir): Decide if setting this to infinity, effectively disabling the
// check, is the right approach. So far this seems the case.
double kSuccessThreshold = std::numeric_limits<double>::max();
double kSuccessThreshold = std::numeric_limits<double>::max();
// Find the first possible solution for R, t corresponding to:
// Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
@ -563,7 +570,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
if (b4(0) < 0) {
b4 = -b4;
}
b4(0) = std::sqrt(b4(0));
b4(0) = std::sqrt(b4(0));
betas << b4(0), b4(1) / b4(0), b4(2) / b4(0), b4(3) / b4(0);
ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
AbsoluteOrientation(X_world, X_camera, &Rs[0], &ts[0]);
@ -669,12 +676,12 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
// TODO(julien): Improve the solutions with non-linear refinement.
return true;
}
/*
Straight from the paper:
http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
function [R T] = ppnp(P,S,tol)
% input
% P : matrix (nx3) image coordinates in camera reference [u v 1]
@ -708,33 +715,34 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
end
T = -R*c;
end
*/
// TODO(keir): Re-do all the variable names and add comments matching the paper.
// This implementation has too much of the terseness of the original. On the
// other hand, it did work on the first try.
bool EuclideanResectionPPnP(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t) {
bool EuclideanResectionPPnP(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3* R,
Vec3* t) {
int n = x_camera.cols();
Mat Z = Mat::Zero(n, n);
Vec e = Vec::Ones(n);
Mat A = Mat::Identity(n, n) - (e * e.transpose() / n);
Vec II = e / n;
Mat P(n, 3);
P.col(0) = x_camera.row(0);
P.col(1) = x_camera.row(1);
P.col(2).setConstant(1.0);
Mat S = X_world.transpose();
double error = std::numeric_limits<double>::infinity();
Mat E_old = 1000 * Mat::Ones(n, 3);
Vec3 c;
Mat E(n, 3);
int iteration = 0;
double tolerance = 1e-5;
// TODO(keir): The limit of 100 can probably be reduced, but this will require
@ -748,20 +756,21 @@ bool EuclideanResectionPPnP(const Mat2X &x_camera,
s << 1, 1, (U * VT).determinant();
*R = U * s.asDiagonal() * VT;
Mat PR = P * *R; // n x 3
c = (S - Z*PR).transpose() * II;
Mat Y = S - e*c.transpose(); // n x 3
Vec Zmindiag = (PR * Y.transpose()).diagonal()
.cwiseQuotient(P.rowwise().squaredNorm());
c = (S - Z * PR).transpose() * II;
Mat Y = S - e * c.transpose(); // n x 3
Vec Zmindiag = (PR * Y.transpose())
.diagonal()
.cwiseQuotient(P.rowwise().squaredNorm());
for (int i = 0; i < n; ++i) {
Zmindiag[i] = std::max(Zmindiag[i], 0.0);
}
Z = Zmindiag.asDiagonal();
E = Y - Z*PR;
E = Y - Z * PR;
error = (E - E_old).norm();
LG << "PPnP error(" << (iteration++) << "): " << error;
E_old = E;
}
*t = -*R*c;
*t = -*R * c;
// TODO(keir): Figure out what the failure cases are. Is it too many
// iterations? Spend some time going through the math figuring out if there
@ -769,6 +778,5 @@ bool EuclideanResectionPPnP(const Mat2X &x_camera,
return true;
}
} // namespace resection
} // namespace euclidean_resection
} // namespace libmv
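
An illustrative check of AbsoluteOrientation (not from the patch). The convention Xp ≈ R * X + t follows the way EuclideanResectionEPnP calls it above; the include path is the header reformatted next.

#include "libmv/multiview/euclidean_resection.h"
#include "libmv/numeric/numeric.h"

int main() {
  using namespace libmv;

  // Ground-truth rigid motion: 90 degree rotation about Z plus a translation.
  Mat3 R_gt;
  // clang-format off
  R_gt << 0, -1, 0,
          1,  0, 0,
          0,  0, 1;
  // clang-format on
  Vec3 t_gt(0.5, -2.0, 3.0);

  // A handful of 3D points and their transformed copies.
  Mat3X X(3, 6);
  X.setRandom();
  Mat3X Xp = (R_gt * X).colwise() + t_gt;

  // Horn-style closed-form alignment: recovers R, t with Xp ~= R * X + t.
  Mat3 R;
  Vec3 t;
  euclidean_resection::AbsoluteOrientation(X, Xp, &R, &t);
  return ((R - R_gt).norm() < 1e-8 && (t - t_gt).norm() < 1e-8) ? 0 : 1;
}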

View File

@ -21,8 +21,8 @@
#ifndef LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
#define LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
namespace euclidean_resection {
@ -33,7 +33,7 @@ enum ResectionMethod {
// The "EPnP" algorithm by Lepetit et al.
// http://cvlab.epfl.ch/~lepetit/papers/lepetit_ijcv08.pdf
RESECTION_EPNP,
// The Procrustes PNP algorithm ("PPnP")
// http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
RESECTION_PPNP
@ -50,9 +50,10 @@ enum ResectionMethod {
* \param t Solution for the camera translation vector
* \param method The resection method to use.
*/
bool EuclideanResection(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t,
bool EuclideanResection(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3* R,
Vec3* t,
ResectionMethod method = RESECTION_EPNP);
/**
@ -68,10 +69,11 @@ bool EuclideanResection(const Mat2X &x_camera,
* \param t Solution for the camera translation vector
* \param method Resection method
*/
bool EuclideanResection(const Mat &x_image,
const Mat3X &X_world,
const Mat3 &K,
Mat3 *R, Vec3 *t,
bool EuclideanResection(const Mat& x_image,
const Mat3X& X_world,
const Mat3& K,
Mat3* R,
Vec3* t,
ResectionMethod method = RESECTION_EPNP);
/**
@ -84,10 +86,7 @@ bool EuclideanResection(const Mat &x_image,
* Horn, Hilden, "Closed-form solution of absolute orientation using
* orthonormal matrices"
*/
void AbsoluteOrientation(const Mat3X &X,
const Mat3X &Xp,
Mat3 *R,
Vec3 *t);
void AbsoluteOrientation(const Mat3X& X, const Mat3X& Xp, Mat3* R, Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
@ -102,9 +101,10 @@ void AbsoluteOrientation(const Mat3X &X,
* This is the algorithm described in: "Linear Pose Estimation from Points or
* Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no. 5.
*/
void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t);
void EuclideanResectionAnsarDaniilidis(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3* R,
Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
* more 3D points and their images.
@ -120,9 +120,10 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
* and F. Moreno-Noguer and P. Fua, IJCV 2009. vol. 81, no. 2
* \note: the non-linear optimization is not implemented here.
*/
bool EuclideanResectionEPnP(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t);
bool EuclideanResectionEPnP(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3* R,
Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
@ -137,12 +138,12 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
* Straight from the paper:
* http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
*/
bool EuclideanResectionPPnP(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t);
bool EuclideanResectionPPnP(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3* R,
Vec3* t);
} // namespace euclidean_resection
} // namespace libmv
#endif /* LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_ */
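
A usage sketch for the resection entry point declared above (synthetic data, not part of the patch; the tests that follow do the same thing more carefully).

#include "libmv/multiview/euclidean_resection.h"
#include "libmv/numeric/numeric.h"

int main() {
  using namespace libmv;

  // Ground-truth camera: identity rotation, small translation.
  Mat3 R_gt = Mat3::Identity();
  Vec3 t_gt(0.1, -0.2, 0.5);

  // Six 3D points placed in front of the camera.
  Mat3X X_world(3, 6);
  X_world.setRandom();
  X_world.row(2).array() += 5.0;

  // Project into normalized camera coordinates: x = dehom(R * X + t).
  Mat3X X_cam = (R_gt * X_world).colwise() + t_gt;
  Mat2X x_camera(2, 6);
  for (int i = 0; i < 6; ++i) {
    x_camera.col(i) = X_cam.col(i).head<2>() / X_cam(2, i);
  }

  Mat3 R;
  Vec3 t;
  bool ok = euclidean_resection::EuclideanResection(
      x_camera, X_world, &R, &t, euclidean_resection::RESECTION_EPNP);
  // On clean synthetic data R and t should closely match R_gt and t_gt.
  return ok ? 0 : 1;
}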

View File

@ -19,9 +19,9 @@
// IN THE SOFTWARE.
#include "libmv/multiview/euclidean_resection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv::euclidean_resection;
@ -33,10 +33,10 @@ static void CreateCameraSystem(const Mat3& KK,
const Vec& X_distances,
const Mat3& R_input,
const Vec3& T_input,
Mat2X *x_camera,
Mat3X *X_world,
Mat3 *R_expected,
Vec3 *T_expected) {
Mat2X* x_camera,
Mat3X* X_world,
Mat3* R_expected,
Vec3* T_expected) {
int num_points = x_image.cols();
Mat3X x_unit_cam(3, num_points);
@ -76,9 +76,9 @@ TEST(AbsoluteOrientation, QuaternionSolution) {
// Create a random translation and rotation.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 t_input;
t_input.setRandom();
@ -109,26 +109,29 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
image_dimensions << 1600, 1200;
Mat3 KK;
// clang-format off
KK << 2796, 0, 804,
0 , 2796, 641,
0, 0, 1;
// clang-format on
// The real image points.
int num_points = 4;
Mat3X x_image(3, num_points);
// clang-format off
x_image << 1164.06, 734.948, 749.599, 430.727,
681.386, 844.59, 496.315, 580.775,
1, 1, 1, 1;
// clang-format on
// A vector of the 4 distances to the 3D points.
Vec X_distances = 100 * Vec::Random(num_points).array().abs();
// Create the random camera motion R and t that resection should recover.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 T_input;
T_input.setRandom();
@ -140,15 +143,21 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
Vec3 T_expected;
Mat3X X_world;
Mat2X x_camera;
CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
&x_camera, &X_world, &R_expected, &T_expected);
CreateCameraSystem(KK,
x_image,
X_distances,
R_input,
T_input,
&x_camera,
&X_world,
&R_expected,
&T_expected);
// Finally, run the code under test.
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_ANSAR_DANIILIDIS);
EuclideanResection(
x_camera, X_world, &R_output, &T_output, RESECTION_ANSAR_DANIILIDIS);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
@ -173,9 +182,11 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
// TODO(jmichot): Reduce the code duplication here with the code above.
TEST(EuclideanResection, Points6AllRandomInput) {
Mat3 KK;
// clang-format off
KK << 2796, 0, 804,
0 , 2796, 641,
0, 0, 1;
// clang-format on
// Create random image points for a 1600x1200 image.
int w = 1600;
@ -192,9 +203,9 @@ TEST(EuclideanResection, Points6AllRandomInput) {
// Create the random camera motion R and t that resection should recover.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 T_input;
T_input.setRandom();
@ -204,33 +215,36 @@ TEST(EuclideanResection, Points6AllRandomInput) {
Mat3 R_expected;
Vec3 T_expected;
Mat3X X_world;
CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
&x_camera, &X_world, &R_expected, &T_expected);
CreateCameraSystem(KK,
x_image,
X_distances,
R_input,
T_input,
&x_camera,
&X_world,
&R_expected,
&T_expected);
// Test each of the resection methods.
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_ANSAR_DANIILIDIS);
EuclideanResection(
x_camera, X_world, &R_output, &T_output, RESECTION_ANSAR_DANIILIDIS);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_EPNP);
EuclideanResection(x_camera, X_world, &R_output, &T_output, RESECTION_EPNP);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_image, X_world, KK,
&R_output, &T_output);
EuclideanResection(x_image, X_world, KK, &R_output, &T_output);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}

View File

@ -22,15 +22,15 @@
#include "ceres/ceres.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
namespace libmv {
static void EliminateRow(const Mat34 &P, int row, Mat *X) {
static void EliminateRow(const Mat34& P, int row, Mat* X) {
X->resize(2, 4);
int first_row = (row + 1) % 3;
@ -42,7 +42,7 @@ static void EliminateRow(const Mat34 &P, int row, Mat *X) {
}
}
void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2) {
void ProjectionsFromFundamental(const Mat3& F, Mat34* P1, Mat34* P2) {
*P1 << Mat3::Identity(), Vec3::Zero();
Vec3 e2;
Mat3 Ft = F.transpose();
@ -51,7 +51,7 @@ void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2) {
}
// Addapted from vgg_F_from_P.
void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
void FundamentalFromProjections(const Mat34& P1, const Mat34& P2, Mat3* F) {
Mat X[3];
Mat Y[3];
Mat XY;
@ -71,7 +71,7 @@ void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
// HZ 11.1 pag.279 (x1 = x, x2 = x')
// http://www.cs.unc.edu/~marc/tutorial/node54.html
static double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
static double EightPointSolver(const Mat& x1, const Mat& x2, Mat3* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());
@ -98,7 +98,7 @@ static double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
}
// HZ 11.1.1 pag.280
void EnforceFundamentalRank2Constraint(Mat3 *F) {
void EnforceFundamentalRank2Constraint(Mat3* F) {
Eigen::JacobiSVD<Mat3> USV(*F, Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec3 d = USV.singularValues();
d(2) = 0.0;
@ -106,9 +106,7 @@ void EnforceFundamentalRank2Constraint(Mat3 *F) {
}
// HZ 11.2 pag.281 (x1 = x, x2 = x')
double NormalizedEightPointSolver(const Mat &x1,
const Mat &x2,
Mat3 *F) {
double NormalizedEightPointSolver(const Mat& x1, const Mat& x2, Mat3* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());
@ -135,9 +133,9 @@ double NormalizedEightPointSolver(const Mat &x1,
// Seven-point algorithm.
// http://www.cs.unc.edu/~marc/tutorial/node55.html
double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
const Mat &x2,
std::vector<Mat3> *F) {
double FundamentalFrom7CorrespondencesLinear(const Mat& x1,
const Mat& x2,
std::vector<Mat3>* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_EQ(x1.cols(), 7);
DCHECK_EQ(x1.rows(), x2.rows());
@ -169,25 +167,29 @@ double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
// Then, use the condition det(F) = 0 to determine F. In other words, solve
// det(F1 + a*F2) = 0 for a.
double a = F1(0, 0), j = F2(0, 0),
b = F1(0, 1), k = F2(0, 1),
c = F1(0, 2), l = F2(0, 2),
d = F1(1, 0), m = F2(1, 0),
e = F1(1, 1), n = F2(1, 1),
f = F1(1, 2), o = F2(1, 2),
g = F1(2, 0), p = F2(2, 0),
h = F1(2, 1), q = F2(2, 1),
i = F1(2, 2), r = F2(2, 2);
double a = F1(0, 0), j = F2(0, 0);
double b = F1(0, 1), k = F2(0, 1);
double c = F1(0, 2), l = F2(0, 2);
double d = F1(1, 0), m = F2(1, 0);
double e = F1(1, 1), n = F2(1, 1);
double f = F1(1, 2), o = F2(1, 2);
double g = F1(2, 0), p = F2(2, 0);
double h = F1(2, 1), q = F2(2, 1);
double i = F1(2, 2), r = F2(2, 2);
// Run fundamental_7point_coeffs.py to get the below coefficients.
// The coefficients are in ascending powers of alpha, i.e. P[N]*x^N.
double P[4] = {
a*e*i + b*f*g + c*d*h - a*f*h - b*d*i - c*e*g,
a*e*r + a*i*n + b*f*p + b*g*o + c*d*q + c*h*m + d*h*l + e*i*j + f*g*k -
a*f*q - a*h*o - b*d*r - b*i*m - c*e*p - c*g*n - d*i*k - e*g*l - f*h*j,
a*n*r + b*o*p + c*m*q + d*l*q + e*j*r + f*k*p + g*k*o + h*l*m + i*j*n -
a*o*q - b*m*r - c*n*p - d*k*r - e*l*p - f*j*q - g*l*n - h*j*o - i*k*m,
j*n*r + k*o*p + l*m*q - j*o*q - k*m*r - l*n*p,
a * e * i + b * f * g + c * d * h - a * f * h - b * d * i - c * e * g,
a * e * r + a * i * n + b * f * p + b * g * o + c * d * q + c * h * m +
d * h * l + e * i * j + f * g * k - a * f * q - a * h * o -
b * d * r - b * i * m - c * e * p - c * g * n - d * i * k -
e * g * l - f * h * j,
a * n * r + b * o * p + c * m * q + d * l * q + e * j * r + f * k * p +
g * k * o + h * l * m + i * j * n - a * o * q - b * m * r -
c * n * p - d * k * r - e * l * p - f * j * q - g * l * n -
h * j * o - i * k * m,
j * n * r + k * o * p + l * m * q - j * o * q - k * m * r - l * n * p,
};
// Solve for the roots of P[3]*x^3 + P[2]*x^2 + P[1]*x + P[0] = 0.
@ -195,15 +197,15 @@ double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
int num_roots = SolveCubicPolynomial(P, roots);
// Build the fundamental matrix for each solution.
for (int kk = 0; kk < num_roots; ++kk) {
for (int kk = 0; kk < num_roots; ++kk) {
F->push_back(F1 + roots[kk] * F2);
}
return s;
}
double FundamentalFromCorrespondences7Point(const Mat &x1,
const Mat &x2,
std::vector<Mat3> *F) {
double FundamentalFromCorrespondences7Point(const Mat& x1,
const Mat& x2,
std::vector<Mat3>* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 7);
DCHECK_EQ(x1.rows(), x2.rows());
@ -218,25 +220,25 @@ double FundamentalFromCorrespondences7Point(const Mat &x1,
ApplyTransformationToPoints(x2, T2, &x2_normalized);
// Estimate the fundamental matrix.
double smaller_singular_value =
FundamentalFrom7CorrespondencesLinear(x1_normalized, x2_normalized, &(*F));
double smaller_singular_value = FundamentalFrom7CorrespondencesLinear(
x1_normalized, x2_normalized, &(*F));
for (int k = 0; k < F->size(); ++k) {
Mat3 & Fmat = (*F)[k];
Mat3& Fmat = (*F)[k];
// Denormalize the fundamental matrix.
Fmat = T2.transpose() * Fmat * T1;
}
return smaller_singular_value;
}
void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized) {
void NormalizeFundamental(const Mat3& F, Mat3* F_normalized) {
*F_normalized = F / FrobeniusNorm(F);
if ((*F_normalized)(2, 2) < 0) {
*F_normalized *= -1;
}
}
double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
double SampsonDistance(const Mat& F, const Vec2& x1, const Vec2& x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
@ -244,11 +246,11 @@ double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
return Square(y_F_x) / ( F_x.head<2>().squaredNorm()
+ Ft_y.head<2>().squaredNorm());
return Square(y_F_x) /
(F_x.head<2>().squaredNorm() + Ft_y.head<2>().squaredNorm());
}
double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
double SymmetricEpipolarDistance(const Mat& F, const Vec2& x1, const Vec2& x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
@ -256,43 +258,40 @@ double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
return Square(y_F_x) * ( 1 / F_x.head<2>().squaredNorm()
+ 1 / Ft_y.head<2>().squaredNorm());
return Square(y_F_x) *
(1 / F_x.head<2>().squaredNorm() + 1 / Ft_y.head<2>().squaredNorm());
}
// HZ 9.6 pag 257 (formula 9.12)
void EssentialFromFundamental(const Mat3 &F,
const Mat3 &K1,
const Mat3 &K2,
Mat3 *E) {
void EssentialFromFundamental(const Mat3& F,
const Mat3& K1,
const Mat3& K2,
Mat3* E) {
*E = K2.transpose() * F * K1;
}
// HZ 9.6 pag 257 (formula 9.12)
// Or http://ai.stanford.edu/~birch/projective/node20.html
void FundamentalFromEssential(const Mat3 &E,
const Mat3 &K1,
const Mat3 &K2,
Mat3 *F) {
void FundamentalFromEssential(const Mat3& E,
const Mat3& K1,
const Mat3& K2,
Mat3* F) {
*F = K2.inverse().transpose() * E * K1.inverse();
}
void RelativeCameraMotion(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *R,
Vec3 *t) {
void RelativeCameraMotion(const Mat3& R1,
const Vec3& t1,
const Mat3& R2,
const Vec3& t2,
Mat3* R,
Vec3* t) {
*R = R2 * R1.transpose();
*t = t2 - (*R) * t1;
}
// HZ 9.6 pag 257
void EssentialFromRt(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *E) {
void EssentialFromRt(
const Mat3& R1, const Vec3& t1, const Mat3& R2, const Vec3& t2, Mat3* E) {
Mat3 R;
Vec3 t;
RelativeCameraMotion(R1, t1, R2, t2, &R, &t);
@ -301,11 +300,11 @@ void EssentialFromRt(const Mat3 &R1,
}
// HZ 9.6 pag 259 (Result 9.19)
void MotionFromEssential(const Mat3 &E,
std::vector<Mat3> *Rs,
std::vector<Vec3> *ts) {
void MotionFromEssential(const Mat3& E,
std::vector<Mat3>* Rs,
std::vector<Vec3>* ts) {
Eigen::JacobiSVD<Mat3> USV(E, Eigen::ComputeFullU | Eigen::ComputeFullV);
Mat3 U = USV.matrixU();
Mat3 U = USV.matrixU();
Mat3 Vt = USV.matrixV().transpose();
// Last column of U is undetermined since d = (a a 0).
@ -318,9 +317,11 @@ void MotionFromEssential(const Mat3 &E,
}
Mat3 W;
// clang-format off
W << 0, -1, 0,
1, 0, 0,
0, 0, 1;
// clang-format on
Mat3 U_W_Vt = U * W * Vt;
Mat3 U_Wt_Vt = U * W.transpose() * Vt;
@ -332,18 +333,18 @@ void MotionFromEssential(const Mat3 &E,
(*Rs)[3] = U_Wt_Vt;
ts->resize(4);
(*ts)[0] = U.col(2);
(*ts)[0] = U.col(2);
(*ts)[1] = -U.col(2);
(*ts)[2] = U.col(2);
(*ts)[2] = U.col(2);
(*ts)[3] = -U.col(2);
}
int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
const std::vector<Vec3> &ts,
const Mat3 &K1,
const Vec2 &x1,
const Mat3 &K2,
const Vec2 &x2) {
int MotionFromEssentialChooseSolution(const std::vector<Mat3>& Rs,
const std::vector<Vec3>& ts,
const Mat3& K1,
const Vec2& x1,
const Mat3& K2,
const Vec2& x2) {
DCHECK_EQ(4, Rs.size());
DCHECK_EQ(4, ts.size());
@ -354,8 +355,8 @@ int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
t1.setZero();
P_From_KRt(K1, R1, t1, &P1);
for (int i = 0; i < 4; ++i) {
const Mat3 &R2 = Rs[i];
const Vec3 &t2 = ts[i];
const Mat3& R2 = Rs[i];
const Vec3& t2 = ts[i];
P_From_KRt(K2, R2, t2, &P2);
Vec3 X;
TriangulateDLT(P1, x1, P2, x2, &X);
@ -369,13 +370,13 @@ int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
return -1;
}
bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
const Mat3 &K1,
const Vec2 &x1,
const Mat3 &K2,
const Vec2 &x2,
Mat3 *R,
Vec3 *t) {
bool MotionFromEssentialAndCorrespondence(const Mat3& E,
const Mat3& K1,
const Vec2& x1,
const Mat3& K2,
const Vec2& x2,
Mat3* R,
Vec3* t) {
std::vector<Mat3> Rs;
std::vector<Vec3> ts;
MotionFromEssential(E, &Rs, &ts);
@ -389,7 +390,7 @@ bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
}
}
void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
void FundamentalToEssential(const Mat3& F, Mat3* E) {
Eigen::JacobiSVD<Mat3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
// See Hartley & Zisserman page 294, result 11.1, which shows how to get the
@ -399,8 +400,8 @@ void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
double s = (a + b) / 2.0;
LG << "Initial reconstruction's rotation is non-euclidean by "
<< (((a - b) / std::max(a, b)) * 100) << "%; singular values:"
<< svd.singularValues().transpose();
<< (((a - b) / std::max(a, b)) * 100)
<< "%; singular values:" << svd.singularValues().transpose();
Vec3 diag;
diag << s, s, 0;
@ -410,9 +411,8 @@ void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
// Default settings for fundamental estimation which should be suitable
// for a wide range of use cases.
EstimateFundamentalOptions::EstimateFundamentalOptions(void) :
max_num_iterations(50),
expected_average_symmetric_distance(1e-16) {
EstimateFundamentalOptions::EstimateFundamentalOptions(void)
: max_num_iterations(50), expected_average_symmetric_distance(1e-16) {
}
namespace {
@ -420,12 +420,11 @@ namespace {
// used for fundamental matrix refinement.
class FundamentalSymmetricEpipolarCostFunctor {
public:
FundamentalSymmetricEpipolarCostFunctor(const Vec2 &x,
const Vec2 &y)
: x_(x), y_(y) {}
FundamentalSymmetricEpipolarCostFunctor(const Vec2& x, const Vec2& y)
: x_(x), y_(y) {}
template<typename T>
bool operator()(const T *fundamental_parameters, T *residuals) const {
template <typename T>
bool operator()(const T* fundamental_parameters, T* residuals) const {
typedef Eigen::Matrix<T, 3, 3> Mat3;
typedef Eigen::Matrix<T, 3, 1> Vec3;
@ -454,9 +453,10 @@ class FundamentalSymmetricEpipolarCostFunctor {
// average value.
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
TerminationCheckingCallback(const Mat &x1, const Mat &x2,
const EstimateFundamentalOptions &options,
Mat3 *F)
TerminationCheckingCallback(const Mat& x1,
const Mat& x2,
const EstimateFundamentalOptions& options,
Mat3* F)
: options_(options), x1_(x1), x2_(x2), F_(F) {}
virtual ceres::CallbackReturnType operator()(
@ -469,9 +469,7 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
// Calculate average of symmetric epipolar distance.
double average_distance = 0.0;
for (int i = 0; i < x1_.cols(); i++) {
average_distance = SymmetricEpipolarDistance(*F_,
x1_.col(i),
x2_.col(i));
average_distance = SymmetricEpipolarDistance(*F_, x1_.col(i), x2_.col(i));
}
average_distance /= x1_.cols();
@ -483,19 +481,19 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
}
private:
const EstimateFundamentalOptions &options_;
const Mat &x1_;
const Mat &x2_;
Mat3 *F_;
const EstimateFundamentalOptions& options_;
const Mat& x1_;
const Mat& x2_;
Mat3* F_;
};
} // namespace
/* Fundamental transformation estimation. */
bool EstimateFundamentalFromCorrespondences(
const Mat &x1,
const Mat &x2,
const EstimateFundamentalOptions &options,
Mat3 *F) {
const Mat& x1,
const Mat& x2,
const EstimateFundamentalOptions& options,
Mat3* F) {
// Step 1: Algebraic fundamental estimation.
// Assume algebraic estiation always succeeds,
@ -506,16 +504,15 @@ bool EstimateFundamentalFromCorrespondences(
// Step 2: Refine matrix using Ceres minimizer.
ceres::Problem problem;
for (int i = 0; i < x1.cols(); i++) {
FundamentalSymmetricEpipolarCostFunctor
*fundamental_symmetric_epipolar_cost_function =
new FundamentalSymmetricEpipolarCostFunctor(x1.col(i),
x2.col(i));
FundamentalSymmetricEpipolarCostFunctor*
fundamental_symmetric_epipolar_cost_function =
new FundamentalSymmetricEpipolarCostFunctor(x1.col(i), x2.col(i));
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<
FundamentalSymmetricEpipolarCostFunctor,
2, // num_residuals
9>(fundamental_symmetric_epipolar_cost_function),
new ceres::AutoDiffCostFunction<FundamentalSymmetricEpipolarCostFunctor,
2, // num_residuals
9>(
fundamental_symmetric_epipolar_cost_function),
NULL,
F->data());
}
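
A sketch of how the estimation pipeline above (algebraic eight-point solve followed by Ceres refinement) is driven; only the libmv/multiview/fundamental.h include path is assumed, the rest uses the signatures in this file.

#include "libmv/multiview/fundamental.h"  // Assumed header path.
#include "libmv/numeric/numeric.h"

int main() {
  using namespace libmv;

  // Eight correspondences, one per column, in image coordinates. In practice
  // x1/x2 hold matched feature positions; random values only exercise the call.
  const int n = 8;
  Mat x1 = 500.0 * (Mat::Random(2, n) + Mat::Ones(2, n));
  Mat x2 = 500.0 * (Mat::Random(2, n) + Mat::Ones(2, n));

  // Algebraic estimate plus non-linear refinement with the default options.
  EstimateFundamentalOptions options;
  Mat3 F;
  bool ok = EstimateFundamentalFromCorrespondences(x1, x2, options, &F);

  // Per-correspondence residuals from the helpers defined above.
  double sampson = SampsonDistance(F, x1.col(0), x2.col(0));
  double symmetric = SymmetricEpipolarDistance(F, x1.col(0), x2.col(0));
  return (ok && sampson >= 0.0 && symmetric >= 0.0) ? 0 : 1;
}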

View File

@ -27,36 +27,34 @@
namespace libmv {
void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2);
void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F);
void ProjectionsFromFundamental(const Mat3& F, Mat34* P1, Mat34* P2);
void FundamentalFromProjections(const Mat34& P1, const Mat34& P2, Mat3* F);
/**
* 7 points (minimal case, points coordinates must be normalized before):
*/
double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
const Mat &x2,
std::vector<Mat3> *F);
double FundamentalFrom7CorrespondencesLinear(const Mat& x1,
const Mat& x2,
std::vector<Mat3>* F);
/**
* 7 points (points coordinates must be in image space):
*/
double FundamentalFromCorrespondences7Point(const Mat &x1,
const Mat &x2,
std::vector<Mat3> *F);
double FundamentalFromCorrespondences7Point(const Mat& x1,
const Mat& x2,
std::vector<Mat3>* F);
/**
* 8 points (points coordinates must be in image space):
*/
double NormalizedEightPointSolver(const Mat &x1,
const Mat &x2,
Mat3 *F);
double NormalizedEightPointSolver(const Mat& x1, const Mat& x2, Mat3* F);
/**
* Fundamental matrix utility function:
*/
void EnforceFundamentalRank2Constraint(Mat3 *F);
void EnforceFundamentalRank2Constraint(Mat3* F);
void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized);
void NormalizeFundamental(const Mat3& F, Mat3* F_normalized);
/**
* Approximate squared reprojection errror.
@ -64,14 +62,14 @@ void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized);
* See page 287 of HZ equation 11.9. This avoids triangulating the point,
* relying only on the entries in F.
*/
double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
double SampsonDistance(const Mat& F, const Vec2& x1, const Vec2& x2);
/**
* Calculates the sum of the distances from the points to the epipolar lines.
*
* See page 288 of HZ equation 11.10.
*/
double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
double SymmetricEpipolarDistance(const Mat& F, const Vec2& x1, const Vec2& x2);
/**
* Compute the relative camera motion between two cameras.
@ -81,32 +79,29 @@ double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
* If T1 and T2 are the camera motions, the computed relative motion is
* T = T2 T1^{-1}
*/
void RelativeCameraMotion(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *R,
Vec3 *t);
void RelativeCameraMotion(const Mat3& R1,
const Vec3& t1,
const Mat3& R2,
const Vec3& t2,
Mat3* R,
Vec3* t);
void EssentialFromFundamental(const Mat3 &F,
const Mat3 &K1,
const Mat3 &K2,
Mat3 *E);
void EssentialFromFundamental(const Mat3& F,
const Mat3& K1,
const Mat3& K2,
Mat3* E);
void FundamentalFromEssential(const Mat3 &E,
const Mat3 &K1,
const Mat3 &K2,
Mat3 *F);
void FundamentalFromEssential(const Mat3& E,
const Mat3& K1,
const Mat3& K2,
Mat3* F);
void EssentialFromRt(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *E);
void EssentialFromRt(
const Mat3& R1, const Vec3& t1, const Mat3& R2, const Vec3& t2, Mat3* E);
void MotionFromEssential(const Mat3 &E,
std::vector<Mat3> *Rs,
std::vector<Vec3> *ts);
void MotionFromEssential(const Mat3& E,
std::vector<Mat3>* Rs,
std::vector<Vec3>* ts);
/**
* Choose one of the four possible motion solutions from an essential matrix.
@ -117,25 +112,25 @@ void MotionFromEssential(const Mat3 &E,
*
* \return index of the right solution or -1 if no solution.
*/
int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
const std::vector<Vec3> &ts,
const Mat3 &K1,
const Vec2 &x1,
const Mat3 &K2,
const Vec2 &x2);
int MotionFromEssentialChooseSolution(const std::vector<Mat3>& Rs,
const std::vector<Vec3>& ts,
const Mat3& K1,
const Vec2& x1,
const Mat3& K2,
const Vec2& x2);
bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
const Mat3 &K1,
const Vec2 &x1,
const Mat3 &K2,
const Vec2 &x2,
Mat3 *R,
Vec3 *t);
bool MotionFromEssentialAndCorrespondence(const Mat3& E,
const Mat3& K1,
const Vec2& x1,
const Mat3& K2,
const Vec2& x2,
Mat3* R,
Vec3* t);
/**
* Find closest essential matrix E to fundamental F
*/
void FundamentalToEssential(const Mat3 &F, Mat3 *E);
void FundamentalToEssential(const Mat3& F, Mat3* E);
/**
* This structure contains options that controls how the fundamental
@ -170,10 +165,10 @@ struct EstimateFundamentalOptions {
* refinement.
*/
bool EstimateFundamentalFromCorrespondences(
const Mat &x1,
const Mat &x2,
const EstimateFundamentalOptions &options,
Mat3 *F);
const Mat& x1,
const Mat& x2,
const EstimateFundamentalOptions& options,
Mat3* F);
} // namespace libmv
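
As an aside (not from the patch), the essential-matrix helpers declared above compose as follows; the intrinsics and the single correspondence are illustrative, and the include path is assumed.

#include "libmv/multiview/fundamental.h"  // Assumed header path.
#include "libmv/numeric/numeric.h"

int main() {
  using namespace libmv;

  // Two calibrated cameras with identity intrinsics to keep the numbers simple.
  Mat3 K1 = Mat3::Identity();
  Mat3 K2 = Mat3::Identity();

  // Camera 1 at the origin, camera 2 translated one unit along -X.
  Mat3 R1 = Mat3::Identity();
  Mat3 R2 = Mat3::Identity();
  Vec3 t1 = Vec3::Zero();
  Vec3 t2(-1.0, 0.0, 0.0);

  // E from the two poses, then F from E and the intrinsics (HZ 9.6). With
  // identity intrinsics F coincides with E.
  Mat3 E, F;
  EssentialFromRt(R1, t1, R2, t2, &E);
  FundamentalFromEssential(E, K1, K2, &F);

  // One correspondence: the 3D point (0, 0, 5) seen by both cameras.
  Vec2 x1(0.0, 0.0);
  Vec2 x2(-0.2, 0.0);

  // Pick the physically valid (R, t) among the four decompositions of E.
  Mat3 R;
  Vec3 t;
  bool ok = MotionFromEssentialAndCorrespondence(E, K1, x1, K2, x2, &R, &t);
  // R should be near identity and t parallel to (-1, 0, 0), up to scale.
  return ok ? 0 : 1;
}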

View File

@ -34,12 +34,14 @@ using namespace libmv;
TEST(Fundamental, FundamentalFromProjections) {
Mat34 P1_gt, P2_gt;
// clang-format off
P1_gt << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
P2_gt << 1, 1, 1, 3,
0, 2, 0, 3,
0, 1, 1, 0;
// clang-format on
Mat3 F_gt;
FundamentalFromProjections(P1_gt, P2_gt, &F_gt);
@ -55,8 +57,10 @@ TEST(Fundamental, FundamentalFromProjections) {
TEST(Fundamental, PreconditionerFromPoints) {
int n = 4;
Mat points(2, n);
// clang-format off
points << 0, 0, 1, 1,
0, 2, 1, 3;
// clang-format on
Mat3 T;
PreconditionerFromPoints(points, &T);
@ -152,8 +156,8 @@ TEST(Fundamental, MotionFromEssentialAndCorrespondence) {
Mat3 R_estimated;
Vec3 t_estimated;
MotionFromEssentialAndCorrespondence(E, d.K1, x1, d.K2, x2,
&R_estimated, &t_estimated);
MotionFromEssentialAndCorrespondence(
E, d.K1, x1, d.K2, x2, &R_estimated, &t_estimated);
EXPECT_LE(FrobeniusDistance(R_estimated, R), 1e-8);
EXPECT_LE(DistanceL2(t_estimated, t), 1e-8);

View File

@ -26,7 +26,7 @@
#include "libmv/multiview/homography_parameterization.h"
namespace libmv {
/** 2D Homography transformation estimation in the case that points are in
/** 2D Homography transformation estimation in the case that points are in
* euclidean coordinates.
*
* x = H y
@ -44,10 +44,7 @@ namespace libmv {
* (-x2*a+x1*d)*y1 + (-x2*b+x1*e)*y2 + -x2*c+x1*f |0|
*/
static bool Homography2DFromCorrespondencesLinearEuc(
const Mat &x1,
const Mat &x2,
Mat3 *H,
double expected_precision) {
const Mat& x1, const Mat& x2, Mat3* H, double expected_precision) {
assert(2 == x1.rows());
assert(4 <= x1.cols());
assert(x1.rows() == x2.rows());
@ -58,27 +55,27 @@ static bool Homography2DFromCorrespondencesLinearEuc(
Mat b = Mat::Zero(n * 3, 1);
for (int i = 0; i < n; ++i) {
int j = 3 * i;
L(j, 0) = x1(0, i); // a
L(j, 1) = x1(1, i); // b
L(j, 2) = 1.0; // c
L(j, 0) = x1(0, i); // a
L(j, 1) = x1(1, i); // b
L(j, 2) = 1.0; // c
L(j, 6) = -x2(0, i) * x1(0, i); // g
L(j, 7) = -x2(0, i) * x1(1, i); // h
b(j, 0) = x2(0, i); // i
b(j, 0) = x2(0, i); // i
++j;
L(j, 3) = x1(0, i); // d
L(j, 4) = x1(1, i); // e
L(j, 5) = 1.0; // f
L(j, 3) = x1(0, i); // d
L(j, 4) = x1(1, i); // e
L(j, 5) = 1.0; // f
L(j, 6) = -x2(1, i) * x1(0, i); // g
L(j, 7) = -x2(1, i) * x1(1, i); // h
b(j, 0) = x2(1, i); // i
b(j, 0) = x2(1, i); // i
// This ensures better stability
// TODO(julien) make a lite version without this 3rd set
++j;
L(j, 0) = x2(1, i) * x1(0, i); // a
L(j, 1) = x2(1, i) * x1(1, i); // b
L(j, 2) = x2(1, i); // c
L(j, 0) = x2(1, i) * x1(0, i); // a
L(j, 1) = x2(1, i) * x1(1, i); // b
L(j, 2) = x2(1, i); // c
L(j, 3) = -x2(0, i) * x1(0, i); // d
L(j, 4) = -x2(0, i) * x1(1, i); // e
L(j, 5) = -x2(0, i); // f
@ -86,14 +83,15 @@ static bool Homography2DFromCorrespondencesLinearEuc(
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
Homography2DNormalizedParameterization<double>::To(h, H);
if ((L * h).isApprox(b, expected_precision)) {
if ((L * h).isApprox(b, expected_precision)) {
return true;
} else {
return false;
}
}
/** 2D Homography transformation estimation in the case that points are in
// clang-format off
/** 2D Homography transformation estimation in the case that points are in
* homogeneous coordinates.
*
* | 0 -x3 x2| |a b c| |y1| -x3*d+x2*g -x3*e+x2*h -x3*f+x2*1 |y1| (-x3*d+x2*g)*y1 (-x3*e+x2*h)*y2 (-x3*f+x2*1)*y3 |0|
@ -101,13 +99,14 @@ static bool Homography2DFromCorrespondencesLinearEuc(
* |-x2 x1 0| |g h 1| |y3| -x2*a+x1*d -x2*b+x1*e -x2*c+x1*f |y3| (-x2*a+x1*d)*y1 (-x2*b+x1*e)*y2 (-x2*c+x1*f)*y3 |0|
* X = |a b c d e f g h|^t
*/
bool Homography2DFromCorrespondencesLinear(const Mat &x1,
const Mat &x2,
Mat3 *H,
// clang-format on
bool Homography2DFromCorrespondencesLinear(const Mat& x1,
const Mat& x2,
Mat3* H,
double expected_precision) {
if (x1.rows() == 2) {
return Homography2DFromCorrespondencesLinearEuc(x1, x2, H,
expected_precision);
return Homography2DFromCorrespondencesLinearEuc(
x1, x2, H, expected_precision);
}
assert(3 == x1.rows());
assert(4 <= x1.cols());
@ -122,33 +121,33 @@ bool Homography2DFromCorrespondencesLinear(const Mat &x1,
Mat b = Mat::Zero(n * 3, 1);
for (int i = 0; i < n; ++i) {
int j = 3 * i;
L(j, 0) = x2(w, i) * x1(x, i); // a
L(j, 1) = x2(w, i) * x1(y, i); // b
L(j, 2) = x2(w, i) * x1(w, i); // c
L(j, 0) = x2(w, i) * x1(x, i); // a
L(j, 1) = x2(w, i) * x1(y, i); // b
L(j, 2) = x2(w, i) * x1(w, i); // c
L(j, 6) = -x2(x, i) * x1(x, i); // g
L(j, 7) = -x2(x, i) * x1(y, i); // h
b(j, 0) = x2(x, i) * x1(w, i);
b(j, 0) = x2(x, i) * x1(w, i);
++j;
L(j, 3) = x2(w, i) * x1(x, i); // d
L(j, 4) = x2(w, i) * x1(y, i); // e
L(j, 5) = x2(w, i) * x1(w, i); // f
L(j, 3) = x2(w, i) * x1(x, i); // d
L(j, 4) = x2(w, i) * x1(y, i); // e
L(j, 5) = x2(w, i) * x1(w, i); // f
L(j, 6) = -x2(y, i) * x1(x, i); // g
L(j, 7) = -x2(y, i) * x1(y, i); // h
b(j, 0) = x2(y, i) * x1(w, i);
b(j, 0) = x2(y, i) * x1(w, i);
// This ensures better stability
++j;
L(j, 0) = x2(y, i) * x1(x, i); // a
L(j, 1) = x2(y, i) * x1(y, i); // b
L(j, 2) = x2(y, i) * x1(w, i); // c
L(j, 0) = x2(y, i) * x1(x, i); // a
L(j, 1) = x2(y, i) * x1(y, i); // b
L(j, 2) = x2(y, i) * x1(w, i); // c
L(j, 3) = -x2(x, i) * x1(x, i); // d
L(j, 4) = -x2(x, i) * x1(y, i); // e
L(j, 5) = -x2(x, i) * x1(w, i); // f
}
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
if ((L * h).isApprox(b, expected_precision)) {
if ((L * h).isApprox(b, expected_precision)) {
Homography2DNormalizedParameterization<double>::To(h, H);
return true;
} else {
@ -158,32 +157,30 @@ bool Homography2DFromCorrespondencesLinear(const Mat &x1,
// Default settings for homography estimation which should be suitable
// for a wide range of use cases.
EstimateHomographyOptions::EstimateHomographyOptions(void) :
use_normalization(true),
max_num_iterations(50),
expected_average_symmetric_distance(1e-16) {
EstimateHomographyOptions::EstimateHomographyOptions(void)
: use_normalization(true),
max_num_iterations(50),
expected_average_symmetric_distance(1e-16) {
}
namespace {
void GetNormalizedPoints(const Mat &original_points,
Mat *normalized_points,
Mat3 *normalization_matrix) {
void GetNormalizedPoints(const Mat& original_points,
Mat* normalized_points,
Mat3* normalization_matrix) {
IsotropicPreconditionerFromPoints(original_points, normalization_matrix);
ApplyTransformationToPoints(original_points,
*normalization_matrix,
normalized_points);
ApplyTransformationToPoints(
original_points, *normalization_matrix, normalized_points);
}
// Cost functor which computes symmetric geometric distance
// used for homography matrix refinement.
class HomographySymmetricGeometricCostFunctor {
public:
HomographySymmetricGeometricCostFunctor(const Vec2 &x,
const Vec2 &y)
: x_(x), y_(y) { }
HomographySymmetricGeometricCostFunctor(const Vec2& x, const Vec2& y)
: x_(x), y_(y) {}
template<typename T>
bool operator()(const T *homography_parameters, T *residuals) const {
template <typename T>
bool operator()(const T* homography_parameters, T* residuals) const {
typedef Eigen::Matrix<T, 3, 3> Mat3;
typedef Eigen::Matrix<T, 3, 1> Vec3;
@ -221,9 +218,10 @@ class HomographySymmetricGeometricCostFunctor {
// average value.
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
TerminationCheckingCallback(const Mat &x1, const Mat &x2,
const EstimateHomographyOptions &options,
Mat3 *H)
TerminationCheckingCallback(const Mat& x1,
const Mat& x2,
const EstimateHomographyOptions& options,
Mat3* H)
: options_(options), x1_(x1), x2_(x2), H_(H) {}
virtual ceres::CallbackReturnType operator()(
@ -236,9 +234,8 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
// Calculate average of symmetric geometric distance.
double average_distance = 0.0;
for (int i = 0; i < x1_.cols(); i++) {
average_distance = SymmetricGeometricDistance(*H_,
x1_.col(i),
x2_.col(i));
average_distance =
SymmetricGeometricDistance(*H_, x1_.col(i), x2_.col(i));
}
average_distance /= x1_.cols();
@ -250,10 +247,10 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
}
private:
const EstimateHomographyOptions &options_;
const Mat &x1_;
const Mat &x2_;
Mat3 *H_;
const EstimateHomographyOptions& options_;
const Mat& x1_;
const Mat& x2_;
Mat3* H_;
};
} // namespace
@ -261,10 +258,10 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
* euclidean coordinates.
*/
bool EstimateHomography2DFromCorrespondences(
const Mat &x1,
const Mat &x2,
const EstimateHomographyOptions &options,
Mat3 *H) {
const Mat& x1,
const Mat& x2,
const EstimateHomographyOptions& options,
Mat3* H) {
// TODO(sergey): Support homogenous coordinates, not just euclidean.
assert(2 == x1.rows());
@ -272,8 +269,7 @@ bool EstimateHomography2DFromCorrespondences(
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
Mat3 T1 = Mat3::Identity(),
T2 = Mat3::Identity();
Mat3 T1 = Mat3::Identity(), T2 = Mat3::Identity();
// Step 1: Algebraic homography estimation.
Mat x1_normalized, x2_normalized;
@ -300,16 +296,15 @@ bool EstimateHomography2DFromCorrespondences(
// Step 2: Refine matrix using Ceres minimizer.
ceres::Problem problem;
for (int i = 0; i < x1.cols(); i++) {
HomographySymmetricGeometricCostFunctor
*homography_symmetric_geometric_cost_function =
new HomographySymmetricGeometricCostFunctor(x1.col(i),
x2.col(i));
HomographySymmetricGeometricCostFunctor*
homography_symmetric_geometric_cost_function =
new HomographySymmetricGeometricCostFunctor(x1.col(i), x2.col(i));
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<
HomographySymmetricGeometricCostFunctor,
4, // num_residuals
9>(homography_symmetric_geometric_cost_function),
new ceres::AutoDiffCostFunction<HomographySymmetricGeometricCostFunctor,
4, // num_residuals
9>(
homography_symmetric_geometric_cost_function),
NULL,
H->data());
}
@ -335,15 +330,16 @@ bool EstimateHomography2DFromCorrespondences(
return summary.IsSolutionUsable();
}
// clang-format off
/**
* x2 ~ A * x1
* x2^t * Hi * A *x1 = 0
* H1 = H2 = H3 =
* H1 = H2 = H3 =
* | 0 0 0 1| |-x2w| |0 0 0 0| | 0 | | 0 0 1 0| |-x2z|
* | 0 0 0 0| -> | 0 | |0 0 1 0| -> |-x2z| | 0 0 0 0| -> | 0 |
* | 0 0 0 0| | 0 | |0-1 0 0| | x2y| |-1 0 0 0| | x2x|
* |-1 0 0 0| | x2x| |0 0 0 0| | 0 | | 0 0 0 0| | 0 |
* H4 = H5 = H6 =
* H4 = H5 = H6 =
* |0 0 0 0| | 0 | | 0 1 0 0| |-x2y| |0 0 0 0| | 0 |
* |0 0 0 1| -> |-x2w| |-1 0 0 0| -> | x2x| |0 0 0 0| -> | 0 |
* |0 0 0 0| | 0 | | 0 0 0 0| | 0 | |0 0 0 1| |-x2w|
@ -361,10 +357,11 @@ bool EstimateHomography2DFromCorrespondences(
* x2^t * H6 * A *x1 = (-x2w*i +x2z*m )*x1x + (-x2w*j +x2z*n )*x1y + (-x2w*k +x2z*o )*x1z + (-x2w*l +x2z*1 )*x1w = 0
*
* X = |a b c d e f g h i j k l m n o|^t
*/
bool Homography3DFromCorrespondencesLinear(const Mat &x1,
const Mat &x2,
Mat4 *H,
*/
// clang-format on
bool Homography3DFromCorrespondencesLinear(const Mat& x1,
const Mat& x2,
Mat4* H,
double expected_precision) {
assert(4 == x1.rows());
assert(5 <= x1.cols());
@ -379,68 +376,68 @@ bool Homography3DFromCorrespondencesLinear(const Mat &x1,
Mat b = Mat::Zero(n * 6, 1);
for (int i = 0; i < n; ++i) {
int j = 6 * i;
L(j, 0) = -x2(w, i) * x1(x, i); // a
L(j, 1) = -x2(w, i) * x1(y, i); // b
L(j, 2) = -x2(w, i) * x1(z, i); // c
L(j, 3) = -x2(w, i) * x1(w, i); // d
L(j, 12) = x2(x, i) * x1(x, i); // m
L(j, 13) = x2(x, i) * x1(y, i); // n
L(j, 14) = x2(x, i) * x1(z, i); // o
b(j, 0) = -x2(x, i) * x1(w, i);
L(j, 0) = -x2(w, i) * x1(x, i); // a
L(j, 1) = -x2(w, i) * x1(y, i); // b
L(j, 2) = -x2(w, i) * x1(z, i); // c
L(j, 3) = -x2(w, i) * x1(w, i); // d
L(j, 12) = x2(x, i) * x1(x, i); // m
L(j, 13) = x2(x, i) * x1(y, i); // n
L(j, 14) = x2(x, i) * x1(z, i); // o
b(j, 0) = -x2(x, i) * x1(w, i);
++j;
L(j, 4) = -x2(z, i) * x1(x, i); // e
L(j, 5) = -x2(z, i) * x1(y, i); // f
L(j, 6) = -x2(z, i) * x1(z, i); // g
L(j, 7) = -x2(z, i) * x1(w, i); // h
L(j, 8) = x2(y, i) * x1(x, i); // i
L(j, 9) = x2(y, i) * x1(y, i); // j
L(j, 10) = x2(y, i) * x1(z, i); // k
L(j, 11) = x2(y, i) * x1(w, i); // l
L(j, 4) = -x2(z, i) * x1(x, i); // e
L(j, 5) = -x2(z, i) * x1(y, i); // f
L(j, 6) = -x2(z, i) * x1(z, i); // g
L(j, 7) = -x2(z, i) * x1(w, i); // h
L(j, 8) = x2(y, i) * x1(x, i); // i
L(j, 9) = x2(y, i) * x1(y, i); // j
L(j, 10) = x2(y, i) * x1(z, i); // k
L(j, 11) = x2(y, i) * x1(w, i); // l
++j;
L(j, 0) = -x2(z, i) * x1(x, i); // a
L(j, 1) = -x2(z, i) * x1(y, i); // b
L(j, 2) = -x2(z, i) * x1(z, i); // c
L(j, 3) = -x2(z, i) * x1(w, i); // d
L(j, 8) = x2(x, i) * x1(x, i); // i
L(j, 9) = x2(x, i) * x1(y, i); // j
L(j, 10) = x2(x, i) * x1(z, i); // k
L(j, 11) = x2(x, i) * x1(w, i); // l
L(j, 0) = -x2(z, i) * x1(x, i); // a
L(j, 1) = -x2(z, i) * x1(y, i); // b
L(j, 2) = -x2(z, i) * x1(z, i); // c
L(j, 3) = -x2(z, i) * x1(w, i); // d
L(j, 8) = x2(x, i) * x1(x, i); // i
L(j, 9) = x2(x, i) * x1(y, i); // j
L(j, 10) = x2(x, i) * x1(z, i); // k
L(j, 11) = x2(x, i) * x1(w, i); // l
++j;
L(j, 4) = -x2(w, i) * x1(x, i); // e
L(j, 5) = -x2(w, i) * x1(y, i); // f
L(j, 6) = -x2(w, i) * x1(z, i); // g
L(j, 7) = -x2(w, i) * x1(w, i); // h
L(j, 12) = x2(y, i) * x1(x, i); // m
L(j, 13) = x2(y, i) * x1(y, i); // n
L(j, 14) = x2(y, i) * x1(z, i); // o
b(j, 0) = -x2(y, i) * x1(w, i);
L(j, 4) = -x2(w, i) * x1(x, i); // e
L(j, 5) = -x2(w, i) * x1(y, i); // f
L(j, 6) = -x2(w, i) * x1(z, i); // g
L(j, 7) = -x2(w, i) * x1(w, i); // h
L(j, 12) = x2(y, i) * x1(x, i); // m
L(j, 13) = x2(y, i) * x1(y, i); // n
L(j, 14) = x2(y, i) * x1(z, i); // o
b(j, 0) = -x2(y, i) * x1(w, i);
++j;
L(j, 0) = -x2(y, i) * x1(x, i); // a
L(j, 1) = -x2(y, i) * x1(y, i); // b
L(j, 2) = -x2(y, i) * x1(z, i); // c
L(j, 3) = -x2(y, i) * x1(w, i); // d
L(j, 4) = x2(x, i) * x1(x, i); // e
L(j, 5) = x2(x, i) * x1(y, i); // f
L(j, 6) = x2(x, i) * x1(z, i); // g
L(j, 7) = x2(x, i) * x1(w, i); // h
L(j, 4) = x2(x, i) * x1(x, i); // e
L(j, 5) = x2(x, i) * x1(y, i); // f
L(j, 6) = x2(x, i) * x1(z, i); // g
L(j, 7) = x2(x, i) * x1(w, i); // h
++j;
L(j, 8) = -x2(w, i) * x1(x, i); // i
L(j, 9) = -x2(w, i) * x1(y, i); // j
L(j, 8) = -x2(w, i) * x1(x, i); // i
L(j, 9) = -x2(w, i) * x1(y, i); // j
L(j, 10) = -x2(w, i) * x1(z, i); // k
L(j, 11) = -x2(w, i) * x1(w, i); // l
L(j, 12) = x2(z, i) * x1(x, i); // m
L(j, 13) = x2(z, i) * x1(y, i); // n
L(j, 14) = x2(z, i) * x1(z, i); // o
b(j, 0) = -x2(z, i) * x1(w, i);
L(j, 12) = x2(z, i) * x1(x, i); // m
L(j, 13) = x2(z, i) * x1(y, i); // n
L(j, 14) = x2(z, i) * x1(z, i); // o
b(j, 0) = -x2(z, i) * x1(w, i);
}
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
if ((L * h).isApprox(b, expected_precision)) {
if ((L * h).isApprox(b, expected_precision)) {
Homography3DNormalizedParameterization<double>::To(h, H);
return true;
} else {
@ -448,9 +445,9 @@ bool Homography3DFromCorrespondencesLinear(const Mat &x1,
}
}
double SymmetricGeometricDistance(const Mat3 &H,
const Vec2 &x1,
const Vec2 &x2) {
double SymmetricGeometricDistance(const Mat3& H,
const Vec2& x1,
const Vec2& x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
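For reference, a minimal usage sketch of the 2D estimation API reformatted above, assuming the header layout from this change; EstimateHomographyOptions is default-constructed, so no particular field names are relied upon:

// Sketch: estimate a 2D homography from four correspondences and report
// the average symmetric geometric distance.
#include <iostream>

#include "libmv/multiview/homography.h"

int main() {
  using namespace libmv;
  // Four points related by a pure translation of (1, 2).
  Mat x1(2, 4), x2(2, 4);
  x1 << 0, 1, 1, 0,
        0, 0, 1, 1;
  x2 << 1, 2, 2, 1,
        2, 2, 3, 3;
  Mat3 H;
  EstimateHomographyOptions options;  // Default options for the refinement step.
  if (EstimateHomography2DFromCorrespondences(x1, x2, options, &H)) {
    double average_distance = 0.0;
    for (int i = 0; i < x1.cols(); i++) {
      average_distance += SymmetricGeometricDistance(H, x1.col(i), x2.col(i));
    }
    std::cout << "Average distance: " << average_distance / x1.cols() << "\n";
  }
  return 0;
}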
View File
@ -49,11 +49,11 @@ namespace libmv {
* \return True if the transformation estimation has succeeded.
* \note There must be at least 4 non-collinear points.
*/
bool Homography2DFromCorrespondencesLinear(const Mat &x1,
const Mat &x2,
Mat3 *H,
double expected_precision =
EigenDouble::dummy_precision());
bool Homography2DFromCorrespondencesLinear(
const Mat& x1,
const Mat& x2,
Mat3* H,
double expected_precision = EigenDouble::dummy_precision());
/**
* This structure contains options that controls how the homography
@ -101,10 +101,10 @@ struct EstimateHomographyOptions {
* refinement.
*/
bool EstimateHomography2DFromCorrespondences(
const Mat &x1,
const Mat &x2,
const EstimateHomographyOptions &options,
Mat3 *H);
const Mat& x1,
const Mat& x2,
const EstimateHomographyOptions& options,
Mat3* H);
/**
* 3D Homography transformation estimation.
@ -129,20 +129,20 @@ bool EstimateHomography2DFromCorrespondences(
* \note Need at least 5 non-coplanar points
* \note Point coordinates must be in homogeneous coordinates
*/
bool Homography3DFromCorrespondencesLinear(const Mat &x1,
const Mat &x2,
Mat4 *H,
double expected_precision =
EigenDouble::dummy_precision());
bool Homography3DFromCorrespondencesLinear(
const Mat& x1,
const Mat& x2,
Mat4* H,
double expected_precision = EigenDouble::dummy_precision());
/**
* Calculate symmetric geometric cost:
*
* D(H * x1, x2)^2 + D(H^-1 * x2, x1)^2
*/
double SymmetricGeometricDistance(const Mat3 &H,
const Vec2 &x1,
const Vec2 &x2);
double SymmetricGeometricDistance(const Mat3& H,
const Vec2& x1,
const Vec2& x2);
} // namespace libmv
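A sketch of the 3D linear estimator declared above, reusing the five-point affine data from the Homography3DTest later in this change and the default expected_precision:

// Sketch: recover a 4x4 homography from five homogeneous 3D correspondences.
#include "libmv/multiview/homography.h"

int main() {
  using namespace libmv;
  Mat x1(4, 5);
  x1 << 0, 0, 1, 5, 2,
        0, 1, 2, 3, 5,
        0, 2, 0, 1, 5,
        1, 1, 1, 1, 1;
  Mat4 m;  // Ground-truth affine transform.
  m << 3, -1,  4,  1,
       6, -2, -3, -6,
       1,  0,  1,  2,
       0,  0,  0,  1;
  Mat x2 = x1;
  for (int i = 0; i < x2.cols(); ++i) {
    x2.col(i) = m * x1.col(i);
  }
  Mat4 H;
  bool ok = Homography3DFromCorrespondencesLinear(x1, x2, &H);
  return ok ? 0 : 1;  // H matches m up to scale on success.
}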
View File
@ -27,18 +27,18 @@ namespace libmv {
namespace homography {
namespace homography2D {
/**
* Structure for estimating the asymmetric error between a vector x2 and the
* transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
* \note It should be distributed as Chi-squared with k = 2.
*/
/**
* Structure for estimating the asymmetric error between a vector x2 and the
* transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
* \note It should be distributed as Chi-squared with k = 2.
*/
struct AsymmetricError {
/**
* Computes the asymmetric residuals between a set of 2D points x2 and the
* Computes the asymmetric residuals between a set of 2D points x2 and the
* transformed 2D point set x1 such that
* Residuals_i = x2_i - Psi(H * x1_i)
* Residuals_i = x2_i - Psi(H * x1_i)
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
@ -47,8 +47,7 @@ struct AsymmetricError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[out] dx A 2xN matrix of column vectors of residuals errors
*/
static void Residuals(const Mat &H, const Mat &x1,
const Mat &x2, Mat2X *dx) {
static void Residuals(const Mat& H, const Mat& x1, const Mat& x2, Mat2X* dx) {
dx->resize(2, x1.cols());
Mat3X x2h_est;
if (x1.rows() == 2)
@ -63,19 +62,18 @@ struct AsymmetricError {
*dx = HomogeneousToEuclidean(static_cast<Mat3X>(x2)) - *dx;
}
/**
* Computes the asymmetric residuals between a 2D point x2 and the transformed
* Computes the asymmetric residuals between a 2D point x2 and the transformed
* 2D point x1 such that
* Residuals = x2 - Psi(H * x1)
* Residuals = x2 - Psi(H * x1)
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[out] dx A vector of size 2 of the residual error
*/
static void Residuals(const Mat &H, const Vec &x1,
const Vec &x2, Vec2 *dx) {
static void Residuals(const Mat& H, const Vec& x1, const Vec& x2, Vec2* dx) {
Vec3 x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
@ -85,10 +83,10 @@ struct AsymmetricError {
*dx = x2 - x2h_est.head<2>() / x2h_est[2];
else
*dx = HomogeneousToEuclidean(static_cast<Vec3>(x2)) -
x2h_est.head<2>() / x2h_est[2];
x2h_est.head<2>() / x2h_est[2];
}
/**
* Computes the squared norm of the residuals between a set of 2D points x2
* Computes the squared norm of the residuals between a set of 2D points x2
* and the transformed 2D point set x1 such that
* Error = || x2 - Psi(H * x1) ||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
@ -99,70 +97,70 @@ struct AsymmetricError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \return The squared norm of the asymmetric residuals errors
*/
static double Error(const Mat &H, const Mat &x1, const Mat &x2) {
static double Error(const Mat& H, const Mat& x1, const Mat& x2) {
Mat2X dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
/**
* Computes the squared norm of the residuals between a 2D point x2 and the
* transformed 2D point x1 such that rms = || x2 - Psi(H * x1) ||^2
* Computes the squared norm of the residuals between a 2D point x2 and the
* transformed 2D point x1 such that rms = || x2 - Psi(H * x1) ||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the asymmetric residual error
*/
static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
static double Error(const Mat& H, const Vec& x1, const Vec& x2) {
Vec2 dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
};
/**
* Structure for estimating the symmetric error
* between a vector x2 and the transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
* \note It should be distributed as Chi-squared with k = 4.
*/
/**
* Structure for estimating the symmetric error
* between a vector x2 and the transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
* \note It should be distributed as Chi-squared with k = 4.
*/
struct SymmetricError {
/**
* Computes the squared norm of the residuals between x2 and the
* transformed x1 such that
* Computes the squared norm of the residuals between x2 and the
* transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the symmetric residuals errors
*/
static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
static double Error(const Mat& H, const Vec& x1, const Vec& x2) {
// TODO(keir): This is awesomely inefficient because it does a 3x3
// inversion for each evaluation.
Mat3 Hinv = H.inverse();
return AsymmetricError::Error(H, x1, x2) +
return AsymmetricError::Error(H, x1, x2) +
AsymmetricError::Error(Hinv, x2, x1);
}
// TODO(julien) Add residuals function \see AsymmetricError
};
/**
* Structure for estimating the algebraic error (cross product)
* between a vector x2 and the transformed x1 such that
* Error = ||[x2] * H * x1||^2
* where [x2] is the skew matrix of x2.
*/
/**
* Structure for estimating the algebraic error (cross product)
* between a vector x2 and the transformed x1 such that
* Error = ||[x2] * H * x1||^2
* where [x2] is the skew matrix of x2.
*/
struct AlgebraicError {
// TODO(julien) Make an AlgebraicError2Rows and AlgebraicError3Rows
/**
* Computes the algebraic residuals (cross product) between a set of 2D
* points x2 and the transformed 2D point set x1 such that
* Computes the algebraic residuals (cross product) between a set of 2D
* points x2 and the transformed 2D point set x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
@ -171,8 +169,7 @@ struct AlgebraicError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[out] dx A 3xN matrix of column vectors of residuals errors
*/
static void Residuals(const Mat &H, const Mat &x1,
const Mat &x2, Mat3X *dx) {
static void Residuals(const Mat& H, const Mat& x1, const Mat& x2, Mat3X* dx) {
dx->resize(3, x1.cols());
Vec3 col;
for (int i = 0; i < x1.cols(); ++i) {
@ -181,18 +178,17 @@ struct AlgebraicError {
}
}
/**
* Computes the algebraic residuals (cross product) between a 2D point x2
* and the transformed 2D point x1 such that
* Computes the algebraic residuals (cross product) between a 2D point x2
* and the transformed 2D point x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[out] dx A vector of size 3 of the residual error
*/
static void Residuals(const Mat &H, const Vec &x1,
const Vec &x2, Vec3 *dx) {
static void Residuals(const Mat& H, const Vec& x1, const Vec& x2, Vec3* dx) {
Vec3 x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
@ -206,8 +202,8 @@ struct AlgebraicError {
// identical 3x3 skew matrix for each evaluation.
}
/**
* Computes the squared norm of the algebraic residuals between a set of 2D
* points x2 and the transformed 2D point set x1 such that
* Computes the squared norm of the algebraic residuals between a set of 2D
* points x2 and the transformed 2D point set x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
@ -216,23 +212,23 @@ struct AlgebraicError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \return The squared norm of the asymmetric residuals errors
*/
static double Error(const Mat &H, const Mat &x1, const Mat &x2) {
static double Error(const Mat& H, const Mat& x1, const Mat& x2) {
Mat3X dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
/**
* Computes the squared norm of the algebraic residuals between a 2D point x2
* and the transformed 2D point x1 such that
* Computes the squared norm of the algebraic residuals between a 2D point x2
* and the transformed 2D point x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the asymmetric residual error
*/
static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
static double Error(const Mat& H, const Vec& x1, const Vec& x2) {
Vec3 dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
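For illustration, a minimal sketch of evaluating one of the error functors above on a single correspondence (header path assumed from this change):

// Sketch: asymmetric transfer error of a point pair under a known homography.
#include "libmv/multiview/homography_error.h"

int main() {
  using namespace libmv;
  using homography::homography2D::AsymmetricError;
  Mat3 H;  // Pure translation by (2, 3).
  H << 1, 0, 2,
       0, 1, 3,
       0, 0, 1;
  Vec2 x1(1.0, 1.0);
  Vec2 x2(3.0, 4.0);  // Exactly H * x1, so the residual is zero.
  Vec2 dx;
  AsymmetricError::Residuals(H, x1, x2, &dx);        // dx == (0, 0)
  double error = AsymmetricError::Error(H, x1, x2);  // 0.0
  return error < 1e-12 ? 0 : 1;
}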
View File
@ -25,64 +25,72 @@
namespace libmv {
/** A parameterization of the 2D homography matrix that uses 8 parameters so
/** A parameterization of the 2D homography matrix that uses 8 parameters so
* that the matrix is normalized (H(2,2) == 1).
* The homography matrix H is built from a list of 8 parameters (a, b,...g, h)
* as follows
* |a b c|
* |a b c|
* H = |d e f|
* |g h 1|
* |g h 1|
*/
template<typename T = double>
template <typename T = double>
class Homography2DNormalizedParameterization {
public:
typedef Eigen::Matrix<T, 8, 1> Parameters; // a, b, ... g, h
typedef Eigen::Matrix<T, 3, 3> Parameterized; // H
/// Convert from the 8 parameters to a H matrix.
static void To(const Parameters &p, Parameterized *h) {
static void To(const Parameters& p, Parameterized* h) {
// clang-format off
*h << p(0), p(1), p(2),
p(3), p(4), p(5),
p(6), p(7), 1.0;
// clang-format on
}
/// Convert from a H matrix to the 8 parameters.
static void From(const Parameterized &h, Parameters *p) {
static void From(const Parameterized& h, Parameters* p) {
// clang-format off
*p << h(0, 0), h(0, 1), h(0, 2),
h(1, 0), h(1, 1), h(1, 2),
h(2, 0), h(2, 1);
// clang-format on
}
};
/** A parameterization of the 3D homography matrix that uses 15 parameters so
/** A parameterization of the 3D homography matrix that uses 15 parameters so
* that the matrix is normalized (H(3,3) == 1).
* The homography matrix H is built from a list of 15 parameters (a, b,...n, o)
* as follows
* |a b c d|
* |a b c d|
* H = |e f g h|
* |i j k l|
* |m n o 1|
* |m n o 1|
*/
template<typename T = double>
template <typename T = double>
class Homography3DNormalizedParameterization {
public:
typedef Eigen::Matrix<T, 15, 1> Parameters; // a, b, ... n, o
typedef Eigen::Matrix<T, 4, 4> Parameterized; // H
typedef Eigen::Matrix<T, 15, 1> Parameters; // a, b, ... n, o
typedef Eigen::Matrix<T, 4, 4> Parameterized; // H
/// Convert from the 15 parameters to a H matrix.
static void To(const Parameters &p, Parameterized *h) {
static void To(const Parameters& p, Parameterized* h) {
// clang-format off
*h << p(0), p(1), p(2), p(3),
p(4), p(5), p(6), p(7),
p(8), p(9), p(10), p(11),
p(12), p(13), p(14), 1.0;
// clang-format on
}
/// Convert from a H matrix to the 15 parameters.
static void From(const Parameterized &h, Parameters *p) {
static void From(const Parameterized& h, Parameters* p) {
// clang-format off
*p << h(0, 0), h(0, 1), h(0, 2), h(0, 3),
h(1, 0), h(1, 1), h(1, 2), h(1, 3),
h(2, 0), h(2, 1), h(2, 2), h(2, 3),
h(3, 0), h(3, 1), h(3, 2);
// clang-format on
}
};
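A small round-trip sketch for the 8-parameter form above (header path assumed from this change):

// Sketch: parameters -> H -> parameters with the normalized 2D parameterization.
#include "libmv/multiview/homography_parameterization.h"

int main() {
  using namespace libmv;
  typedef Homography2DNormalizedParameterization<double> Param;
  Param::Parameters p;  // a, b, ..., g, h
  p << 1, 2, 3,
       4, 5, 6,
       7, 8;
  Param::Parameterized H;
  Param::To(p, &H);  // Builds H with H(2, 2) == 1.
  Param::Parameters p_roundtrip;
  Param::From(H, &p_roundtrip);  // Recovers the same eight parameters.
  return (p - p_roundtrip).norm() < 1e-12 ? 0 : 1;
}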
View File
@ -18,10 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/multiview/homography.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/homography.h"
#include "testing/testing.h"
namespace {
using namespace libmv;
@ -34,9 +34,7 @@ namespace {
// TODO(sergey): Consider using this in all tests since possible homography
// matrix is not fixed to a single value and different-looking matrices
// might actually correspond to the same exact transform.
void CheckHomography2DTransform(const Mat3 &H,
const Mat &x1,
const Mat &x2) {
void CheckHomography2DTransform(const Mat3& H, const Mat& x1, const Mat& x2) {
for (int i = 0; i < x2.cols(); ++i) {
Vec3 x2_expected = x2.col(i);
Vec3 x2_observed = H * x1.col(i);
@ -49,15 +47,19 @@ void CheckHomography2DTransform(const Mat3 &H,
TEST(Homography2DTest, Rotation45AndTranslationXY) {
Mat x1(3, 4);
// clang-format off
x1 << 0, 1, 0, 5,
0, 0, 2, 3,
1, 1, 1, 1;
// clang-format on
double angle = 45.0;
Mat3 m;
// clang-format off
m << cos(angle), -sin(angle), -2,
sin(angle), cos(angle), 5,
0, 0, 1;
// clang-format on
Mat x2 = x1;
// Transform point from ground truth matrix
@ -76,13 +78,17 @@ TEST(Homography2DTest, Rotation45AndTranslationXY) {
TEST(Homography2DTest, AffineGeneral4) {
// TODO(julien) find why it doesn't work with 4 points!!!
Mat x1(3, 4);
// clang-format off
x1 << 0, 1, 0, 2,
0, 0, 1, 2,
1, 1, 1, 1;
// clang-format on
Mat3 m;
// clang-format off
m << 3, -1, 4,
6, -2, -3,
0, 0, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@ -109,13 +115,17 @@ TEST(Homography2DTest, AffineGeneral4) {
TEST(Homography2DTest, AffineGeneral5) {
Mat x1(3, 5);
// clang-format off
x1 << 0, 1, 0, 2, 5,
0, 0, 1, 2, 2,
1, 1, 1, 1, 1;
// clang-format on
Mat3 m;
// clang-format off
m << 3, -1, 4,
6, -2, -3,
0, 0, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i)
@ -142,13 +152,17 @@ TEST(Homography2DTest, AffineGeneral5) {
TEST(Homography2DTest, HomographyGeneral) {
Mat x1(3, 4);
// clang-format off
x1 << 0, 1, 0, 5,
0, 0, 2, 3,
1, 1, 1, 1;
// clang-format on
Mat3 m;
// clang-format off
m << 3, -1, 4,
6, -2, -3,
1, -3, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i)
@ -164,10 +178,12 @@ TEST(Homography2DTest, HomographyGeneral) {
TEST(Homography3DTest, RotationAndTranslationXYZ) {
Mat x1(4, 5);
// clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
// clang-format on
Mat4 M;
M.setIdentity();
/*
@ -178,24 +194,30 @@ TEST(Homography3DTest, RotationAndTranslationXYZ) {
// Rotation on x + translation
double angle = 45.0;
Mat4 rot;
// clang-format off
rot << 1, 0, 0, 1,
0, cos(angle), -sin(angle), 3,
0, sin(angle), cos(angle), -2,
0, 0, 0, 1;
// clang-format on
M *= rot;
// Rotation on y
angle = 25.0;
// clang-format off
rot << cos(angle), 0, sin(angle), 0,
0, 1, 0, 0,
-sin(angle), 0, cos(angle), 0,
0, 0, 0, 1;
// clang-format on
M *= rot;
// Rotation on z
angle = 5.0;
// clang-format off
rot << cos(angle), -sin(angle), 0, 0,
sin(angle), cos(angle), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1;
// clang-format on
M *= rot;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@ -212,15 +234,19 @@ TEST(Homography3DTest, RotationAndTranslationXYZ) {
TEST(Homography3DTest, AffineGeneral) {
Mat x1(4, 5);
// clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
// clang-format on
Mat4 m;
// clang-format off
m << 3, -1, 4, 1,
6, -2, -3, -6,
1, 0, 1, 2,
0, 0, 0, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@ -236,15 +262,19 @@ TEST(Homography3DTest, AffineGeneral) {
TEST(Homography3DTest, HomographyGeneral) {
Mat x1(4, 5);
// clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
// clang-format on
Mat4 m;
// clang-format off
m << 3, -1, 4, 1,
6, -2, -3, -6,
1, 0, 1, 2,
-3, 1, 0, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
View File
@ -34,22 +34,22 @@ namespace libmv {
// x's are 2D coordinates (x,y,1) in each image; Ps are projective cameras. The
// output, X, is a homogeneous four-vector.
template<typename T>
void NViewTriangulate(const Matrix<T, 2, Dynamic> &x,
const vector<Matrix<T, 3, 4> > &Ps,
Matrix<T, 4, 1> *X) {
template <typename T>
void NViewTriangulate(const Matrix<T, 2, Dynamic>& x,
const vector<Matrix<T, 3, 4>>& Ps,
Matrix<T, 4, 1>* X) {
int nviews = x.cols();
assert(nviews == Ps.size());
Matrix<T, Dynamic, Dynamic> design(3*nviews, 4 + nviews);
Matrix<T, Dynamic, Dynamic> design(3 * nviews, 4 + nviews);
design.setConstant(0.0);
for (int i = 0; i < nviews; i++) {
design.template block<3, 4>(3*i, 0) = -Ps[i];
design(3*i + 0, 4 + i) = x(0, i);
design(3*i + 1, 4 + i) = x(1, i);
design(3*i + 2, 4 + i) = 1.0;
design.template block<3, 4>(3 * i, 0) = -Ps[i];
design(3 * i + 0, 4 + i) = x(0, i);
design(3 * i + 1, 4 + i) = x(1, i);
design(3 * i + 2, 4 + i) = 1.0;
}
Matrix<T, Dynamic, 1> X_and_alphas;
Matrix<T, Dynamic, 1> X_and_alphas;
Nullspace(&design, &X_and_alphas);
X->resize(4);
*X = X_and_alphas.head(4);
@ -60,16 +60,16 @@ void NViewTriangulate(const Matrix<T, 2, Dynamic> &x,
// This method uses the algebraic distance approximation.
// Note that this method works better when the 2D points are normalized
// with an isotropic normalization.
template<typename T>
void NViewTriangulateAlgebraic(const Matrix<T, 2, Dynamic> &x,
const vector<Matrix<T, 3, 4> > &Ps,
Matrix<T, 4, 1> *X) {
template <typename T>
void NViewTriangulateAlgebraic(const Matrix<T, 2, Dynamic>& x,
const vector<Matrix<T, 3, 4>>& Ps,
Matrix<T, 4, 1>* X) {
int nviews = x.cols();
assert(nviews == Ps.size());
Matrix<T, Dynamic, 4> design(2*nviews, 4);
Matrix<T, Dynamic, 4> design(2 * nviews, 4);
for (int i = 0; i < nviews; i++) {
design.template block<2, 4>(2*i, 0) = SkewMatMinimal(x.col(i)) * Ps[i];
design.template block<2, 4>(2 * i, 0) = SkewMatMinimal(x.col(i)) * Ps[i];
}
X->resize(4);
Nullspace(&design, X);
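A usage sketch for the templated triangulation above; the two cameras and the point are hypothetical, and Project comes from projection.h elsewhere in this change:

// Sketch: triangulate one point observed by two cameras.
#include "libmv/multiview/nviewtriangulation.h"
#include "libmv/multiview/projection.h"

int main() {
  using namespace libmv;
  Mat34 P1, P2;
  P1 << 1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, 0;
  P2 << 1, 0, 0, -1,  // Second camera: unit baseline along x.
        0, 1, 0, 0,
        0, 0, 1, 0;
  vector<Mat34> Ps;
  Ps.push_back(P1);
  Ps.push_back(P2);
  Vec3 X_gt(0.5, 0.2, 10.0);
  Mat2X xs(2, 2);
  for (int i = 0; i < 2; ++i) {
    xs.col(i) = Project(Ps[i], X_gt);  // Noise-free observations.
  }
  Vec4 X;
  NViewTriangulate(xs, Ps, &X);
  Vec3 X_euclidean = X.head<3>() / X(3);
  return (X_euclidean - X_gt).norm() < 1e-9 ? 0 : 1;
}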
View File
@ -54,7 +54,7 @@ TEST(NViewTriangulate, FiveViews) {
// Check reprojection error. Should be nearly zero.
for (int j = 0; j < nviews; ++j) {
Vec3 x_reprojected = Ps[j]*X;
Vec3 x_reprojected = Ps[j] * X;
x_reprojected /= x_reprojected(2);
double error = (x_reprojected.head(2) - xs.col(j)).norm();
EXPECT_NEAR(error, 0.0, 1e-9);
@ -84,7 +84,7 @@ TEST(NViewTriangulateAlgebraic, FiveViews) {
// Check reprojection error. Should be nearly zero.
for (int j = 0; j < nviews; ++j) {
Vec3 x_reprojected = Ps[j]*X;
Vec3 x_reprojected = Ps[j] * X;
x_reprojected /= x_reprojected(2);
double error = (x_reprojected.head<2>() - xs.col(j)).norm();
EXPECT_NEAR(error, 0.0, 1e-9);
View File
@ -24,8 +24,9 @@
namespace libmv {
static bool Build_Minimal2Point_PolynomialFactor(
const Mat & x1, const Mat & x2,
double * P) { // P must be a double[4]
const Mat& x1,
const Mat& x2,
double* P) { // P must be a double[4]
assert(2 == x1.rows());
assert(2 == x1.cols());
assert(x1.rows() == x2.rows());
@ -40,11 +41,11 @@ static bool Build_Minimal2Point_PolynomialFactor(
Vec yx2 = (x2.col(1)).transpose();
double b12 = xx2.dot(yx2);
double a1 = xx1.squaredNorm();
double a2 = yx1.squaredNorm();
double a1 = xx1.squaredNorm();
double a2 = yx1.squaredNorm();
double b1 = xx2.squaredNorm();
double b2 = yx2.squaredNorm();
double b1 = xx2.squaredNorm();
double b2 = yx2.squaredNorm();
// Build the 3rd degree polynomial in F^2.
//
@ -52,10 +53,12 @@ static bool Build_Minimal2Point_PolynomialFactor(
//
// Coefficients in ascending powers of alpha, i.e. P[N]*x^N.
// Run panography_coeffs.py to get the below coefficients.
P[0] = b1*b2*a12*a12-a1*a2*b12*b12;
P[1] = -2*a1*a2*b12+2*a12*b1*b2+b1*a12*a12+b2*a12*a12-a1*b12*b12-a2*b12*b12;
P[2] = b1*b2-a1*a2-2*a1*b12-2*a2*b12+2*a12*b1+2*a12*b2+a12*a12-b12*b12;
P[3] = b1+b2-2*b12-a1-a2+2*a12;
P[0] = b1 * b2 * a12 * a12 - a1 * a2 * b12 * b12;
P[1] = -2 * a1 * a2 * b12 + 2 * a12 * b1 * b2 + b1 * a12 * a12 +
b2 * a12 * a12 - a1 * b12 * b12 - a2 * b12 * b12;
P[2] = b1 * b2 - a1 * a2 - 2 * a1 * b12 - 2 * a2 * b12 + 2 * a12 * b1 +
2 * a12 * b2 + a12 * a12 - b12 * b12;
P[3] = b1 + b2 - 2 * b12 - a1 - a2 + 2 * a12;
// If P[3] is equal to 0 we get ill-conditioned data
return (P[3] != 0.0);
@ -67,8 +70,9 @@ static bool Build_Minimal2Point_PolynomialFactor(
//
// [1] M. Brown and R. Hartley and D. Nister. Minimal Solutions for Panoramic
// Stitching. CVPR07.
void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
vector<double> *fs) {
void F_FromCorrespondance_2points(const Mat& x1,
const Mat& x2,
vector<double>* fs) {
// Build Polynomial factor to get squared focal value.
double P[4];
Build_Minimal2Point_PolynomialFactor(x1, x2, &P[0]);
@ -79,8 +83,8 @@ void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
//
double roots[3];
int num_roots = SolveCubicPolynomial(P, roots);
for (int i = 0; i < num_roots; ++i) {
if (roots[i] > 0.0) {
for (int i = 0; i < num_roots; ++i) {
if (roots[i] > 0.0) {
fs->push_back(sqrt(roots[i]));
}
}
@ -92,17 +96,18 @@ void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
// K. Arun, T. Huang and D. Blostein. Least-squares fitting of two 3-D point
// sets. IEEE Transactions on Pattern Analysis and Machine Intelligence,
// 9:698-700, 1987.
void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
void GetR_FixedCameraCenter(const Mat& x1,
const Mat& x2,
const double focal,
Mat3 *R) {
Mat3* R) {
assert(3 == x1.rows());
assert(2 <= x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
// Build simplified K matrix
Mat3 K(Mat3::Identity() * 1.0/focal);
K(2, 2)= 1.0;
Mat3 K(Mat3::Identity() * 1.0 / focal);
K(2, 2) = 1.0;
// Build the correlation matrix; equation (22) in [1].
Mat3 C = Mat3::Zero();
@ -115,9 +120,9 @@ void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
// Solve for rotation. Equations (24) and (25) in [1].
Eigen::JacobiSVD<Mat> svd(C, Eigen::ComputeThinU | Eigen::ComputeThinV);
Mat3 scale = Mat3::Identity();
scale(2, 2) = ((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0)
? 1.0
: -1.0;
scale(2, 2) =
((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0) ? 1.0
: -1.0;
(*R) = svd.matrixU() * scale * svd.matrixV().transpose();
}
View File
@ -22,9 +22,9 @@
#ifndef LIBMV_MULTIVIEW_PANOGRAPHY_H
#define LIBMV_MULTIVIEW_PANOGRAPHY_H
#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/base/vector.h"
namespace libmv {
@ -53,8 +53,9 @@ namespace libmv {
// K = [0 f 0]
// [0 0 1]
//
void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
vector<double> *fs);
void F_FromCorrespondance_2points(const Mat& x1,
const Mat& x2,
vector<double>* fs);
// Compute the 3x3 rotation matrix that fits two 3D point clouds in the least
// square sense. The method is from:
@ -90,9 +91,10 @@ void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
//
// R = arg min || X2 - R * x1 ||
//
void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
void GetR_FixedCameraCenter(const Mat& x1,
const Mat& x2,
const double focal,
Mat3 *R);
Mat3* R);
} // namespace libmv
View File
@ -25,7 +25,7 @@ namespace libmv {
namespace panography {
namespace kernel {
void TwoPointSolver::Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs) {
void TwoPointSolver::Solve(const Mat& x1, const Mat& x2, vector<Mat3>* Hs) {
// Solve for the focal lengths.
vector<double> fs;
F_FromCorrespondance_2points(x1, x2, &fs);
@ -34,7 +34,7 @@ void TwoPointSolver::Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs) {
Mat x1h, x2h;
EuclideanToHomogeneous(x1, &x1h);
EuclideanToHomogeneous(x2, &x2h);
for (int i = 0; i < fs.size(); ++i) {
for (int i = 0; i < fs.size(); ++i) {
Mat3 K1 = Mat3::Identity() * fs[i];
K1(2, 2) = 1.0;
View File
@ -23,9 +23,9 @@
#include "libmv/base/vector.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/homography_error.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/two_view_kernel.h"
#include "libmv/multiview/homography_error.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -34,18 +34,18 @@ namespace kernel {
struct TwoPointSolver {
enum { MINIMUM_SAMPLES = 2 };
static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs);
static void Solve(const Mat& x1, const Mat& x2, vector<Mat3>* Hs);
};
typedef two_view::kernel::Kernel<
TwoPointSolver, homography::homography2D::AsymmetricError, Mat3>
UnnormalizedKernel;
typedef two_view::kernel::
Kernel<TwoPointSolver, homography::homography2D::AsymmetricError, Mat3>
UnnormalizedKernel;
typedef two_view::kernel::Kernel<
two_view::kernel::NormalizedSolver<TwoPointSolver, UnnormalizerI>,
homography::homography2D::AsymmetricError,
Mat3>
Kernel;
two_view::kernel::NormalizedSolver<TwoPointSolver, UnnormalizerI>,
homography::homography2D::AsymmetricError,
Mat3>
Kernel;
} // namespace kernel
} // namespace panography
View File
@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/logging/logging.h"
#include "libmv/multiview/panography.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/panography_kernel.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
@ -30,18 +30,16 @@ namespace {
TEST(Panography, PrintSomeSharedFocalEstimationValues) {
Mat x1(2, 2), x2(2, 2);
x1<< 158, 78,
124, 113;
x2<< 300, 214,
125, 114;
x1 << 158, 78, 124, 113;
x2 << 300, 214, 125, 114;
// Normalize data (set principal point 0,0 and image border to 1.0).
x1.block<1, 2>(0, 0) /= 320;
x1.block<1, 2>(1, 0) /= 240;
x2.block<1, 2>(0, 0) /= 320;
x2.block<1, 2>(1, 0) /= 240;
x1+=Mat2::Constant(0.5);
x2+=Mat2::Constant(0.5);
x1 += Mat2::Constant(0.5);
x2 += Mat2::Constant(0.5);
vector<double> fs;
F_FromCorrespondance_2points(x1, x2, &fs);
@ -53,9 +51,11 @@ TEST(Panography, PrintSomeSharedFocalEstimationValues) {
TEST(Panography, GetR_FixedCameraCenterWithIdentity) {
Mat x1(3, 3);
// clang-format off
x1 << 0.5, 0.6, 0.7,
0.5, 0.5, 0.4,
10.0, 10.0, 10.0;
// clang-format on
Mat3 R;
GetR_FixedCameraCenter(x1, x1, 1.0, &R);
@ -68,16 +68,20 @@ TEST(Panography, Homography_GetR_Test_PitchY30) {
int n = 3;
Mat x1(3, n);
// clang-format off
x1 << 0.5, 0.6, 0.7,
0.5, 0.5, 0.4,
10, 10, 10;
// clang-format on
Mat x2 = x1;
const double alpha = 30.0 * M_PI / 180.0;
Mat3 rotY;
// clang-format off
rotY << cos(alpha), 0, -sin(alpha),
0, 1, 0,
sin(alpha), 0, cos(alpha);
// clang-format on
for (int i = 0; i < n; ++i) {
x2.block<3, 1>(0, i) = rotY * x1.col(i);
@ -101,17 +105,23 @@ TEST(Panography, Homography_GetR_Test_PitchY30) {
TEST(MinimalPanoramic, Real_Case_Kernel) {
const int n = 2;
Mat x1(2, n); // From image 0.jpg
// clang-format off
x1<< 158, 78,
124, 113;
// clang-format on
Mat x2(2, n); // From image 3.jpg
// clang-format off
x2<< 300, 214,
125, 114;
// clang-format on
Mat3 Ground_TruthHomography;
// clang-format off
Ground_TruthHomography<< 1, 0.02, 129.83,
-0.02, 1.012, 0.07823,
0, 0, 1;
// clang-format on
vector<Mat3> Hs;
@ -130,7 +140,7 @@ TEST(MinimalPanoramic, Real_Case_Kernel) {
// Assert that residuals are small enough
for (int i = 0; i < n; ++i) {
Vec x1p = H * x1h.col(i);
Vec residuals = x1p/x1p(2) - x2h.col(i);
Vec residuals = x1p / x1p(2) - x2h.col(i);
EXPECT_MATRIX_NEAR_ZERO(residuals, 1e-5);
}
}
View File
@ -23,13 +23,13 @@
namespace libmv {
void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P) {
void P_From_KRt(const Mat3& K, const Mat3& R, const Vec3& t, Mat34* P) {
P->block<3, 3>(0, 0) = R;
P->col(3) = t;
(*P) = K * (*P);
}
void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
void KRt_From_P(const Mat34& P, Mat3* Kp, Mat3* Rp, Vec3* tp) {
// Decompose using the RQ decomposition HZ A4.1.1 pag.579.
Mat3 K = P.block(0, 0, 3, 3);
@ -44,9 +44,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qx;
// clang-format off
Qx << 1, 0, 0,
0, c, -s,
0, s, c;
// clang-format on
K = K * Qx;
Q = Qx.transpose() * Q;
}
@ -58,9 +60,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qy;
// clang-format off
Qy << c, 0, s,
0, 1, 0,
-s, 0, c;
// clang-format on
K = K * Qy;
Q = Qy.transpose() * Q;
}
@ -72,9 +76,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qz;
// clang-format off
Qz << c, -s, 0,
s, c, 0,
0, 0, 1;
// clang-format on
K = K * Qz;
Q = Qz.transpose() * Q;
}
@ -92,17 +98,21 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
}
if (K(1, 1) < 0) {
Mat3 S;
// clang-format off
S << 1, 0, 0,
0, -1, 0,
0, 0, 1;
// clang-format on
K = K * S;
R = S * R;
}
if (K(0, 0) < 0) {
Mat3 S;
// clang-format off
S << -1, 0, 0,
0, 1, 0,
0, 0, 1;
// clang-format on
K = K * S;
R = S * R;
}
@ -122,26 +132,30 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
*tp = t;
}
void ProjectionShiftPrincipalPoint(const Mat34 &P,
const Vec2 &principal_point,
const Vec2 &principal_point_new,
Mat34 *P_new) {
void ProjectionShiftPrincipalPoint(const Mat34& P,
const Vec2& principal_point,
const Vec2& principal_point_new,
Mat34* P_new) {
Mat3 T;
// clang-format off
T << 1, 0, principal_point_new(0) - principal_point(0),
0, 1, principal_point_new(1) - principal_point(1),
0, 0, 1;
// clang-format on
*P_new = T * P;
}
void ProjectionChangeAspectRatio(const Mat34 &P,
const Vec2 &principal_point,
void ProjectionChangeAspectRatio(const Mat34& P,
const Vec2& principal_point,
double aspect_ratio,
double aspect_ratio_new,
Mat34 *P_new) {
Mat34* P_new) {
Mat3 T;
// clang-format off
T << 1, 0, 0,
0, aspect_ratio_new / aspect_ratio, 0,
0, 0, 1;
// clang-format on
Mat34 P_temp;
ProjectionShiftPrincipalPoint(P, principal_point, Vec2(0, 0), &P_temp);
@ -149,7 +163,7 @@ void ProjectionChangeAspectRatio(const Mat34 &P,
ProjectionShiftPrincipalPoint(P_temp, Vec2(0, 0), principal_point, P_new);
}
void HomogeneousToEuclidean(const Mat &H, Mat *X) {
void HomogeneousToEuclidean(const Mat& H, Mat* X) {
int d = H.rows() - 1;
int n = H.cols();
X->resize(d, n);
@ -161,29 +175,29 @@ void HomogeneousToEuclidean(const Mat &H, Mat *X) {
}
}
void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e) {
void HomogeneousToEuclidean(const Mat3X& h, Mat2X* e) {
e->resize(2, h.cols());
e->row(0) = h.row(0).array() / h.row(2).array();
e->row(1) = h.row(1).array() / h.row(2).array();
}
void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e) {
void HomogeneousToEuclidean(const Mat4X& h, Mat3X* e) {
e->resize(3, h.cols());
e->row(0) = h.row(0).array() / h.row(3).array();
e->row(1) = h.row(1).array() / h.row(3).array();
e->row(2) = h.row(2).array() / h.row(3).array();
}
void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X) {
void HomogeneousToEuclidean(const Vec3& H, Vec2* X) {
double w = H(2);
*X << H(0) / w, H(1) / w;
}
void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X) {
void HomogeneousToEuclidean(const Vec4& H, Vec3* X) {
double w = H(3);
*X << H(0) / w, H(1) / w, H(2) / w;
}
void EuclideanToHomogeneous(const Mat &X, Mat *H) {
void EuclideanToHomogeneous(const Mat& X, Mat* H) {
int d = X.rows();
int n = X.cols();
H->resize(d + 1, n);
@ -191,32 +205,32 @@ void EuclideanToHomogeneous(const Mat &X, Mat *H) {
H->row(d).setOnes();
}
void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H) {
void EuclideanToHomogeneous(const Vec2& X, Vec3* H) {
*H << X(0), X(1), 1;
}
void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H) {
void EuclideanToHomogeneous(const Vec3& X, Vec4* H) {
*H << X(0), X(1), X(2), 1;
}
// TODO(julien) Call conditioning.h/ApplyTransformationToPoints ?
void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n) {
void EuclideanToNormalizedCamera(const Mat2X& x, const Mat3& K, Mat2X* n) {
Mat3X x_image_h;
EuclideanToHomogeneous(x, &x_image_h);
Mat3X x_camera_h = K.inverse() * x_image_h;
HomogeneousToEuclidean(x_camera_h, n);
}
void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n) {
void HomogeneousToNormalizedCamera(const Mat3X& x, const Mat3& K, Mat2X* n) {
Mat3X x_camera_h = K.inverse() * x;
HomogeneousToEuclidean(x_camera_h, n);
}
double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X) {
return (R*X)(2) + t(2);
double Depth(const Mat3& R, const Vec3& t, const Vec3& X) {
return (R * X)(2) + t(2);
}
double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X) {
double Depth(const Mat3& R, const Vec3& t, const Vec4& X) {
Vec3 Xe = X.head<3>() / X(3);
return Depth(R, t, Xe);
}
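A round-trip sketch for the pair of functions above, reusing the K and t values from the Projection.P_From_KRt test later in this change with an identity rotation:

// Sketch: compose P = K[R|t], then decompose it back into K, R, t.
#include "libmv/multiview/projection.h"

int main() {
  using namespace libmv;
  Mat3 K, R;
  K << 10,  1, 30,
        0, 20, 40,
        0,  0,  1;
  R.setIdentity();
  Vec3 t(1, 2, 3);
  Mat34 P;
  P_From_KRt(K, R, t, &P);
  Mat3 K_recovered, R_recovered;
  Vec3 t_recovered;
  KRt_From_P(P, &K_recovered, &R_recovered, &t_recovered);
  // K, R and t come back up to numerical precision.
  return (K - K_recovered).norm() < 1e-9 ? 0 : 1;
}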
View File
@ -25,108 +25,108 @@
namespace libmv {
void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P);
void KRt_From_P(const Mat34 &P, Mat3 *K, Mat3 *R, Vec3 *t);
void P_From_KRt(const Mat3& K, const Mat3& R, const Vec3& t, Mat34* P);
void KRt_From_P(const Mat34& P, Mat3* K, Mat3* R, Vec3* t);
// Applies a change of basis to the image coordinates of the projection matrix
// so that the principal point becomes principal_point_new.
void ProjectionShiftPrincipalPoint(const Mat34 &P,
const Vec2 &principal_point,
const Vec2 &principal_point_new,
Mat34 *P_new);
void ProjectionShiftPrincipalPoint(const Mat34& P,
const Vec2& principal_point,
const Vec2& principal_point_new,
Mat34* P_new);
// Applies a change of basis to the image coordinates of the projection matrix
// so that the aspect ratio becomes aspect_ratio_new. This is done by
// stretching the y axis. The aspect ratio is defined as the quotient between
// the focal length of the y and the x axis.
void ProjectionChangeAspectRatio(const Mat34 &P,
const Vec2 &principal_point,
void ProjectionChangeAspectRatio(const Mat34& P,
const Vec2& principal_point,
double aspect_ratio,
double aspect_ratio_new,
Mat34 *P_new);
Mat34* P_new);
void HomogeneousToEuclidean(const Mat &H, Mat *X);
void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e);
void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e);
void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X);
void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X);
inline Vec2 HomogeneousToEuclidean(const Vec3 &h) {
void HomogeneousToEuclidean(const Mat& H, Mat* X);
void HomogeneousToEuclidean(const Mat3X& h, Mat2X* e);
void HomogeneousToEuclidean(const Mat4X& h, Mat3X* e);
void HomogeneousToEuclidean(const Vec3& H, Vec2* X);
void HomogeneousToEuclidean(const Vec4& H, Vec3* X);
inline Vec2 HomogeneousToEuclidean(const Vec3& h) {
return h.head<2>() / h(2);
}
inline Vec3 HomogeneousToEuclidean(const Vec4 &h) {
inline Vec3 HomogeneousToEuclidean(const Vec4& h) {
return h.head<3>() / h(3);
}
inline Mat2X HomogeneousToEuclidean(const Mat3X &h) {
inline Mat2X HomogeneousToEuclidean(const Mat3X& h) {
Mat2X e(2, h.cols());
e.row(0) = h.row(0).array() / h.row(2).array();
e.row(1) = h.row(1).array() / h.row(2).array();
return e;
}
void EuclideanToHomogeneous(const Mat &X, Mat *H);
inline Mat3X EuclideanToHomogeneous(const Mat2X &x) {
void EuclideanToHomogeneous(const Mat& X, Mat* H);
inline Mat3X EuclideanToHomogeneous(const Mat2X& x) {
Mat3X h(3, x.cols());
h.block(0, 0, 2, x.cols()) = x;
h.row(2).setOnes();
return h;
}
inline void EuclideanToHomogeneous(const Mat2X &x, Mat3X *h) {
inline void EuclideanToHomogeneous(const Mat2X& x, Mat3X* h) {
h->resize(3, x.cols());
h->block(0, 0, 2, x.cols()) = x;
h->row(2).setOnes();
}
inline Mat4X EuclideanToHomogeneous(const Mat3X &x) {
inline Mat4X EuclideanToHomogeneous(const Mat3X& x) {
Mat4X h(4, x.cols());
h.block(0, 0, 3, x.cols()) = x;
h.row(3).setOnes();
return h;
}
inline void EuclideanToHomogeneous(const Mat3X &x, Mat4X *h) {
inline void EuclideanToHomogeneous(const Mat3X& x, Mat4X* h) {
h->resize(4, x.cols());
h->block(0, 0, 3, x.cols()) = x;
h->row(3).setOnes();
}
void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H);
void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H);
inline Vec3 EuclideanToHomogeneous(const Vec2 &x) {
void EuclideanToHomogeneous(const Vec2& X, Vec3* H);
void EuclideanToHomogeneous(const Vec3& X, Vec4* H);
inline Vec3 EuclideanToHomogeneous(const Vec2& x) {
return Vec3(x(0), x(1), 1);
}
inline Vec4 EuclideanToHomogeneous(const Vec3 &x) {
inline Vec4 EuclideanToHomogeneous(const Vec3& x) {
return Vec4(x(0), x(1), x(2), 1);
}
// Conversion from image coordinates to normalized camera coordinates
void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n);
void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n);
void EuclideanToNormalizedCamera(const Mat2X& x, const Mat3& K, Mat2X* n);
void HomogeneousToNormalizedCamera(const Mat3X& x, const Mat3& K, Mat2X* n);
inline Vec2 Project(const Mat34 &P, const Vec3 &X) {
inline Vec2 Project(const Mat34& P, const Vec3& X) {
Vec4 HX;
HX << X, 1.0;
Vec3 hx = P * HX;
return hx.head<2>() / hx(2);
}
inline void Project(const Mat34 &P, const Vec4 &X, Vec3 *x) {
inline void Project(const Mat34& P, const Vec4& X, Vec3* x) {
*x = P * X;
}
inline void Project(const Mat34 &P, const Vec4 &X, Vec2 *x) {
inline void Project(const Mat34& P, const Vec4& X, Vec2* x) {
Vec3 hx = P * X;
*x = hx.head<2>() / hx(2);
}
inline void Project(const Mat34 &P, const Vec3 &X, Vec3 *x) {
inline void Project(const Mat34& P, const Vec3& X, Vec3* x) {
Vec4 HX;
HX << X, 1.0;
Project(P, HX, x);
}
inline void Project(const Mat34 &P, const Vec3 &X, Vec2 *x) {
inline void Project(const Mat34& P, const Vec3& X, Vec2* x) {
Vec3 hx;
Project(P, X, &hx);
*x = hx.head<2>() / hx(2);
}
inline void Project(const Mat34 &P, const Mat4X &X, Mat2X *x) {
inline void Project(const Mat34& P, const Mat4X& X, Mat2X* x) {
x->resize(2, X.cols());
for (int c = 0; c < X.cols(); ++c) {
Vec3 hx = P * X.col(c);
@ -134,13 +134,13 @@ inline void Project(const Mat34 &P, const Mat4X &X, Mat2X *x) {
}
}
inline Mat2X Project(const Mat34 &P, const Mat4X &X) {
inline Mat2X Project(const Mat34& P, const Mat4X& X) {
Mat2X x;
Project(P, X, &x);
return x;
}
inline void Project(const Mat34 &P, const Mat3X &X, Mat2X *x) {
inline void Project(const Mat34& P, const Mat3X& X, Mat2X* x) {
x->resize(2, X.cols());
for (int c = 0; c < X.cols(); ++c) {
Vec4 HX;
@ -150,7 +150,7 @@ inline void Project(const Mat34 &P, const Mat3X &X, Mat2X *x) {
}
}
inline void Project(const Mat34 &P, const Mat3X &X, const Vecu &ids, Mat2X *x) {
inline void Project(const Mat34& P, const Mat3X& X, const Vecu& ids, Mat2X* x) {
x->resize(2, ids.size());
Vec4 HX;
Vec3 hx;
@ -161,26 +161,26 @@ inline void Project(const Mat34 &P, const Mat3X &X, const Vecu &ids, Mat2X *x) {
}
}
inline Mat2X Project(const Mat34 &P, const Mat3X &X) {
inline Mat2X Project(const Mat34& P, const Mat3X& X) {
Mat2X x(2, X.cols());
Project(P, X, &x);
return x;
}
inline Mat2X Project(const Mat34 &P, const Mat3X &X, const Vecu &ids) {
inline Mat2X Project(const Mat34& P, const Mat3X& X, const Vecu& ids) {
Mat2X x(2, ids.size());
Project(P, X, ids, &x);
return x;
}
double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X);
double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X);
double Depth(const Mat3& R, const Vec3& t, const Vec3& X);
double Depth(const Mat3& R, const Vec3& t, const Vec4& X);
/**
* Returns true if the homogeneous 3D point X is in front of
* the camera P.
*/
inline bool isInFrontOfCamera(const Mat34 &P, const Vec4 &X) {
* Returns true if the homogeneous 3D point X is in front of
* the camera P.
*/
inline bool isInFrontOfCamera(const Mat34& P, const Vec4& X) {
double condition_1 = P.row(2).dot(X) * X[3];
double condition_2 = X[2] * X[3];
if (condition_1 > 0 && condition_2 > 0)
@ -189,37 +189,37 @@ inline bool isInFrontOfCamera(const Mat34 &P, const Vec4 &X) {
return false;
}
inline bool isInFrontOfCamera(const Mat34 &P, const Vec3 &X) {
inline bool isInFrontOfCamera(const Mat34& P, const Vec3& X) {
Vec4 X_homo;
X_homo.segment<3>(0) = X;
X_homo(3) = 1;
return isInFrontOfCamera( P, X_homo);
return isInFrontOfCamera(P, X_homo);
}
/**
* Transforms a 2D point from pixel image coordinates to a 2D point in
* normalized image coordinates.
*/
inline Vec2 ImageToNormImageCoordinates(Mat3 &Kinverse, Vec2 &x) {
Vec3 x_h = Kinverse*EuclideanToHomogeneous(x);
return HomogeneousToEuclidean( x_h );
* Transforms a 2D point from pixel image coordinates to a 2D point in
* normalized image coordinates.
*/
inline Vec2 ImageToNormImageCoordinates(Mat3& Kinverse, Vec2& x) {
Vec3 x_h = Kinverse * EuclideanToHomogeneous(x);
return HomogeneousToEuclidean(x_h);
}
/// Estimates the root mean square error (2D)
inline double RootMeanSquareError(const Mat2X &x_image,
const Mat4X &X_world,
const Mat34 &P) {
inline double RootMeanSquareError(const Mat2X& x_image,
const Mat4X& X_world,
const Mat34& P) {
size_t num_points = x_image.cols();
Mat2X dx = Project(P, X_world) - x_image;
return dx.norm() / num_points;
}
/// Estimates the root mean square error (2D)
inline double RootMeanSquareError(const Mat2X &x_image,
const Mat3X &X_world,
const Mat3 &K,
const Mat3 &R,
const Vec3 &t) {
inline double RootMeanSquareError(const Mat2X& x_image,
const Mat3X& X_world,
const Mat3& K,
const Mat3& R,
const Vec3& t) {
Mat34 P;
P_From_KRt(K, R, t, &P);
size_t num_points = x_image.cols();
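A minimal sketch combining the Project overloads and RootMeanSquareError declared above on noise-free data:

// Sketch: project homogeneous 3D points and measure the reprojection RMSE.
#include "libmv/multiview/projection.h"

int main() {
  using namespace libmv;
  Mat34 P;
  P << 1, 0, 0, 0,
       0, 1, 0, 0,
       0, 0, 1, 0;
  Mat4X X(4, 3);  // Three homogeneous points in front of the camera.
  X << 0, 1, 2,
       0, 1, 1,
       5, 6, 7,
       1, 1, 1;
  Mat2X x = Project(P, X);
  double rms = RootMeanSquareError(x, X, P);  // 0 for noise-free data.
  return rms < 1e-12 ? 0 : 1;
}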
View File
@ -29,14 +29,18 @@ using namespace libmv;
TEST(Projection, P_From_KRt) {
Mat3 K, Kp;
// clang-format off
K << 10, 1, 30,
0, 20, 40,
0, 0, 1;
// clang-format on
Mat3 R, Rp;
// clang-format off
R << 1, 0, 0,
0, 1, 0,
0, 0, 1;
// clang-format on
Vec3 t, tp;
t << 1, 2, 3;
@ -62,16 +66,18 @@ Vec4 GetRandomPoint() {
TEST(Projection, isInFrontOfCamera) {
Mat34 P;
// clang-format off
P << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
// clang-format on
Vec4 X_front = GetRandomPoint();
Vec4 X_back = GetRandomPoint();
X_front(2) = 10; /* Any point in the positive Z direction
* where Z > 1 is in front of the camera. */
X_back(2) = -10; /* Any point in the negative Z direction
* is behind the camera. */
X_front(2) = 10; /* Any point in the positive Z direction
* where Z > 1 is in front of the camera. */
X_back(2) = -10; /* Any point in the negative Z direction
* is behind the camera. */
bool res_front = isInFrontOfCamera(P, X_front);
bool res_back = isInFrontOfCamera(P, X_back);
@ -82,12 +88,14 @@ TEST(Projection, isInFrontOfCamera) {
TEST(AutoCalibration, ProjectionShiftPrincipalPoint) {
Mat34 P1, P2;
// clang-format off
P1 << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
P2 << 1, 0, 3, 0,
0, 1, 4, 0,
0, 0, 1, 0;
// clang-format on
Mat34 P1_computed, P2_computed;
ProjectionShiftPrincipalPoint(P1, Vec2(0, 0), Vec2(3, 4), &P2_computed);
ProjectionShiftPrincipalPoint(P2, Vec2(3, 4), Vec2(0, 0), &P1_computed);
@ -98,12 +106,14 @@ TEST(AutoCalibration, ProjectionShiftPrincipalPoint) {
TEST(AutoCalibration, ProjectionChangeAspectRatio) {
Mat34 P1, P2;
// clang-format off
P1 << 1, 0, 3, 0,
0, 1, 4, 0,
0, 0, 1, 0;
P2 << 1, 0, 3, 0,
0, 2, 4, 0,
0, 0, 1, 0;
// clang-format on
Mat34 P1_computed, P2_computed;
ProjectionChangeAspectRatio(P1, Vec2(3, 4), 1, 2, &P2_computed);
ProjectionChangeAspectRatio(P2, Vec2(3, 4), 2, 1, &P1_computed);
View File
@ -33,23 +33,23 @@ namespace libmv {
namespace resection {
// x's are 2D image coordinates, (x,y,1), and X's are homogeneous four-vectors.
template<typename T>
void Resection(const Matrix<T, 2, Dynamic> &x,
const Matrix<T, 4, Dynamic> &X,
Matrix<T, 3, 4> *P) {
template <typename T>
void Resection(const Matrix<T, 2, Dynamic>& x,
const Matrix<T, 4, Dynamic>& X,
Matrix<T, 3, 4>* P) {
int N = x.cols();
assert(X.cols() == N);
Matrix<T, Dynamic, 12> design(2*N, 12);
Matrix<T, Dynamic, 12> design(2 * N, 12);
design.setZero();
for (int i = 0; i < N; i++) {
T xi = x(0, i);
T yi = x(1, i);
// See equation (7.2) on page 179 of H&Z.
design.template block<1, 4>(2*i, 4) = -X.col(i).transpose();
design.template block<1, 4>(2*i, 8) = yi*X.col(i).transpose();
design.template block<1, 4>(2*i + 1, 0) = X.col(i).transpose();
design.template block<1, 4>(2*i + 1, 8) = -xi*X.col(i).transpose();
design.template block<1, 4>(2 * i, 4) = -X.col(i).transpose();
design.template block<1, 4>(2 * i, 8) = yi * X.col(i).transpose();
design.template block<1, 4>(2 * i + 1, 0) = X.col(i).transpose();
design.template block<1, 4>(2 * i + 1, 8) = -xi * X.col(i).transpose();
}
Matrix<T, 12, 1> p;
Nullspace(&design, &p);
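A usage sketch of the DLT resection above; the camera and points are hypothetical, and Project from projection.h synthesizes noise-free observations:

// Sketch: recover a camera from six 2D-3D correspondences.
#include "libmv/multiview/projection.h"
#include "libmv/multiview/resection.h"

int main() {
  using namespace libmv;
  Mat34 P_gt;
  P_gt << 1, 0, 0,  2,
          0, 1, 0,  3,
          0, 0, 1, 10;
  Mat4X X(4, 6);  // Six homogeneous 3D points in general position.
  X.setRandom();
  X.row(3).setOnes();
  Mat2X x = Project(P_gt, X);
  Mat34 P;
  resection::Resection(x, X, &P);  // P equals P_gt up to scale.
  return 0;
}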
View File
@ -20,12 +20,12 @@
#include <iostream>
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/resection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
#include "libmv/logging/logging.h"
namespace {
@ -40,15 +40,15 @@ TEST(Resection, ThreeViews) {
Mat4X X(4, npoints);
X.block(0, 0, 3, npoints) = d.X;
X.row(3).setOnes();
const Mat2X &x = d.x[i];
const Mat2X& x = d.x[i];
Mat34 P;
Resection(x, X, &P);
Mat34 P_expected = d.P(i);
// Because the P matrices are homogeneous, it is necessary to be tricky
// about the scale factor to make them match.
P_expected *= 1/P_expected.array().abs().sum();
P *= 1/P.array().abs().sum();
P_expected *= 1 / P_expected.array().abs().sum();
P *= 1 / P.array().abs().sum();
if (!((P(0, 0) > 0 && P_expected(0, 0) > 0) ||
(P(0, 0) < 0 && P_expected(0, 0) < 0))) {
P *= -1;
View File
@ -22,24 +22,28 @@
#include <cmath>
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
TwoViewDataSet TwoRealisticCameras(bool same_K) {
TwoViewDataSet d;
// clang-format off
d.K1 << 320, 0, 160,
0, 320, 120,
0, 0, 1;
// clang-format on
if (same_K) {
d.K2 = d.K1;
} else {
// clang-format off
d.K2 << 360, 0, 170,
0, 360, 110,
0, 0, 1;
// clang-format on
}
d.R1 = RotationAroundZ(-0.1);
d.R2 = RotationAroundX(-0.1);
@ -59,10 +63,8 @@ TwoViewDataSet TwoRealisticCameras(bool same_K) {
return d;
}
nViewDatasetConfigator::nViewDatasetConfigator(int fx , int fy,
int cx, int cy,
double distance,
double jitter_amount) {
nViewDatasetConfigator::nViewDatasetConfigator(
int fx, int fy, int cx, int cy, double distance, double jitter_amount) {
_fx = fx;
_fy = fy;
_cx = cx;
@ -71,7 +73,8 @@ nViewDatasetConfigator::nViewDatasetConfigator(int fx , int fy,
_jitter_amount = jitter_amount;
}
NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
NViewDataSet NRealisticCamerasFull(int nviews,
int npoints,
const nViewDatasetConfigator config) {
NViewDataSet d;
d.n = nviews;
@ -102,9 +105,11 @@ NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
jitter *= config._jitter_amount / camera_center.norm();
lookdir = -camera_center + jitter;
// clang-format off
d.K[i] << config._fx, 0, config._cx,
0, config._fy, config._cy,
0, 0, 1;
// clang-format on
d.R[i] = LookAt(lookdir);
d.t[i] = -d.R[i] * camera_center;
d.x[i] = Project(d.P(i), d.X);
@ -113,9 +118,10 @@ NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
return d;
}
NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
float view_ratio, unsigned min_projections,
NViewDataSet NRealisticCamerasSparse(int nviews,
int npoints,
float view_ratio,
unsigned min_projections,
const nViewDatasetConfigator config) {
assert(view_ratio <= 1.0);
assert(view_ratio > 0.0);
@ -137,7 +143,7 @@ NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
visibility.setZero();
Mat randoms(nviews, npoints);
randoms.setRandom();
randoms = (randoms.array() + 1)/2.0;
randoms = (randoms.array() + 1) / 2.0;
unsigned num_visibles = 0;
for (size_t i = 0; i < nviews; ++i) {
num_visibles = 0;
@ -174,15 +180,17 @@ NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
jitter *= config._jitter_amount / camera_center.norm();
lookdir = -camera_center + jitter;
// clang-format off
d.K[i] << config._fx, 0, config._cx,
0, config._fy, config._cy,
0, 0, 1;
// clang-format on
d.R[i] = LookAt(lookdir);
d.t[i] = -d.R[i] * camera_center;
j_visible = 0;
for (size_t j = 0; j < npoints; j++) {
if (visibility(i, j)) {
X = d.X.col(j);
X = d.X.col(j);
d.x[i].col(j_visible) = Project(d.P(i), X);
d.x_ids[i][j_visible] = j;
j_visible++;
@ -192,5 +200,4 @@ NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
return d;
}
} // namespace libmv
View File
@ -34,8 +34,8 @@ struct TwoViewDataSet {
Vec3 t1, t2; // Translation.
Mat34 P1, P2; // Projection matrix, P = K(R|t)
Mat3 F; // Fundamental matrix.
Mat3X X; // 3D points.
Mat2X x1, x2; // Projected points.
Mat3X X; // 3D points.
Mat2X x1, x2; // Projected points.
};
// Two cameras at (-1,-1,-10) and (2,1,-10) looking approximately towards z+.
@ -45,13 +45,13 @@ TwoViewDataSet TwoRealisticCameras(bool same_K = false);
// and the other reconstruction data types is that all points are seen by all
// cameras.
struct NViewDataSet {
vector<Mat3> K; // Internal parameters (fx, fy, etc).
vector<Mat3> R; // Rotation.
vector<Vec3> t; // Translation.
vector<Vec3> C; // Camera centers.
Mat3X X; // 3D points.
vector<Mat2X> x; // Projected points; may have noise added.
vector<Vecu> x_ids; // Indexes of points corresponding to the projections
vector<Mat3> K; // Internal parameters (fx, fy, etc).
vector<Mat3> R; // Rotation.
vector<Vec3> t; // Translation.
vector<Vec3> C; // Camera centers.
Mat3X X; // 3D points.
vector<Mat2X> x; // Projected points; may have noise added.
vector<Vecu> x_ids; // Indexes of points corresponding to the projections
int n; // Actual number of cameras.
@ -83,22 +83,26 @@ struct nViewDatasetConfigator {
double _dist;
double _jitter_amount;
nViewDatasetConfigator(int fx = 1000, int fy = 1000,
int cx = 500, int cy = 500,
nViewDatasetConfigator(int fx = 1000,
int fy = 1000,
int cx = 500,
int cy = 500,
double distance = 1.5,
double jitter_amount = 0.01);
};
NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
const nViewDatasetConfigator
config = nViewDatasetConfigator());
NViewDataSet NRealisticCamerasFull(
int nviews,
int npoints,
const nViewDatasetConfigator config = nViewDatasetConfigator());
// Generates sparse projections (not all points are projected)
NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
float view_ratio = 0.6,
unsigned min_projections = 3,
const nViewDatasetConfigator
config = nViewDatasetConfigator());
NViewDataSet NRealisticCamerasSparse(
int nviews,
int npoints,
float view_ratio = 0.6,
unsigned min_projections = 3,
const nViewDatasetConfigator config = nViewDatasetConfigator());
} // namespace libmv
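A sketch of generating a synthetic dataset with the helper declared above and checking one reprojection; the default nViewDatasetConfigator is used:

// Sketch: build a noise-free N-view dataset and reproject its points.
#include "libmv/multiview/projection.h"
#include "libmv/multiview/test_data_sets.h"

int main() {
  using namespace libmv;
  NViewDataSet d = NRealisticCamerasFull(5, 20);  // 5 cameras, 20 points.
  // d.P(i) composes K[i], R[i] and t[i]; d.x[i] holds the stored projections.
  Mat2X reprojected = Project(d.P(0), d.X);
  double diff = (reprojected - d.x[0]).norm();  // ~0 for this generator.
  return diff < 1e-9 ? 0 : 1;
}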

View File

@ -20,15 +20,17 @@
#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
// HZ 12.2 pag.312
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
Vec4 *X_homogeneous) {
void TriangulateDLT(const Mat34& P1,
const Vec2& x1,
const Mat34& P2,
const Vec2& x2,
Vec4* X_homogeneous) {
Mat4 design;
for (int i = 0; i < 4; ++i) {
design(0, i) = x1(0) * P1(2, i) - P1(0, i);
@ -39,9 +41,11 @@ void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
Nullspace(&design, X_homogeneous);
}
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
Vec3 *X_euclidean) {
void TriangulateDLT(const Mat34& P1,
const Vec2& x1,
const Mat34& P2,
const Vec2& x2,
Vec3* X_euclidean) {
Vec4 X_homogeneous;
TriangulateDLT(P1, x1, P2, x2, &X_homogeneous);
HomogeneousToEuclidean(X_homogeneous, X_euclidean);

View File

@ -25,13 +25,17 @@
namespace libmv {
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
Vec4 *X_homogeneous);
void TriangulateDLT(const Mat34& P1,
const Vec2& x1,
const Mat34& P2,
const Vec2& x2,
Vec4* X_homogeneous);
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
Vec3 *X_euclidean);
void TriangulateDLT(const Mat34& P1,
const Vec2& x1,
const Mat34& P2,
const Vec2& x2,
Vec3* X_euclidean);
} // namespace libmv
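A minimal sketch of calling the Euclidean overload declared above (not part of this commit; the wrapper name is invented, the include paths are taken from the sources shown earlier, and the parameter layout follows the style this commit enforces):

#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"

// Triangulate a single correspondence (x1 seen by camera P1, x2 by P2).
libmv::Vec3 TriangulatePoint(const libmv::Mat34& P1,
                             const libmv::Vec2& x1,
                             const libmv::Mat34& P2,
                             const libmv::Vec2& x2) {
  libmv::Vec3 X;
  libmv::TriangulateDLT(P1, x1, P2, x2, &X);
  return X;
}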

View File

@ -20,10 +20,10 @@
#include <iostream>
#include "libmv/multiview/triangulation.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"

View File

@ -30,10 +30,10 @@ namespace libmv {
namespace two_view {
namespace kernel {
template<typename Solver, typename Unnormalizer>
template <typename Solver, typename Unnormalizer>
struct NormalizedSolver {
enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *models) {
static void Solve(const Mat& x1, const Mat& x2, vector<Mat3>* models) {
assert(2 == x1.rows());
assert(MINIMUM_SAMPLES <= x1.cols());
assert(x1.rows() == x2.rows());
@ -53,10 +53,10 @@ struct NormalizedSolver {
}
};
template<typename Solver, typename Unnormalizer>
template <typename Solver, typename Unnormalizer>
struct IsotropicNormalizedSolver {
enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *models) {
static void Solve(const Mat& x1, const Mat& x2, vector<Mat3>* models) {
assert(2 == x1.rows());
assert(MINIMUM_SAMPLES <= x1.cols());
assert(x1.rows() == x2.rows());
@ -99,35 +99,32 @@ struct IsotropicNormalizedSolver {
//
// The fit routine must not clear existing entries in the vector of models; it
// should append new solutions to the end.
template<typename SolverArg,
typename ErrorArg,
typename ModelArg = Mat3>
template <typename SolverArg, typename ErrorArg, typename ModelArg = Mat3>
class Kernel {
public:
Kernel(const Mat &x1, const Mat &x2) : x1_(x1), x2_(x2) {}
Kernel(const Mat& x1, const Mat& x2) : x1_(x1), x2_(x2) {}
typedef SolverArg Solver;
typedef ModelArg Model;
typedef ModelArg Model;
enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
void Fit(const vector<int> &samples, vector<Model> *models) const {
void Fit(const vector<int>& samples, vector<Model>* models) const {
Mat x1 = ExtractColumns(x1_, samples);
Mat x2 = ExtractColumns(x2_, samples);
Solver::Solve(x1, x2, models);
}
double Error(int sample, const Model &model) const {
double Error(int sample, const Model& model) const {
return ErrorArg::Error(model,
static_cast<Vec>(x1_.col(sample)),
static_cast<Vec>(x2_.col(sample)));
}
int NumSamples() const {
return x1_.cols();
}
static void Solve(const Mat &x1, const Mat &x2, vector<Model> *models) {
int NumSamples() const { return x1_.cols(); }
static void Solve(const Mat& x1, const Mat& x2, vector<Model>* models) {
// By offering this, Kernel types can be passed to templates.
Solver::Solve(x1, x2, models);
}
protected:
const Mat &x1_;
const Mat &x2_;
const Mat& x1_;
const Mat& x2_;
};
} // namespace kernel

View File

@ -32,18 +32,18 @@
#include <cmath>
#include <cstdio>
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
template<typename Function,
typename Jacobian = NumericJacobian<Function>,
typename Solver = Eigen::PartialPivLU<
Matrix<typename Function::FMatrixType::RealScalar,
Function::XMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime> > >
template <typename Function,
typename Jacobian = NumericJacobian<Function>,
typename Solver = Eigen::PartialPivLU<
Matrix<typename Function::FMatrixType::RealScalar,
Function::XMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime>>>
class Dogleg {
public:
typedef typename Function::XMatrixType::RealScalar Scalar;
@ -51,10 +51,12 @@ class Dogleg {
typedef typename Function::XMatrixType Parameters;
typedef Matrix<typename Function::FMatrixType::RealScalar,
Function::FMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime> JMatrixType;
Function::XMatrixType::RowsAtCompileTime>
JMatrixType;
typedef Matrix<typename JMatrixType::RealScalar,
JMatrixType::ColsAtCompileTime,
JMatrixType::ColsAtCompileTime> AMatrixType;
JMatrixType::ColsAtCompileTime>
AMatrixType;
enum Status {
RUNNING,
@ -71,34 +73,38 @@ class Dogleg {
STEEPEST_DESCENT,
};
Dogleg(const Function &f)
: f_(f), df_(f) {}
Dogleg(const Function& f) : f_(f), df_(f) {}
struct SolverParameters {
SolverParameters()
: gradient_threshold(1e-16),
relative_step_threshold(1e-16),
error_threshold(1e-16),
initial_trust_radius(1e0),
max_iterations(500) {}
: gradient_threshold(1e-16),
relative_step_threshold(1e-16),
error_threshold(1e-16),
initial_trust_radius(1e0),
max_iterations(500) {}
Scalar gradient_threshold; // eps > max(J'*f(x))
Scalar relative_step_threshold; // eps > ||dx|| / ||x||
Scalar error_threshold; // eps > ||f(x)||
Scalar initial_trust_radius; // Initial u for solving normal equations.
int max_iterations; // Maximum number of solver iterations.
int max_iterations; // Maximum number of solver iterations.
};
struct Results {
Scalar error_magnitude; // ||f(x)||
Scalar gradient_magnitude; // ||J'f(x)||
int iterations;
int iterations;
Status status;
};
Status Update(const Parameters &x, const SolverParameters &params,
JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
Status Update(const Parameters& x,
const SolverParameters& params,
JMatrixType* J,
AMatrixType* A,
FVec* error,
Parameters* g) {
*J = df_(x);
// TODO(keir): In the case of m = n, avoid computing A and just do J^-1 directly.
// TODO(keir): In the case of m = n, avoid computing A and just do J^-1
// directly.
*A = (*J).transpose() * (*J);
*error = f_(x);
*g = (*J).transpose() * *error;
@ -110,12 +116,12 @@ class Dogleg {
return RUNNING;
}
Step SolveDoglegDirection(const Parameters &dx_sd,
const Parameters &dx_gn,
Step SolveDoglegDirection(const Parameters& dx_sd,
const Parameters& dx_gn,
Scalar radius,
Scalar alpha,
Parameters *dx_dl,
Scalar *beta) {
Parameters* dx_dl,
Scalar* beta) {
Parameters a, b_minus_a;
// Solve for Dogleg step dx_dl.
if (dx_gn.norm() < radius) {
@ -128,30 +134,29 @@ class Dogleg {
} else {
Parameters a = alpha * dx_sd;
const Parameters &b = dx_gn;
const Parameters& b = dx_gn;
b_minus_a = a - b;
Scalar Mbma2 = b_minus_a.squaredNorm();
Scalar Ma2 = a.squaredNorm();
Scalar c = a.dot(b_minus_a);
Scalar radius2 = radius*radius;
Scalar radius2 = radius * radius;
if (c <= 0) {
*beta = (-c + sqrt(c*c + Mbma2*(radius2 - Ma2)))/(Mbma2);
*beta = (-c + sqrt(c * c + Mbma2 * (radius2 - Ma2))) / (Mbma2);
} else {
*beta = (radius2 - Ma2) /
(c + sqrt(c*c + Mbma2*(radius2 - Ma2)));
*beta = (radius2 - Ma2) / (c + sqrt(c * c + Mbma2 * (radius2 - Ma2)));
}
*dx_dl = alpha * dx_sd + (*beta) * (dx_gn - alpha*dx_sd);
*dx_dl = alpha * dx_sd + (*beta) * (dx_gn - alpha * dx_sd);
return DOGLEG;
}
}
Results minimize(Parameters *x_and_min) {
Results minimize(Parameters* x_and_min) {
SolverParameters params;
return minimize(params, x_and_min);
}
Results minimize(const SolverParameters &params, Parameters *x_and_min) {
Parameters &x = *x_and_min;
Results minimize(const SolverParameters& params, Parameters* x_and_min) {
Parameters& x = *x_and_min;
JMatrixType J;
AMatrixType A;
FVec error;
@ -167,18 +172,21 @@ class Dogleg {
Parameters dx_sd; // Steepest descent step.
Parameters dx_dl; // Dogleg step.
Parameters dx_gn; // Gauss-Newton step.
printf("iteration ||f(x)|| max(g) radius\n");
printf("iteration ||f(x)|| max(g) radius\n");
int i = 0;
for (; results.status == RUNNING && i < params.max_iterations; ++i) {
printf("%9d %12g %12g %12g",
i, f_(x).norm(), g.array().abs().maxCoeff(), radius);
i,
f_(x).norm(),
g.array().abs().maxCoeff(),
radius);
//LG << "iteration: " << i;
//LG << "||f(x)||: " << f_(x).norm();
//LG << "max(g): " << g.cwise().abs().maxCoeff();
//LG << "radius: " << radius;
// LG << "iteration: " << i;
// LG << "||f(x)||: " << f_(x).norm();
// LG << "max(g): " << g.cwise().abs().maxCoeff();
// LG << "radius: " << radius;
// Eqn 3.19 from [1]
Scalar alpha = g.squaredNorm() / (J*g).squaredNorm();
Scalar alpha = g.squaredNorm() / (J * g).squaredNorm();
// Solve for steepest descent direction dx_sd.
dx_sd = -g;
@ -199,11 +207,11 @@ class Dogleg {
// Solve for dogleg direction dx_dl.
Scalar beta = 0;
Step step = SolveDoglegDirection(dx_sd, dx_gn, radius, alpha,
&dx_dl, &beta);
Step step =
SolveDoglegDirection(dx_sd, dx_gn, radius, alpha, &dx_dl, &beta);
Scalar e3 = params.relative_step_threshold;
if (dx_dl.norm() < e3*(x.norm() + e3)) {
if (dx_dl.norm() < e3 * (x.norm() + e3)) {
results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
break;
}
@ -214,16 +222,19 @@ class Dogleg {
if (step == GAUSS_NEWTON) {
predicted = f_(x).squaredNorm();
} else if (step == STEEPEST_DESCENT) {
predicted = radius * (2*alpha*g.norm() - radius) / 2 / alpha;
predicted = radius * (2 * alpha * g.norm() - radius) / 2 / alpha;
} else if (step == DOGLEG) {
predicted = 0.5 * alpha * (1-beta)*(1-beta)*g.squaredNorm() +
beta*(2-beta)*f_(x).squaredNorm();
predicted = 0.5 * alpha * (1 - beta) * (1 - beta) * g.squaredNorm() +
beta * (2 - beta) * f_(x).squaredNorm();
}
Scalar rho = actual / predicted;
if (step == GAUSS_NEWTON) printf(" GAUSS");
if (step == STEEPEST_DESCENT) printf(" STEE");
if (step == DOGLEG) printf(" DOGL");
if (step == GAUSS_NEWTON)
printf(" GAUSS");
if (step == STEEPEST_DESCENT)
printf(" STEE");
if (step == DOGLEG)
printf(" DOGL");
printf(" %12g %12g %12g\n", rho, actual, predicted);
@ -234,7 +245,7 @@ class Dogleg {
x_updated = true;
}
if (rho > 0.75) {
radius = std::max(radius, 3*dx_dl.norm());
radius = std::max(radius, 3 * dx_dl.norm());
} else if (rho < 0.25) {
radius /= 2;
if (radius < e3 * (x.norm() + e3)) {
@ -252,10 +263,10 @@ class Dogleg {
}
private:
const Function &f_;
const Function& f_;
Jacobian df_;
};
} // namespace mv
} // namespace libmv
#endif // LIBMV_NUMERIC_DOGLEG_H

View File

@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/numeric/dogleg.h"
#include "testing/testing.h"
using namespace libmv;
@ -29,26 +29,26 @@ class F {
public:
typedef Vec4 FMatrixType;
typedef Vec3 XMatrixType;
Vec4 operator()(const Vec3 &x) const {
Vec4 operator()(const Vec3& x) const {
double x1 = x.x() - 2;
double y1 = x.y() - 5;
double z1 = x.z();
Vec4 fx; fx << x1*x1 + z1*z1,
y1*y1 + z1*z1,
z1*z1,
x1*x1;
Vec4 fx;
fx << x1 * x1 + z1 * z1, y1 * y1 + z1 * z1, z1 * z1, x1 * x1;
return fx;
}
};
TEST(Dogleg, SimpleCase) {
Vec3 x; x << 0.76026643, -30.01799744, 0.55192142;
Vec3 x;
x << 0.76026643, -30.01799744, 0.55192142;
F f;
Dogleg<F>::SolverParameters params;
Dogleg<F> lm(f);
/* TODO(sergey): Better error handling. */
/* Dogleg<F>::Results results = */ lm.minimize(params, &x);
Vec3 expected_min_x; expected_min_x << 2, 5, 0;
Vec3 expected_min_x;
expected_min_x << 2, 5, 0;
EXPECT_MATRIX_NEAR(expected_min_x, x, 1e-5);
}
@ -59,20 +59,21 @@ class F32 {
public:
typedef Vec2 FMatrixType;
typedef Vec2 XMatrixType;
Vec2 operator()(const Vec2 &x) const {
Vec2 operator()(const Vec2& x) const {
double x1 = x(0);
double x2 = 10*x(0)/(x(0) + 0.1) + 2*x(1)*x(1);
Vec2 fx; fx << x1, x2;
double x2 = 10 * x(0) / (x(0) + 0.1) + 2 * x(1) * x(1);
Vec2 fx;
fx << x1, x2;
return fx;
}
};
class JF32 {
public:
JF32(const F32 &f) { (void) f; }
Mat2 operator()(const Vec2 &x) {
Mat2 J; J << 1, 0,
1./pow(x(0) + 0.1, 2), 4*x(1)*x(1);
JF32(const F32& f) { (void)f; }
Mat2 operator()(const Vec2& x) {
Mat2 J;
J << 1, 0, 1. / pow(x(0) + 0.1, 2), 4 * x(1) * x(1);
return J;
}
};

View File

@ -23,8 +23,8 @@
#include <cmath>
#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -36,7 +36,7 @@ enum NumericJacobianMode {
FORWARD,
};
template<typename Function, NumericJacobianMode mode = CENTRAL>
template <typename Function, NumericJacobianMode mode = CENTRAL>
class NumericJacobian {
public:
typedef typename Function::XMatrixType Parameters;
@ -45,12 +45,12 @@ class NumericJacobian {
typedef Matrix<typename Function::FMatrixType::RealScalar,
Function::FMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime>
JMatrixType;
JMatrixType;
NumericJacobian(const Function &f) : f_(f) {}
NumericJacobian(const Function& f) : f_(f) {}
// TODO(keir): Perhaps passing the jacobian back by value is not a good idea.
JMatrixType operator()(const Parameters &x) {
JMatrixType operator()(const Parameters& x) {
// Empirically determined constant.
Parameters eps = x.array().abs() * XScalar(1e-5);
// To handle cases where a parameter is exactly zero, instead use the mean
@ -87,12 +87,13 @@ class NumericJacobian {
}
return jacobian;
}
private:
const Function &f_;
const Function& f_;
};
template<typename Function, typename Jacobian>
bool CheckJacobian(const Function &f, const typename Function::XMatrixType &x) {
template <typename Function, typename Jacobian>
bool CheckJacobian(const Function& f, const typename Function::XMatrixType& x) {
Jacobian j_analytic(f);
NumericJacobian<Function> j_numeric(f);

View File

@ -18,9 +18,9 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv;
@ -30,22 +30,22 @@ class F {
public:
typedef Vec2 FMatrixType;
typedef Vec3 XMatrixType;
Vec2 operator()(const Vec3 &x) const {
Vec2 operator()(const Vec3& x) const {
Vec2 fx;
fx << 0.19*x(0) + 0.19*x(1)*x(1) + x(2),
3*sin(x(0)) + 2*cos(x(1));
fx << 0.19 * x(0) + 0.19 * x(1) * x(1) + x(2),
3 * sin(x(0)) + 2 * cos(x(1));
return fx;
}
Mat23 J(const Vec3 &x) const {
Mat23 J(const Vec3& x) const {
Mat23 jacobian;
jacobian << 0.19, 2*0.19*x(1), 1.0,
3*cos(x(0)), -2*sin(x(1)), 0;
jacobian << 0.19, 2 * 0.19 * x(1), 1.0, 3 * cos(x(0)), -2 * sin(x(1)), 0;
return jacobian;
}
};
TEST(FunctionDerivative, SimpleCase) {
Vec3 x; x << 0.76026643, 0.01799744, 0.55192142;
Vec3 x;
x << 0.76026643, 0.01799744, 0.55192142;
F f;
NumericJacobian<F, CENTRAL> J(f);
EXPECT_MATRIX_NEAR(f.J(x), J(x), 1e-8);

View File

@ -31,18 +31,18 @@
#include <cmath>
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
template<typename Function,
typename Jacobian = NumericJacobian<Function>,
typename Solver = Eigen::PartialPivLU<
Matrix<typename Function::FMatrixType::RealScalar,
Function::XMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime> > >
template <typename Function,
typename Jacobian = NumericJacobian<Function>,
typename Solver = Eigen::PartialPivLU<
Matrix<typename Function::FMatrixType::RealScalar,
Function::XMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime>>>
class LevenbergMarquardt {
public:
typedef typename Function::XMatrixType::RealScalar Scalar;
@ -50,10 +50,12 @@ class LevenbergMarquardt {
typedef typename Function::XMatrixType Parameters;
typedef Matrix<typename Function::FMatrixType::RealScalar,
Function::FMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime> JMatrixType;
Function::XMatrixType::RowsAtCompileTime>
JMatrixType;
typedef Matrix<typename JMatrixType::RealScalar,
JMatrixType::ColsAtCompileTime,
JMatrixType::ColsAtCompileTime> AMatrixType;
JMatrixType::ColsAtCompileTime>
AMatrixType;
// TODO(keir): Some of these knobs can be derived from each other and
// removed, instead of requiring the user to set them.
@ -65,32 +67,35 @@ class LevenbergMarquardt {
HIT_MAX_ITERATIONS,
};
LevenbergMarquardt(const Function &f)
: f_(f), df_(f) {}
LevenbergMarquardt(const Function& f) : f_(f), df_(f) {}
struct SolverParameters {
SolverParameters()
: gradient_threshold(1e-16),
relative_step_threshold(1e-16),
error_threshold(1e-16),
initial_scale_factor(1e-3),
max_iterations(100) {}
: gradient_threshold(1e-16),
relative_step_threshold(1e-16),
error_threshold(1e-16),
initial_scale_factor(1e-3),
max_iterations(100) {}
Scalar gradient_threshold; // eps > max(J'*f(x))
Scalar relative_step_threshold; // eps > ||dx|| / ||x||
Scalar error_threshold; // eps > ||f(x)||
Scalar initial_scale_factor; // Initial u for solving normal equations.
int max_iterations; // Maximum number of solver iterations.
int max_iterations; // Maximum number of solver iterations.
};
struct Results {
Scalar error_magnitude; // ||f(x)||
Scalar gradient_magnitude; // ||J'f(x)||
int iterations;
int iterations;
Status status;
};
Status Update(const Parameters &x, const SolverParameters &params,
JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
Status Update(const Parameters& x,
const SolverParameters& params,
JMatrixType* J,
AMatrixType* A,
FVec* error,
Parameters* g) {
*J = df_(x);
*A = (*J).transpose() * (*J);
*error = -f_(x);
@ -103,13 +108,13 @@ class LevenbergMarquardt {
return RUNNING;
}
Results minimize(Parameters *x_and_min) {
Results minimize(Parameters* x_and_min) {
SolverParameters params;
    return minimize(params, x_and_min);
}
Results minimize(const SolverParameters &params, Parameters *x_and_min) {
Parameters &x = *x_and_min;
Results minimize(const SolverParameters& params, Parameters* x_and_min) {
Parameters& x = *x_and_min;
JMatrixType J;
AMatrixType A;
FVec error;
@ -118,7 +123,7 @@ class LevenbergMarquardt {
Results results;
results.status = Update(x, params, &J, &A, &error, &g);
Scalar u = Scalar(params.initial_scale_factor*A.diagonal().maxCoeff());
Scalar u = Scalar(params.initial_scale_factor * A.diagonal().maxCoeff());
Scalar v = 2;
Parameters dx, x_new;
@ -130,7 +135,8 @@ class LevenbergMarquardt {
VLOG(3) << "u: " << u;
VLOG(3) << "v: " << v;
AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
AMatrixType A_augmented =
A + u * AMatrixType::Identity(J.cols(), J.cols());
Solver solver(A_augmented);
dx = solver.solve(g);
bool solved = (A_augmented * dx).isApprox(g);
@ -146,14 +152,14 @@ class LevenbergMarquardt {
// Rho is the ratio of the actual reduction in error to the reduction
// in error that would be obtained if the problem was linear.
// See [1] for details.
Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
/ dx.dot(u*dx + g));
Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm()) /
dx.dot(u * dx + g));
if (rho > 0) {
// Accept the Gauss-Newton step because the linear model fits well.
x = x_new;
results.status = Update(x, params, &J, &A, &error, &g);
Scalar tmp = Scalar(2*rho-1);
u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
Scalar tmp = Scalar(2 * rho - 1);
u = u * std::max(1 / 3., 1 - (tmp * tmp * tmp));
v = 2;
continue;
}
@ -174,10 +180,10 @@ class LevenbergMarquardt {
}
private:
const Function &f_;
const Function& f_;
Jacobian df_;
};
} // namespace mv
} // namespace libmv
#endif // LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
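A self-contained usage sketch of the solver above (not from this commit; the Quadratic functor and SolveExample are invented examples, the include path follows the test shown below):

#include "libmv/numeric/levenberg_marquardt.h"

// Residual functor with a single zero at (1, -2); exposes the typedefs the
// solver expects.
struct Quadratic {
  typedef libmv::Vec2 FMatrixType;
  typedef libmv::Vec2 XMatrixType;
  libmv::Vec2 operator()(const libmv::Vec2& x) const {
    libmv::Vec2 fx;
    fx << x(0) - 1.0, x(1) + 2.0;
    return fx;
  }
};

void SolveExample() {
  libmv::Vec2 x;
  x << 0.0, 0.0;  // Initial guess.
  Quadratic f;
  libmv::LevenbergMarquardt<Quadratic> lm(f);
  libmv::LevenbergMarquardt<Quadratic>::SolverParameters params;
  lm.minimize(params, &x);  // x should end up close to (1, -2).
}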

View File

@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/numeric/levenberg_marquardt.h"
#include "testing/testing.h"
using namespace libmv;
@ -29,14 +29,12 @@ class F {
public:
typedef Vec4 FMatrixType;
typedef Vec3 XMatrixType;
Vec4 operator()(const Vec3 &x) const {
Vec4 operator()(const Vec3& x) const {
double x1 = x.x() - 2;
double y1 = x.y() - 5;
double z1 = x.z();
Vec4 fx; fx << x1*x1 + z1*z1,
y1*y1 + z1*z1,
z1*z1,
x1*x1;
Vec4 fx;
fx << x1 * x1 + z1 * z1, y1 * y1 + z1 * z1, z1 * z1, x1 * x1;
return fx;
}
};

View File

@ -18,7 +18,6 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -27,9 +26,11 @@ Mat3 RotationAroundX(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
// clang-format off
R << 1, 0, 0,
0, c, -s,
0, s, c;
// clang-format on
return R;
}
@ -37,9 +38,11 @@ Mat3 RotationAroundY(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
// clang-format off
R << c, 0, s,
0, 1, 0,
-s, 0, c;
// clang-format on
return R;
}
@ -47,14 +50,15 @@ Mat3 RotationAroundZ(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
// clang-format off
R << c, -s, 0,
s, c, 0,
0, 0, 1;
// clang-format on
return R;
}
Mat3 RotationRodrigues(const Vec3 &axis) {
Mat3 RotationRodrigues(const Vec3& axis) {
double theta = axis.norm();
Vec3 w = axis / theta;
Mat3 W = CrossProductMatrix(w);
@ -62,7 +66,6 @@ Mat3 RotationRodrigues(const Vec3 &axis) {
return Mat3::Identity() + sin(theta) * W + (1 - cos(theta)) * W * W;
}
Mat3 LookAt(Vec3 center) {
Vec3 zc = center.normalized();
Vec3 xc = Vec3::UnitY().cross(zc).normalized();
@ -74,19 +77,21 @@ Mat3 LookAt(Vec3 center) {
return R;
}
Mat3 CrossProductMatrix(const Vec3 &x) {
Mat3 CrossProductMatrix(const Vec3& x) {
Mat3 X;
// clang-format off
X << 0, -x(2), x(1),
x(2), 0, -x(0),
-x(1), x(0), 0;
// clang-format on
return X;
}
void MeanAndVarianceAlongRows(const Mat &A,
Vec *mean_pointer,
Vec *variance_pointer) {
Vec &mean = *mean_pointer;
Vec &variance = *variance_pointer;
void MeanAndVarianceAlongRows(const Mat& A,
Vec* mean_pointer,
Vec* variance_pointer) {
Vec& mean = *mean_pointer;
Vec& variance = *variance_pointer;
int n = A.rows();
int m = A.cols();
mean.resize(n);
@ -108,29 +113,28 @@ void MeanAndVarianceAlongRows(const Mat &A,
}
}
void HorizontalStack(const Mat &left, const Mat &right, Mat *stacked) {
void HorizontalStack(const Mat& left, const Mat& right, Mat* stacked) {
assert(left.rows() == right.rows());
int n = left.rows();
int m1 = left.cols();
int m2 = right.cols();
stacked->resize(n, m1 + m2);
stacked->block(0, 0, n, m1) = left;
stacked->block(0, 0, n, m1) = left;
stacked->block(0, m1, n, m2) = right;
}
void MatrixColumn(const Mat &A, int i, Vec2 *v) {
void MatrixColumn(const Mat& A, int i, Vec2* v) {
assert(A.rows() == 2);
*v << A(0, i), A(1, i);
}
void MatrixColumn(const Mat &A, int i, Vec3 *v) {
void MatrixColumn(const Mat& A, int i, Vec3* v) {
assert(A.rows() == 3);
*v << A(0, i), A(1, i), A(2, i);
}
void MatrixColumn(const Mat &A, int i, Vec4 *v) {
void MatrixColumn(const Mat& A, int i, Vec4* v) {
assert(A.rows() == 4);
*v << A(0, i), A(1, i), A(2, i), A(3, i);
}
} // namespace libmv
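An illustrative cross-check of the rotation helpers defined above (an assumption, not something this commit adds): Rodrigues' formula with an axis along +Z whose length is the angle should reproduce RotationAroundZ(angle):

#include "libmv/numeric/numeric.h"

// Valid for non-zero angles; RotationRodrigues normalizes by the axis norm.
bool RodriguesMatchesAxisRotation(double angle) {
  libmv::Mat3 R_rodrigues =
      libmv::RotationRodrigues(libmv::Vec3(0.0, 0.0, angle));
  libmv::Mat3 R_z = libmv::RotationAroundZ(angle);
  return (R_rodrigues - R_z).norm() < 1e-9;
}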

View File

@ -34,10 +34,9 @@
#include <Eigen/SVD>
#if !defined(__MINGW64__)
# if defined(_WIN32) || defined(__APPLE__) || \
defined(__FreeBSD__) || defined(__NetBSD__) || \
defined(__HAIKU__)
inline void sincos(double x, double *sinx, double *cosx) {
# if defined(_WIN32) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(__NetBSD__) || defined(__HAIKU__)
inline void sincos(double x, double* sinx, double* cosx) {
*sinx = sin(x);
*cosx = cos(x);
}
@ -46,11 +45,11 @@ inline void sincos(double x, double *sinx, double *cosx) {
#if (defined(WIN32) || defined(WIN64)) && !defined(__MINGW32__)
inline long lround(double d) {
return (long)(d>0 ? d+0.5 : ceil(d-0.5));
return (long)(d > 0 ? d + 0.5 : ceil(d - 0.5));
}
# if _MSC_VER < 1800
inline int round(double d) {
return (d>0) ? int(d+0.5) : int(d-0.5);
return (d > 0) ? int(d + 0.5) : int(d - 0.5);
}
# endif // _MSC_VER < 1800
typedef unsigned int uint;
@ -92,25 +91,25 @@ typedef Eigen::Matrix<double, 4, 4, Eigen::RowMajor> RMat4;
typedef Eigen::Matrix<double, 2, Eigen::Dynamic> Mat2X;
typedef Eigen::Matrix<double, 3, Eigen::Dynamic> Mat3X;
typedef Eigen::Matrix<double, 4, Eigen::Dynamic> Mat4X;
typedef Eigen::Matrix<double, Eigen::Dynamic, 2> MatX2;
typedef Eigen::Matrix<double, Eigen::Dynamic, 3> MatX3;
typedef Eigen::Matrix<double, Eigen::Dynamic, 4> MatX4;
typedef Eigen::Matrix<double, Eigen::Dynamic, 5> MatX5;
typedef Eigen::Matrix<double, Eigen::Dynamic, 6> MatX6;
typedef Eigen::Matrix<double, Eigen::Dynamic, 7> MatX7;
typedef Eigen::Matrix<double, Eigen::Dynamic, 8> MatX8;
typedef Eigen::Matrix<double, Eigen::Dynamic, 9> MatX9;
typedef Eigen::Matrix<double, Eigen::Dynamic, 2> MatX2;
typedef Eigen::Matrix<double, Eigen::Dynamic, 3> MatX3;
typedef Eigen::Matrix<double, Eigen::Dynamic, 4> MatX4;
typedef Eigen::Matrix<double, Eigen::Dynamic, 5> MatX5;
typedef Eigen::Matrix<double, Eigen::Dynamic, 6> MatX6;
typedef Eigen::Matrix<double, Eigen::Dynamic, 7> MatX7;
typedef Eigen::Matrix<double, Eigen::Dynamic, 8> MatX8;
typedef Eigen::Matrix<double, Eigen::Dynamic, 9> MatX9;
typedef Eigen::Matrix<double, Eigen::Dynamic, 15> MatX15;
typedef Eigen::Matrix<double, Eigen::Dynamic, 16> MatX16;
typedef Eigen::Vector2d Vec2;
typedef Eigen::Vector3d Vec3;
typedef Eigen::Vector4d Vec4;
typedef Eigen::Matrix<double, 5, 1> Vec5;
typedef Eigen::Matrix<double, 6, 1> Vec6;
typedef Eigen::Matrix<double, 7, 1> Vec7;
typedef Eigen::Matrix<double, 8, 1> Vec8;
typedef Eigen::Matrix<double, 9, 1> Vec9;
typedef Eigen::Matrix<double, 5, 1> Vec5;
typedef Eigen::Matrix<double, 6, 1> Vec6;
typedef Eigen::Matrix<double, 7, 1> Vec7;
typedef Eigen::Matrix<double, 8, 1> Vec8;
typedef Eigen::Matrix<double, 9, 1> Vec9;
typedef Eigen::Matrix<double, 10, 1> Vec10;
typedef Eigen::Matrix<double, 11, 1> Vec11;
typedef Eigen::Matrix<double, 12, 1> Vec12;
@ -133,15 +132,13 @@ typedef Eigen::Vector2i Vec2i;
typedef Eigen::Vector3i Vec3i;
typedef Eigen::Vector4i Vec4i;
typedef Eigen::Matrix<float,
Eigen::Dynamic,
Eigen::Dynamic,
Eigen::RowMajor> RMatf;
typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
RMatf;
typedef Eigen::NumTraits<double> EigenDouble;
using Eigen::Map;
using Eigen::Dynamic;
using Eigen::Map;
using Eigen::Matrix;
// Find U, s, and VT such that
@ -149,7 +146,7 @@ using Eigen::Matrix;
// A = U * diag(s) * VT
//
template <typename TMat, typename TVec>
inline void SVD(TMat * /*A*/, Vec * /*s*/, Mat * /*U*/, Mat * /*VT*/) {
inline void SVD(TMat* /*A*/, Vec* /*s*/, Mat* /*U*/, Mat* /*VT*/) {
assert(0);
}
@ -158,11 +155,11 @@ inline void SVD(TMat * /*A*/, Vec * /*s*/, Mat * /*U*/, Mat * /*VT*/) {
// Destroys A and resizes x if necessary.
// TODO(maclean): Take the SVD of the transpose instead of this zero padding.
template <typename TMat, typename TVec>
double Nullspace(TMat *A, TVec *nullspace) {
double Nullspace(TMat* A, TVec* nullspace) {
Eigen::JacobiSVD<TMat> svd(*A, Eigen::ComputeFullV);
(*nullspace) = svd.matrixV().col(A->cols()-1);
(*nullspace) = svd.matrixV().col(A->cols() - 1);
if (A->rows() >= A->cols())
return svd.singularValues()(A->cols()-1);
return svd.singularValues()(A->cols() - 1);
else
return 0.0;
}
@ -173,55 +170,55 @@ double Nullspace(TMat *A, TVec *nullspace) {
// the singular value corresponding to the solution x1. Destroys A and resizes
// x if necessary.
template <typename TMat, typename TVec1, typename TVec2>
double Nullspace2(TMat *A, TVec1 *x1, TVec2 *x2) {
double Nullspace2(TMat* A, TVec1* x1, TVec2* x2) {
Eigen::JacobiSVD<TMat> svd(*A, Eigen::ComputeFullV);
*x1 = svd.matrixV().col(A->cols() - 1);
*x2 = svd.matrixV().col(A->cols() - 2);
if (A->rows() >= A->cols())
return svd.singularValues()(A->cols()-1);
return svd.singularValues()(A->cols() - 1);
else
return 0.0;
}
// In place transpose for square matrices.
template<class TA>
inline void TransposeInPlace(TA *A) {
template <class TA>
inline void TransposeInPlace(TA* A) {
*A = A->transpose().eval();
}
template<typename TVec>
inline double NormL1(const TVec &x) {
template <typename TVec>
inline double NormL1(const TVec& x) {
return x.array().abs().sum();
}
template<typename TVec>
inline double NormL2(const TVec &x) {
template <typename TVec>
inline double NormL2(const TVec& x) {
return x.norm();
}
template<typename TVec>
inline double NormLInfinity(const TVec &x) {
template <typename TVec>
inline double NormLInfinity(const TVec& x) {
return x.array().abs().maxCoeff();
}
template<typename TVec>
inline double DistanceL1(const TVec &x, const TVec &y) {
template <typename TVec>
inline double DistanceL1(const TVec& x, const TVec& y) {
return (x - y).array().abs().sum();
}
template<typename TVec>
inline double DistanceL2(const TVec &x, const TVec &y) {
template <typename TVec>
inline double DistanceL2(const TVec& x, const TVec& y) {
return (x - y).norm();
}
template<typename TVec>
inline double DistanceLInfinity(const TVec &x, const TVec &y) {
template <typename TVec>
inline double DistanceLInfinity(const TVec& x, const TVec& y) {
return (x - y).array().abs().maxCoeff();
}
// Normalize a vector with the L1 norm, and return the norm before it was
// normalized.
template<typename TVec>
inline double NormalizeL1(TVec *x) {
template <typename TVec>
inline double NormalizeL1(TVec* x) {
double norm = NormL1(*x);
*x /= norm;
return norm;
@ -229,8 +226,8 @@ inline double NormalizeL1(TVec *x) {
// Normalize a vector with the L2 norm, and return the norm before it was
// normalized.
template<typename TVec>
inline double NormalizeL2(TVec *x) {
template <typename TVec>
inline double NormalizeL2(TVec* x) {
double norm = NormL2(*x);
*x /= norm;
return norm;
@ -238,15 +235,15 @@ inline double NormalizeL2(TVec *x) {
// Normalize a vector with the L^Infinity norm, and return the norm before it
// was normalized.
template<typename TVec>
inline double NormalizeLInfinity(TVec *x) {
template <typename TVec>
inline double NormalizeLInfinity(TVec* x) {
double norm = NormLInfinity(*x);
*x /= norm;
return norm;
}
// Return the square of a number.
template<typename T>
template <typename T>
inline T Square(T x) {
return x * x;
}
@ -258,7 +255,7 @@ Mat3 RotationAroundZ(double angle);
// Returns the rotation matrix of a rotation of angle |axis| around axis.
// This is computed using the Rodrigues formula, see:
// http://mathworld.wolfram.com/RodriguesRotationFormula.html
Mat3 RotationRodrigues(const Vec3 &axis);
Mat3 RotationRodrigues(const Vec3& axis);
// Make a rotation matrix such that center becomes the direction of the
// positive z-axis, and y is oriented close to up.
@ -266,177 +263,173 @@ Mat3 LookAt(Vec3 center);
// Return a diagonal matrix from a vector containing the diagonal values.
template <typename TVec>
inline Mat Diag(const TVec &x) {
inline Mat Diag(const TVec& x) {
return x.asDiagonal();
}
template<typename TMat>
inline double FrobeniusNorm(const TMat &A) {
template <typename TMat>
inline double FrobeniusNorm(const TMat& A) {
return sqrt(A.array().abs2().sum());
}
template<typename TMat>
inline double FrobeniusDistance(const TMat &A, const TMat &B) {
template <typename TMat>
inline double FrobeniusDistance(const TMat& A, const TMat& B) {
return FrobeniusNorm(A - B);
}
inline Vec3 CrossProduct(const Vec3 &x, const Vec3 &y) {
inline Vec3 CrossProduct(const Vec3& x, const Vec3& y) {
return x.cross(y);
}
Mat3 CrossProductMatrix(const Vec3 &x);
Mat3 CrossProductMatrix(const Vec3& x);
void MeanAndVarianceAlongRows(const Mat &A,
Vec *mean_pointer,
Vec *variance_pointer);
void MeanAndVarianceAlongRows(const Mat& A,
Vec* mean_pointer,
Vec* variance_pointer);
#if defined(_WIN32)
// TODO(bomboze): un-#if this for both platforms once tested under Windows
/* This solution was extensively discussed here
http://forum.kde.org/viewtopic.php?f=74&t=61940 */
#define SUM_OR_DYNAMIC(x, y) (x == Eigen::Dynamic || y == Eigen::Dynamic) ? Eigen::Dynamic : (x+y)
// TODO(bomboze): un-#if this for both platforms once tested under Windows
/* This solution was extensively discussed here
http://forum.kde.org/viewtopic.php?f=74&t=61940 */
# define SUM_OR_DYNAMIC(x, y) \
(x == Eigen::Dynamic || y == Eigen::Dynamic) ? Eigen::Dynamic : (x + y)
template<typename Derived1, typename Derived2>
struct hstack_return {
typedef typename Derived1::Scalar Scalar;
enum {
RowsAtCompileTime = Derived1::RowsAtCompileTime,
ColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::ColsAtCompileTime,
Derived2::ColsAtCompileTime),
Options = Derived1::Flags&Eigen::RowMajorBit ? Eigen::RowMajor : 0,
MaxRowsAtCompileTime = Derived1::MaxRowsAtCompileTime,
MaxColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxColsAtCompileTime,
Derived2::MaxColsAtCompileTime)
};
typedef Eigen::Matrix<Scalar,
RowsAtCompileTime,
ColsAtCompileTime,
Options,
MaxRowsAtCompileTime,
MaxColsAtCompileTime> type;
template <typename Derived1, typename Derived2>
struct hstack_return {
typedef typename Derived1::Scalar Scalar;
enum {
RowsAtCompileTime = Derived1::RowsAtCompileTime,
ColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::ColsAtCompileTime,
Derived2::ColsAtCompileTime),
Options = Derived1::Flags & Eigen::RowMajorBit ? Eigen::RowMajor : 0,
MaxRowsAtCompileTime = Derived1::MaxRowsAtCompileTime,
MaxColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxColsAtCompileTime,
Derived2::MaxColsAtCompileTime)
};
typedef Eigen::Matrix<Scalar,
RowsAtCompileTime,
ColsAtCompileTime,
Options,
MaxRowsAtCompileTime,
MaxColsAtCompileTime>
type;
};
template<typename Derived1, typename Derived2>
typename hstack_return<Derived1, Derived2>::type
HStack(const Eigen::MatrixBase<Derived1>& lhs,
const Eigen::MatrixBase<Derived2>& rhs) {
typename hstack_return<Derived1, Derived2>::type res;
res.resize(lhs.rows(), lhs.cols()+rhs.cols());
res << lhs, rhs;
return res;
};
template<typename Derived1, typename Derived2>
struct vstack_return {
typedef typename Derived1::Scalar Scalar;
enum {
RowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::RowsAtCompileTime,
Derived2::RowsAtCompileTime),
ColsAtCompileTime = Derived1::ColsAtCompileTime,
Options = Derived1::Flags&Eigen::RowMajorBit ? Eigen::RowMajor : 0,
MaxRowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxRowsAtCompileTime,
Derived2::MaxRowsAtCompileTime),
MaxColsAtCompileTime = Derived1::MaxColsAtCompileTime
};
typedef Eigen::Matrix<Scalar,
RowsAtCompileTime,
ColsAtCompileTime,
Options,
MaxRowsAtCompileTime,
MaxColsAtCompileTime> type;
};
template<typename Derived1, typename Derived2>
typename vstack_return<Derived1, Derived2>::type
VStack(const Eigen::MatrixBase<Derived1>& lhs,
const Eigen::MatrixBase<Derived2>& rhs) {
typename vstack_return<Derived1, Derived2>::type res;
res.resize(lhs.rows()+rhs.rows(), lhs.cols());
res << lhs, rhs;
return res;
template <typename Derived1, typename Derived2>
typename hstack_return<Derived1, Derived2>::type HStack(
const Eigen::MatrixBase<Derived1>& lhs,
const Eigen::MatrixBase<Derived2>& rhs) {
typename hstack_return<Derived1, Derived2>::type res;
res.resize(lhs.rows(), lhs.cols() + rhs.cols());
res << lhs, rhs;
return res;
};
template <typename Derived1, typename Derived2>
struct vstack_return {
typedef typename Derived1::Scalar Scalar;
enum {
RowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::RowsAtCompileTime,
Derived2::RowsAtCompileTime),
ColsAtCompileTime = Derived1::ColsAtCompileTime,
Options = Derived1::Flags & Eigen::RowMajorBit ? Eigen::RowMajor : 0,
MaxRowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxRowsAtCompileTime,
Derived2::MaxRowsAtCompileTime),
MaxColsAtCompileTime = Derived1::MaxColsAtCompileTime
};
typedef Eigen::Matrix<Scalar,
RowsAtCompileTime,
ColsAtCompileTime,
Options,
MaxRowsAtCompileTime,
MaxColsAtCompileTime>
type;
};
template <typename Derived1, typename Derived2>
typename vstack_return<Derived1, Derived2>::type VStack(
const Eigen::MatrixBase<Derived1>& lhs,
const Eigen::MatrixBase<Derived2>& rhs) {
typename vstack_return<Derived1, Derived2>::type res;
res.resize(lhs.rows() + rhs.rows(), lhs.cols());
res << lhs, rhs;
return res;
};
#else // _WIN32
// Since it is not possible to typedef privately here, use a macro.
// Always take dynamic columns if either side is dynamic.
#define COLS \
((ColsLeft == Eigen::Dynamic || ColsRight == Eigen::Dynamic) \
? Eigen::Dynamic : (ColsLeft + ColsRight))
// Since it is not possible to typedef privately here, use a macro.
// Always take dynamic columns if either side is dynamic.
# define COLS \
((ColsLeft == Eigen::Dynamic || ColsRight == Eigen::Dynamic) \
? Eigen::Dynamic \
: (ColsLeft + ColsRight))
// Same as above, except that prefer fixed size if either is fixed.
#define ROWS \
((RowsLeft == Eigen::Dynamic && RowsRight == Eigen::Dynamic) \
? Eigen::Dynamic \
: ((RowsLeft == Eigen::Dynamic) \
? RowsRight \
: RowsLeft \
) \
)
// Same as above, except that prefer fixed size if either is fixed.
# define ROWS \
((RowsLeft == Eigen::Dynamic && RowsRight == Eigen::Dynamic) \
? Eigen::Dynamic \
: ((RowsLeft == Eigen::Dynamic) ? RowsRight : RowsLeft))
// TODO(keir): Add a static assert if both rows are at compiletime.
template<typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
Eigen::Matrix<T, ROWS, COLS>
HStack(const Eigen::Matrix<T, RowsLeft, ColsLeft> &left,
const Eigen::Matrix<T, RowsRight, ColsRight> &right) {
assert(left.rows() == right.rows());
int n = left.rows();
int m1 = left.cols();
int m2 = right.cols();
// TODO(keir): Add a static assert if both rows are at compiletime.
template <typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
Eigen::Matrix<T, ROWS, COLS> HStack(
const Eigen::Matrix<T, RowsLeft, ColsLeft>& left,
const Eigen::Matrix<T, RowsRight, ColsRight>& right) {
assert(left.rows() == right.rows());
int n = left.rows();
int m1 = left.cols();
int m2 = right.cols();
Eigen::Matrix<T, ROWS, COLS> stacked(n, m1 + m2);
stacked.block(0, 0, n, m1) = left;
stacked.block(0, m1, n, m2) = right;
return stacked;
}
Eigen::Matrix<T, ROWS, COLS> stacked(n, m1 + m2);
stacked.block(0, 0, n, m1) = left;
stacked.block(0, m1, n, m2) = right;
return stacked;
}
// Reuse the above macros by swapping the order of Rows and Cols. Nasty, but
// the duplication is worse.
// TODO(keir): Add a static assert if both rows are at compiletime.
// TODO(keir): Mail eigen list about making this work for general expressions
// rather than only matrix types.
template<typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
Eigen::Matrix<T, COLS, ROWS>
VStack(const Eigen::Matrix<T, ColsLeft, RowsLeft> &top,
const Eigen::Matrix<T, ColsRight, RowsRight> &bottom) {
assert(top.cols() == bottom.cols());
int n1 = top.rows();
int n2 = bottom.rows();
int m = top.cols();
// Reuse the above macros by swapping the order of Rows and Cols. Nasty, but
// the duplication is worse.
// TODO(keir): Add a static assert if both rows are at compiletime.
// TODO(keir): Mail eigen list about making this work for general expressions
// rather than only matrix types.
template <typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
Eigen::Matrix<T, COLS, ROWS> VStack(
const Eigen::Matrix<T, ColsLeft, RowsLeft>& top,
const Eigen::Matrix<T, ColsRight, RowsRight>& bottom) {
assert(top.cols() == bottom.cols());
int n1 = top.rows();
int n2 = bottom.rows();
int m = top.cols();
Eigen::Matrix<T, COLS, ROWS> stacked(n1 + n2, m);
stacked.block(0, 0, n1, m) = top;
stacked.block(n1, 0, n2, m) = bottom;
return stacked;
}
#undef COLS
#undef ROWS
Eigen::Matrix<T, COLS, ROWS> stacked(n1 + n2, m);
stacked.block(0, 0, n1, m) = top;
stacked.block(n1, 0, n2, m) = bottom;
return stacked;
}
# undef COLS
# undef ROWS
#endif // _WIN32
void HorizontalStack(const Mat& left, const Mat& right, Mat* stacked);
void HorizontalStack(const Mat &left, const Mat &right, Mat *stacked);
template<typename TTop, typename TBot, typename TStacked>
void VerticalStack(const TTop &top, const TBot &bottom, TStacked *stacked) {
template <typename TTop, typename TBot, typename TStacked>
void VerticalStack(const TTop& top, const TBot& bottom, TStacked* stacked) {
assert(top.cols() == bottom.cols());
int n1 = top.rows();
int n2 = bottom.rows();
int m = top.cols();
stacked->resize(n1 + n2, m);
stacked->block(0, 0, n1, m) = top;
stacked->block(0, 0, n1, m) = top;
stacked->block(n1, 0, n2, m) = bottom;
}
void MatrixColumn(const Mat &A, int i, Vec2 *v);
void MatrixColumn(const Mat &A, int i, Vec3 *v);
void MatrixColumn(const Mat &A, int i, Vec4 *v);
void MatrixColumn(const Mat& A, int i, Vec2* v);
void MatrixColumn(const Mat& A, int i, Vec3* v);
void MatrixColumn(const Mat& A, int i, Vec4* v);
template <typename TMat, typename TCols>
TMat ExtractColumns(const TMat &A, const TCols &columns) {
TMat ExtractColumns(const TMat& A, const TCols& columns) {
TMat compressed(A.rows(), columns.size());
for (int i = 0; i < columns.size(); ++i) {
compressed.col(i) = A.col(columns[i]);
@ -445,12 +438,12 @@ TMat ExtractColumns(const TMat &A, const TCols &columns) {
}
template <typename TMat, typename TDest>
void reshape(const TMat &a, int rows, int cols, TDest *b) {
assert(a.rows()*a.cols() == rows*cols);
void reshape(const TMat& a, int rows, int cols, TDest* b) {
assert(a.rows() * a.cols() == rows * cols);
b->resize(rows, cols);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
(*b)(i, j) = a[cols*i + j];
(*b)(i, j) = a[cols * i + j];
}
}
}
@ -467,24 +460,21 @@ inline bool isnan(double i) {
/// and negative values
template <typename FloatType>
FloatType ceil0(const FloatType& value) {
FloatType result = std::ceil(std::fabs(value));
return (value < 0.0) ? -result : result;
FloatType result = std::ceil(std::fabs(value));
return (value < 0.0) ? -result : result;
}
/// Returns the skew anti-symmetric matrix of a vector
inline Mat3 SkewMat(const Vec3 &x) {
inline Mat3 SkewMat(const Vec3& x) {
Mat3 skew;
skew << 0 , -x(2), x(1),
x(2), 0 , -x(0),
-x(1), x(0), 0;
skew << 0, -x(2), x(1), x(2), 0, -x(0), -x(1), x(0), 0;
return skew;
}
/// Returns the skew anti-symmetric matrix of a vector with only
/// the first two (independent) lines
inline Mat23 SkewMatMinimal(const Vec2 &x) {
inline Mat23 SkewMatMinimal(const Vec2& x) {
Mat23 skew;
skew << 0, -1, x(1),
1, 0, -x(0);
skew << 0, -1, x(1), 1, 0, -x(0);
return skew;
}
@ -496,7 +486,8 @@ inline Mat3 RotationFromEulerVector(Vec3 euler_vector) {
}
Vec3 w = euler_vector / theta;
Mat3 w_hat = CrossProductMatrix(w);
return Mat3::Identity() + w_hat*sin(theta) + w_hat*w_hat*(1 - cos(theta));
return Mat3::Identity() + w_hat * sin(theta) +
w_hat * w_hat * (1 - cos(theta));
}
} // namespace libmv
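A small hedged example of the cross-product helpers declared above (not part of this commit; the check is a mathematical identity, [x]x * y == x x y):

#include "libmv/numeric/numeric.h"

bool CrossProductMatrixIsConsistent() {
  libmv::Vec3 x(1.0, 2.0, 3.0), y(4.0, 5.0, 6.0);
  libmv::Vec3 via_matrix = libmv::CrossProductMatrix(x) * y;
  libmv::Vec3 direct = libmv::CrossProduct(x, y);
  return (via_matrix - direct).norm() < 1e-12;
}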

View File

@ -27,9 +27,11 @@ namespace {
TEST(Numeric, DynamicSizedNullspace) {
Mat A(3, 4);
// clang-format off
A << 0.76026643, 0.01799744, 0.55192142, 0.8699745,
0.42016166, 0.97863392, 0.33711682, 0.14479271,
0.51016811, 0.66528302, 0.54395496, 0.57794893;
// clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(0.0, s, 1e-15);
@ -39,9 +41,11 @@ TEST(Numeric, DynamicSizedNullspace) {
TEST(Numeric, FixedSizeMatrixNullspace) {
Mat34 A;
// clang-format off
A << 0.76026643, 0.01799744, 0.55192142, 0.8699745,
0.42016166, 0.97863392, 0.33711682, 0.14479271,
0.51016811, 0.66528302, 0.54395496, 0.57794893;
// clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(0.0, s, 1e-15);
@ -51,10 +55,12 @@ TEST(Numeric, FixedSizeMatrixNullspace) {
TEST(Numeric, NullspaceMatchesLapackSVD) {
Mat43 A;
// clang-format off
A << 0.76026643, 0.01799744, 0.55192142,
0.8699745, 0.42016166, 0.97863392,
0.33711682, 0.14479271, 0.51016811,
0.66528302, 0.54395496, 0.57794893;
// clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(1.0, x.norm(), 1e-15);
@ -68,10 +74,12 @@ TEST(Numeric, NullspaceMatchesLapackSVD) {
TEST(Numeric, Nullspace2) {
Mat43 A;
// clang-format off
A << 0.76026643, 0.01799744, 0.55192142,
0.8699745, 0.42016166, 0.97863392,
0.33711682, 0.14479271, 0.51016811,
0.66528302, 0.54395496, 0.57794893;
// clang-format on
Vec3 x1, x2;
double s = Nullspace2(&A, &x1, &x2);
EXPECT_NEAR(1.0, x1.norm(), 1e-15);
@ -80,14 +88,14 @@ TEST(Numeric, Nullspace2) {
EXPECT_NEAR(-0.64999717, x1(0), 1e-8);
EXPECT_NEAR(-0.18452646, x1(1), 1e-8);
EXPECT_NEAR( 0.7371931, x1(2), 1e-8);
EXPECT_NEAR(0.7371931, x1(2), 1e-8);
if (x2(0) < 0) {
x2 *= -1;
}
EXPECT_NEAR( 0.34679618, x2(0), 1e-8);
EXPECT_NEAR(0.34679618, x2(0), 1e-8);
EXPECT_NEAR(-0.93519689, x2(1), 1e-8);
EXPECT_NEAR( 0.07168809, x2(2), 1e-8);
EXPECT_NEAR(0.07168809, x2(2), 1e-8);
}
TEST(Numeric, TinyMatrixSquareTranspose) {
@ -105,8 +113,8 @@ TEST(Numeric, NormalizeL1) {
x << 1, 2;
double l1 = NormalizeL1(&x);
EXPECT_DOUBLE_EQ(3., l1);
EXPECT_DOUBLE_EQ(1./3., x(0));
EXPECT_DOUBLE_EQ(2./3., x(1));
EXPECT_DOUBLE_EQ(1. / 3., x(0));
EXPECT_DOUBLE_EQ(2. / 3., x(1));
}
TEST(Numeric, NormalizeL2) {
@ -114,8 +122,8 @@ TEST(Numeric, NormalizeL2) {
x << 1, 2;
double l2 = NormalizeL2(&x);
EXPECT_DOUBLE_EQ(sqrt(5.0), l2);
EXPECT_DOUBLE_EQ(1./sqrt(5.), x(0));
EXPECT_DOUBLE_EQ(2./sqrt(5.), x(1));
EXPECT_DOUBLE_EQ(1. / sqrt(5.), x(0));
EXPECT_DOUBLE_EQ(2. / sqrt(5.), x(1));
}
TEST(Numeric, Diag) {
@ -130,31 +138,32 @@ TEST(Numeric, Diag) {
TEST(Numeric, Determinant) {
Mat A(2, 2);
A << 1, 2,
-1, 3;
A << 1, 2, -1, 3;
double detA = A.determinant();
EXPECT_NEAR(5, detA, 1e-8);
Mat B(4, 4);
// clang-format off
B << 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15;
// clang-format on
double detB = B.determinant();
EXPECT_NEAR(0, detB, 1e-8);
Mat3 C;
C << 0, 1, 2,
3, 4, 5,
6, 7, 1;
C << 0, 1, 2, 3, 4, 5, 6, 7, 1;
double detC = C.determinant();
EXPECT_NEAR(21, detC, 1e-8);
}
TEST(Numeric, Inverse) {
Mat A(2, 2), A1;
// clang-format off
A << 1, 2,
-1, 3;
// clang-format on
Mat I = A * A.inverse();
EXPECT_NEAR(1, I(0, 0), 1e-8);
@ -163,10 +172,12 @@ TEST(Numeric, Inverse) {
EXPECT_NEAR(1, I(1, 1), 1e-8);
Mat B(4, 4), B1;
// clang-format off
B << 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 2, 11,
12, 13, 14, 4;
// clang-format on
Mat I2 = B * B.inverse();
EXPECT_NEAR(1, I2(0, 0), 1e-8);
EXPECT_NEAR(0, I2(0, 1), 1e-8);
@ -182,8 +193,10 @@ TEST(Numeric, Inverse) {
TEST(Numeric, MeanAndVarianceAlongRows) {
int n = 4;
Mat points(2, n);
// clang-format off
points << 0, 0, 1, 1,
0, 2, 1, 3;
// clang-format on
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
@ -213,15 +226,17 @@ TEST(Numeric, HStack) {
Mat x(2, 1), y(2, 1), z(2, 2);
x << 1, 2;
y << 3, 4;
// clang-format off
z << 1, 3,
2, 4;
// clang-format on
Vec2 xC = x, yC = y;
Mat2 xy = HStack(x, y);
Mat2 xy = HStack(x, y);
EXPECT_MATRIX_EQ(z, xy);
EXPECT_MATRIX_EQ(z, HStack(x, y));
EXPECT_MATRIX_EQ(z, HStack(x, yC));
EXPECT_MATRIX_EQ(z, HStack(x, y));
EXPECT_MATRIX_EQ(z, HStack(x, yC));
EXPECT_MATRIX_EQ(z, HStack(xC, y));
EXPECT_MATRIX_EQ(z, HStack(xC, yC));
}
@ -230,6 +245,7 @@ TEST(Numeric, HStack) {
// resulting stacked matrices properly propagate the fixed dimensions.
TEST(Numeric, VStack) {
Mat x(2, 2), y(2, 2), z(4, 2);
// clang-format off
x << 1, 2,
3, 4;
y << 10, 20,
@ -238,13 +254,14 @@ TEST(Numeric, VStack) {
3, 4,
10, 20,
30, 40;
// clang-format on
Mat2 xC = x, yC = y;
Mat xy = VStack(x, y);
Mat xy = VStack(x, y);
EXPECT_MATRIX_EQ(z, xy);
EXPECT_MATRIX_EQ(z, VStack(x, y));
EXPECT_MATRIX_EQ(z, VStack(x, yC));
EXPECT_MATRIX_EQ(z, VStack(x, y));
EXPECT_MATRIX_EQ(z, VStack(x, yC));
EXPECT_MATRIX_EQ(z, VStack(xC, y));
EXPECT_MATRIX_EQ(z, VStack(xC, yC));
}
@ -293,17 +310,21 @@ TEST(Numeric, CrossProductMatrix) {
TEST(Numeric, MatrixColumn) {
Mat A2(2, 3);
Vec2 v2;
// clang-format off
A2 << 1, 2, 3,
4, 5, 6;
// clang-format on
MatrixColumn(A2, 1, &v2);
EXPECT_EQ(2, v2(0));
EXPECT_EQ(5, v2(1));
Mat A3(3, 3);
Vec3 v3;
// clang-format off
A3 << 1, 2, 3,
4, 5, 6,
7, 8, 9;
// clang-format on
MatrixColumn(A3, 1, &v3);
EXPECT_EQ(2, v3(0));
EXPECT_EQ(5, v3(1));
@ -311,14 +332,16 @@ TEST(Numeric, MatrixColumn) {
Mat A4(4, 3);
Vec4 v4;
// clang-format off
A4 << 1, 2, 3,
4, 5, 6,
7, 8, 9,
10, 11, 12;
// clang-format on
MatrixColumn(A4, 1, &v4);
EXPECT_EQ( 2, v4(0));
EXPECT_EQ( 5, v4(1));
EXPECT_EQ( 8, v4(2));
EXPECT_EQ(2, v4(0));
EXPECT_EQ(5, v4(1));
EXPECT_EQ(8, v4(2));
EXPECT_EQ(11, v4(3));
}
@ -337,7 +360,8 @@ TEST(Numeric, Mat3MatProduct) {
// This gives a compile error.
TEST(Numeric, Vec3Negative) {
Vec3 y; y << 1, 2, 3;
Vec3 y;
y << 1, 2, 3;
Vec3 x = -y;
EXPECT_EQ(-1, x(0));
EXPECT_EQ(-2, x(1));
@ -357,19 +381,23 @@ TEST(Numeric, Vec3VecInteroperability) {
// This segfaults inside lapack.
TEST(Numeric, DeterminantLU7) {
Mat A(5, 5);
// clang-format off
A << 1, 0, 0, 0, 0,
0, 1, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 1, 0,
0, 0, 0, 0, 1;
// clang-format on
EXPECT_NEAR(1, A.determinant(), 1e-8);
}
// This segfaults inside lapack.
TEST(Numeric, DeterminantLU) {
Mat A(2, 2);
// clang-format off
A << 1, 2,
-1, 3;
// clang-format on
EXPECT_NEAR(5, A.determinant(), 1e-8);
}
@ -377,19 +405,24 @@ TEST(Numeric, DeterminantLU) {
// Keir: Not with eigen2!
TEST(Numeric, InplaceProduct) {
Mat2 K, S;
// clang-format off
K << 1, 0,
0, 1;
S << 1, 0,
0, 1;
K = K * S;
// clang-format on
EXPECT_MATRIX_NEAR(Mat2::Identity(), K, 1e-8);
}
TEST(Numeric, ExtractColumns) {
Mat2X A(2, 5);
// clang-format off
A << 1, 2, 3, 4, 5,
6, 7, 8, 9, 10;
Vec2i columns; columns << 0, 2;
// clang-format on
Vec2i columns;
columns << 0, 2;
Mat2X extracted = ExtractColumns(A, columns);
EXPECT_NEAR(1, extracted(0, 0), 1e-15);
EXPECT_NEAR(3, extracted(0, 1), 1e-15);
@ -418,21 +451,22 @@ TEST(Numeric, RotationRodrigues) {
TEST(Numeric, LookAt) {
// Simple orthogonality check.
Vec3 e; e << 1, 2, 3;
Vec3 e;
e << 1, 2, 3;
Mat3 R = LookAt(e), I = Mat3::Identity();
Mat3 RRT = R*R.transpose();
Mat3 RTR = R.transpose()*R;
Mat3 RRT = R * R.transpose();
Mat3 RTR = R.transpose() * R;
EXPECT_MATRIX_NEAR(I, RRT, 1e-15);
EXPECT_MATRIX_NEAR(I, RTR, 1e-15);
}
TEST(Numeric, Reshape) {
Vec4 x; x << 1, 2, 3, 4;
Vec4 x;
x << 1, 2, 3, 4;
Mat2 M, M_expected;
reshape(x, 2, 2, &M);
M_expected << 1, 2,
3, 4;
M_expected << 1, 2, 3, 4;
EXPECT_MATRIX_NEAR(M_expected, M, 1e-15);
}

View File

@ -21,8 +21,8 @@
#ifndef LIBMV_NUMERIC_POLY_H_
#define LIBMV_NUMERIC_POLY_H_
#include <cmath>
#include <stdio.h>
#include <cmath>
namespace libmv {
@ -35,9 +35,8 @@ namespace libmv {
// if there are 2 roots, only x0 and x1 are set.
//
// The GSL cubic solver was used as a reference for this routine.
template<typename Real>
int SolveCubicPolynomial(Real a, Real b, Real c,
Real *x0, Real *x1, Real *x2) {
template <typename Real>
int SolveCubicPolynomial(Real a, Real b, Real c, Real* x0, Real* x1, Real* x2) {
Real q = a * a - 3 * b;
Real r = 2 * a * a * a - 9 * a * b + 27 * c;
@ -65,12 +64,12 @@ int SolveCubicPolynomial(Real a, Real b, Real c,
Real sqrtQ = sqrt(Q);
if (R > 0) {
*x0 = -2 * sqrtQ - a / 3;
*x1 = sqrtQ - a / 3;
*x2 = sqrtQ - a / 3;
*x1 = sqrtQ - a / 3;
*x2 = sqrtQ - a / 3;
} else {
*x0 = -sqrtQ - a / 3;
*x1 = -sqrtQ - a / 3;
*x2 = 2 * sqrtQ - a / 3;
*x0 = -sqrtQ - a / 3;
*x1 = -sqrtQ - a / 3;
*x2 = 2 * sqrtQ - a / 3;
}
return 3;
@ -97,15 +96,15 @@ int SolveCubicPolynomial(Real a, Real b, Real c,
return 3;
}
Real sgnR = (R >= 0 ? 1 : -1);
Real A = -sgnR * pow(fabs(R) + sqrt(R2 - Q3), 1.0/3.0);
Real A = -sgnR * pow(fabs(R) + sqrt(R2 - Q3), 1.0 / 3.0);
Real B = Q / A;
*x0 = A + B - a / 3;
return 1;
}
// The coefficients are in ascending powers, i.e. coeffs[N]*x^N.
template<typename Real>
int SolveCubicPolynomial(const Real *coeffs, Real *solutions) {
template <typename Real>
int SolveCubicPolynomial(const Real* coeffs, Real* solutions) {
if (coeffs[0] == 0.0) {
// TODO(keir): This is a quadratic not a cubic. Implement a quadratic
// solver!
@ -114,10 +113,8 @@ int SolveCubicPolynomial(const Real *coeffs, Real *solutions) {
Real a = coeffs[2] / coeffs[3];
Real b = coeffs[1] / coeffs[3];
Real c = coeffs[0] / coeffs[3];
return SolveCubicPolynomial(a, b, c,
solutions + 0,
solutions + 1,
solutions + 2);
return SolveCubicPolynomial(
a, b, c, solutions + 0, solutions + 1, solutions + 2);
}
} // namespace libmv
#endif // LIBMV_NUMERIC_POLY_H_

View File

@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv;
@ -34,8 +34,8 @@ namespace {
//
// x^3 - (c+b+a) * x^2 + (a*b+(b+a)*c) * x - a*b*c = 0,
// i.e. p = -(c+b+a), q = a*b + (b+a)*c, r = -a*b*c.
void CoeffsForCubicZeros(double a, double b, double c,
double *p, double *q, double *r) {
void CoeffsForCubicZeros(
double a, double b, double c, double* p, double* q, double* r) {
*p = -(c + b + a);
*q = (a * b + (b + a) * c);
*r = -a * b * c;
@ -45,35 +45,45 @@ TEST(Poly, SolveCubicPolynomial) {
double a, b, c, aa, bb, cc;
double p, q, r;
a = 1; b = 2; c = 3;
a = 1;
b = 2;
c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
a = 0; b = 1; c = 3;
a = 0;
b = 1;
c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
a = -10; b = 0; c = 1;
a = -10;
b = 0;
c = 1;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
a = -8; b = 1; c = 3;
a = -8;
b = 1;
c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
a = 28; b = 28; c = 105;
a = 28;
b = 28;
c = 105;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);

View File

@ -32,16 +32,16 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/distortion_models.h"
#include "libmv/simple_pipeline/packed_intrinsics.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
namespace {
bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics *intrinsics) {
bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics* intrinsics) {
const DistortionModelType distortion_model =
intrinsics->GetDistortionModelType();
return (distortion_model == DISTORTION_MODEL_NUKE);
@ -59,12 +59,14 @@ bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics *intrinsics) {
// The invariant_intrinsics are used to access intrinsics which are never
// packed into the parameter block: for example, distortion model type and image
// dimension.
template<typename T>
template <typename T>
void ApplyDistortionModelUsingIntrinsicsBlock(
const CameraIntrinsics *invariant_intrinsics,
const CameraIntrinsics* invariant_intrinsics,
const T* const intrinsics_block,
const T& normalized_x, const T& normalized_y,
T* distorted_x, T* distorted_y) {
const T& normalized_x,
const T& normalized_y,
T* distorted_x,
T* distorted_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@ -76,65 +78,75 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
// TODO(keir): Do early bailouts for zero distortion; these are expensive
// jet operations.
switch (invariant_intrinsics->GetDistortionModelType()) {
case DISTORTION_MODEL_POLYNOMIAL:
{
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
case DISTORTION_MODEL_POLYNOMIAL: {
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
ApplyPolynomialDistortionModel(focal_length,
focal_length,
principal_point_x,
principal_point_y,
k1, k2, k3,
p1, p2,
normalized_x, normalized_y,
distorted_x, distorted_y);
return;
}
case DISTORTION_MODEL_DIVISION:
{
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
ApplyDivisionDistortionModel(focal_length,
ApplyPolynomialDistortionModel(focal_length,
focal_length,
principal_point_x,
principal_point_y,
k1, k2,
normalized_x, normalized_y,
distorted_x, distorted_y);
return;
}
k1,
k2,
k3,
p1,
p2,
normalized_x,
normalized_y,
distorted_x,
distorted_y);
return;
}
case DISTORTION_MODEL_NUKE:
{
LOG(FATAL) << "Unsupported distortion model.";
return;
}
case DISTORTION_MODEL_DIVISION: {
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
case DISTORTION_MODEL_BROWN:
{
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
const T& k4 = intrinsics_block[PackedIntrinsics::OFFSET_K4];
const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
ApplyDivisionDistortionModel(focal_length,
focal_length,
principal_point_x,
principal_point_y,
k1,
k2,
normalized_x,
normalized_y,
distorted_x,
distorted_y);
return;
}
ApplyBrownDistortionModel(focal_length,
focal_length,
principal_point_x,
principal_point_y,
k1, k2, k3, k4,
p1, p2,
normalized_x, normalized_y,
distorted_x, distorted_y);
return;
}
case DISTORTION_MODEL_NUKE: {
LOG(FATAL) << "Unsupported distortion model.";
return;
}
case DISTORTION_MODEL_BROWN: {
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
const T& k4 = intrinsics_block[PackedIntrinsics::OFFSET_K4];
const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
ApplyBrownDistortionModel(focal_length,
focal_length,
principal_point_x,
principal_point_y,
k1,
k2,
k3,
k4,
p1,
p2,
normalized_x,
normalized_y,
distorted_x,
distorted_y);
return;
}
}
LOG(FATAL) << "Unknown distortion model.";
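
For readers unfamiliar with the models dispatched above: the DISTORTION_MODEL_POLYNOMIAL branch forwards k1..k3 and p1, p2 to ApplyPolynomialDistortionModel. Below is a hedged sketch of the usual polynomial (Brown-Conrady style) radial plus tangential model it is commonly understood to implement; the authoritative formulation, including the p1/p2 convention, lives in distortion_models.h:

// Sketch only; the exact formulation is defined by
// ApplyPolynomialDistortionModel in distortion_models.h.
template <typename T>
void PolynomialDistortionSketch(const T& focal_x,
                                const T& focal_y,
                                const T& ppx,
                                const T& ppy,
                                const T& k1,
                                const T& k2,
                                const T& k3,
                                const T& p1,
                                const T& p2,
                                const T& xn,
                                const T& yn,
                                T* image_x,
                                T* image_y) {
  const T r2 = xn * xn + yn * yn;
  const T radial = T(1) + r2 * (k1 + r2 * (k2 + r2 * k3));
  const T xd = xn * radial + T(2) * p1 * xn * yn + p2 * (r2 + T(2) * xn * xn);
  const T yd = yn * radial + p1 * (r2 + T(2) * yn * yn) + T(2) * p2 * xn * yn;
  *image_x = focal_x * xd + ppx;  // Back to pixel coordinates.
  *image_y = focal_y * yd + ppy;
}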
@ -152,12 +164,14 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
// The invariant_intrinsics are used to access intrinsics which are never
// packed into parameter block: for example, distortion model type and image
// dimension.
template<typename T>
template <typename T>
void InvertDistortionModelUsingIntrinsicsBlock(
const CameraIntrinsics *invariant_intrinsics,
const CameraIntrinsics* invariant_intrinsics,
const T* const intrinsics_block,
const T& image_x, const T& image_y,
T* normalized_x, T* normalized_y) {
const T& image_x,
const T& image_y,
T* normalized_x,
T* normalized_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@ -175,31 +189,35 @@ void InvertDistortionModelUsingIntrinsicsBlock(
LOG(FATAL) << "Unsupported distortion model.";
return;
case DISTORTION_MODEL_NUKE:
{
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
case DISTORTION_MODEL_NUKE: {
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
InvertNukeDistortionModel(focal_length,
focal_length,
principal_point_x,
principal_point_y,
invariant_intrinsics->image_width(),
invariant_intrinsics->image_height(),
k1, k2,
image_x, image_y,
normalized_x, normalized_y);
return;
}
InvertNukeDistortionModel(focal_length,
focal_length,
principal_point_x,
principal_point_y,
invariant_intrinsics->image_width(),
invariant_intrinsics->image_height(),
k1,
k2,
image_x,
image_y,
normalized_x,
normalized_y);
return;
}
}
LOG(FATAL) << "Unknown distortion model.";
}
template<typename T>
template <typename T>
void NormalizedToImageSpace(const T* const intrinsics_block,
const T& normalized_x, const T& normalized_y,
T* image_x, T* image_y) {
const T& normalized_x,
const T& normalized_y,
T* image_x,
T* image_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@ -219,11 +237,10 @@ void NormalizedToImageSpace(const T* const intrinsics_block,
// This functor can only be used for distortion models which have analytically
// defined Apply() function.
struct ReprojectionErrorApplyIntrinsics {
ReprojectionErrorApplyIntrinsics(
const CameraIntrinsics *invariant_intrinsics,
const double observed_distorted_x,
const double observed_distorted_y,
const double weight)
ReprojectionErrorApplyIntrinsics(const CameraIntrinsics* invariant_intrinsics,
const double observed_distorted_x,
const double observed_distorted_y,
const double weight)
: invariant_intrinsics_(invariant_intrinsics),
observed_distorted_x_(observed_distorted_x),
observed_distorted_y_(observed_distorted_y),
@ -253,11 +270,12 @@ struct ReprojectionErrorApplyIntrinsics {
T yn = x[1] / x[2];
T predicted_distorted_x, predicted_distorted_y;
ApplyDistortionModelUsingIntrinsicsBlock(
invariant_intrinsics_,
intrinsics,
xn, yn,
&predicted_distorted_x, &predicted_distorted_y);
ApplyDistortionModelUsingIntrinsicsBlock(invariant_intrinsics_,
intrinsics,
xn,
yn,
&predicted_distorted_x,
&predicted_distorted_y);
// The error is the difference between the predicted and observed position.
residuals[0] = (predicted_distorted_x - T(observed_distorted_x_)) * weight_;
@ -265,7 +283,7 @@ struct ReprojectionErrorApplyIntrinsics {
return true;
}
const CameraIntrinsics *invariant_intrinsics_;
const CameraIntrinsics* invariant_intrinsics_;
const double observed_distorted_x_;
const double observed_distorted_y_;
const double weight_;
@ -279,7 +297,7 @@ struct ReprojectionErrorApplyIntrinsics {
// defined Invert() function.
struct ReprojectionErrorInvertIntrinsics {
ReprojectionErrorInvertIntrinsics(
const CameraIntrinsics *invariant_intrinsics,
const CameraIntrinsics* invariant_intrinsics,
const double observed_distorted_x,
const double observed_distorted_y,
const double weight)
@ -295,8 +313,7 @@ struct ReprojectionErrorInvertIntrinsics {
const T* const X, // Point coordinates 3x1.
T* residuals) const {
// Unpack the intrinsics.
const T& focal_length =
intrinsics[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
const T& focal_length = intrinsics[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
const T& principal_point_x =
intrinsics[PackedIntrinsics::OFFSET_PRINCIPAL_POINT_X];
const T& principal_point_y =
@ -327,14 +344,17 @@ struct ReprojectionErrorInvertIntrinsics {
InvertDistortionModelUsingIntrinsicsBlock(
invariant_intrinsics_,
intrinsics,
T(observed_distorted_x_), T(observed_distorted_y_),
&observed_undistorted_normalized_x, &observed_undistorted_normalized_y);
T(observed_distorted_x_),
T(observed_distorted_y_),
&observed_undistorted_normalized_x,
&observed_undistorted_normalized_y);
T observed_undistorted_image_x, observed_undistorted_image_y;
NormalizedToImageSpace(
intrinsics,
observed_undistorted_normalized_x, observed_undistorted_normalized_y,
&observed_undistorted_image_x, &observed_undistorted_image_y);
NormalizedToImageSpace(intrinsics,
observed_undistorted_normalized_x,
observed_undistorted_normalized_y,
&observed_undistorted_image_x,
&observed_undistorted_image_y);
// The error is the difference between the predicted and observed position.
residuals[0] = (predicted_x - observed_undistorted_image_x) * weight_;
@ -343,7 +363,7 @@ struct ReprojectionErrorInvertIntrinsics {
return true;
}
const CameraIntrinsics *invariant_intrinsics_;
const CameraIntrinsics* invariant_intrinsics_;
const double observed_distorted_x_;
const double observed_distorted_y_;
const double weight_;
@ -356,22 +376,23 @@ void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
} else {
std::string bundling_message = "";
#define APPEND_BUNDLING_INTRINSICS(name, flag) \
if (bundle_intrinsics & flag) { \
if (!bundling_message.empty()) { \
bundling_message += ", "; \
} \
bundling_message += name; \
} (void)0
#define APPEND_BUNDLING_INTRINSICS(name, flag) \
if (bundle_intrinsics & flag) { \
if (!bundling_message.empty()) { \
bundling_message += ", "; \
} \
bundling_message += name; \
} \
(void)0
APPEND_BUNDLING_INTRINSICS("f", BUNDLE_FOCAL_LENGTH);
APPEND_BUNDLING_INTRINSICS("f", BUNDLE_FOCAL_LENGTH);
APPEND_BUNDLING_INTRINSICS("px, py", BUNDLE_PRINCIPAL_POINT);
APPEND_BUNDLING_INTRINSICS("k1", BUNDLE_RADIAL_K1);
APPEND_BUNDLING_INTRINSICS("k2", BUNDLE_RADIAL_K2);
APPEND_BUNDLING_INTRINSICS("k3", BUNDLE_RADIAL_K3);
APPEND_BUNDLING_INTRINSICS("k4", BUNDLE_RADIAL_K4);
APPEND_BUNDLING_INTRINSICS("p1", BUNDLE_TANGENTIAL_P1);
APPEND_BUNDLING_INTRINSICS("p2", BUNDLE_TANGENTIAL_P2);
APPEND_BUNDLING_INTRINSICS("k1", BUNDLE_RADIAL_K1);
APPEND_BUNDLING_INTRINSICS("k2", BUNDLE_RADIAL_K2);
APPEND_BUNDLING_INTRINSICS("k3", BUNDLE_RADIAL_K3);
APPEND_BUNDLING_INTRINSICS("k4", BUNDLE_RADIAL_K4);
APPEND_BUNDLING_INTRINSICS("p1", BUNDLE_TANGENTIAL_P1);
APPEND_BUNDLING_INTRINSICS("p2", BUNDLE_TANGENTIAL_P2);
LG << "Bundling " << bundling_message << ".";
}
@ -383,7 +404,7 @@ void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
// Element with key i matches to a rotation+translation for
// camera at image i.
map<int, Vec6> PackCamerasRotationAndTranslation(
const EuclideanReconstruction &reconstruction) {
const EuclideanReconstruction& reconstruction) {
map<int, Vec6> all_cameras_R_t;
vector<EuclideanCamera> all_cameras = reconstruction.AllCameras();
@ -399,14 +420,13 @@ map<int, Vec6> PackCamerasRotationAndTranslation(
// Convert camera rotations from angle axis back to rotation matrix.
void UnpackCamerasRotationAndTranslation(
const map<int, Vec6> &all_cameras_R_t,
EuclideanReconstruction *reconstruction) {
const map<int, Vec6>& all_cameras_R_t,
EuclideanReconstruction* reconstruction) {
for (map<int, Vec6>::value_type image_and_camera_R_T : all_cameras_R_t) {
const int image = image_and_camera_R_T.first;
const Vec6& camera_R_t = image_and_camera_R_T.second;
EuclideanCamera *camera = reconstruction->CameraForImage(image);
EuclideanCamera* camera = reconstruction->CameraForImage(image);
if (!camera) {
continue;
}
@ -421,8 +441,8 @@ void UnpackCamerasRotationAndTranslation(
//
// TODO(sergey): currently uses dense Eigen matrices, best would
// be to use sparse Eigen matrices
void CRSMatrixToEigenMatrix(const ceres::CRSMatrix &crs_matrix,
Mat *eigen_matrix) {
void CRSMatrixToEigenMatrix(const ceres::CRSMatrix& crs_matrix,
Mat* eigen_matrix) {
eigen_matrix->resize(crs_matrix.num_rows, crs_matrix.num_cols);
eigen_matrix->setZero();
@ -439,11 +459,11 @@ void CRSMatrixToEigenMatrix(const ceres::CRSMatrix &crs_matrix,
}
}
void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
EuclideanReconstruction *reconstruction,
map<int, Vec6> *all_cameras_R_t,
ceres::Problem *problem,
BundleEvaluation *evaluation) {
void EuclideanBundlerPerformEvaluation(const Tracks& tracks,
EuclideanReconstruction* reconstruction,
map<int, Vec6>* all_cameras_R_t,
ceres::Problem* problem,
BundleEvaluation* evaluation) {
int max_track = tracks.MaxTrack();
// Number of camera rotations equals the number of translations,
int num_cameras = all_cameras_R_t->size();
@ -451,7 +471,7 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
vector<EuclideanPoint*> minimized_points;
for (int i = 0; i <= max_track; i++) {
EuclideanPoint *point = reconstruction->PointForTrack(i);
EuclideanPoint* point = reconstruction->PointForTrack(i);
if (point) {
// We need to know whether the track is a constant zero weight.
// If so, it wouldn't have a parameter block in the problem.
@ -477,16 +497,16 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
evaluation->num_cameras = num_cameras;
evaluation->num_points = num_points;
if (evaluation->evaluate_jacobian) { // Evaluate jacobian matrix.
if (evaluation->evaluate_jacobian) { // Evaluate jacobian matrix.
ceres::CRSMatrix evaluated_jacobian;
ceres::Problem::EvaluateOptions eval_options;
// Cameras go first in the ordering.
int max_image = tracks.MaxImage();
for (int i = 0; i <= max_image; i++) {
const EuclideanCamera *camera = reconstruction->CameraForImage(i);
const EuclideanCamera* camera = reconstruction->CameraForImage(i);
if (camera) {
double *current_camera_R_t = &(*all_cameras_R_t)[i](0);
double* current_camera_R_t = &(*all_cameras_R_t)[i](0);
// All cameras are variable now.
problem->SetParameterBlockVariable(current_camera_R_t);
@ -497,63 +517,65 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
// Points go at the end of the ordering,
for (int i = 0; i < minimized_points.size(); i++) {
EuclideanPoint *point = minimized_points.at(i);
EuclideanPoint* point = minimized_points.at(i);
eval_options.parameter_blocks.push_back(&point->X(0));
}
problem->Evaluate(eval_options,
NULL, NULL, NULL,
&evaluated_jacobian);
problem->Evaluate(eval_options, NULL, NULL, NULL, &evaluated_jacobian);
CRSMatrixToEigenMatrix(evaluated_jacobian, &evaluation->jacobian);
}
}
template<typename CostFunction>
void AddResidualBlockToProblemImpl(const CameraIntrinsics *invariant_intrinsics,
double observed_x, double observed_y,
template <typename CostFunction>
void AddResidualBlockToProblemImpl(const CameraIntrinsics* invariant_intrinsics,
double observed_x,
double observed_y,
double weight,
double *intrinsics_block,
double *camera_R_t,
EuclideanPoint *point,
double* intrinsics_block,
double* camera_R_t,
EuclideanPoint* point,
ceres::Problem* problem) {
problem->AddResidualBlock(new ceres::AutoDiffCostFunction<
CostFunction, 2, PackedIntrinsics::NUM_PARAMETERS, 6, 3>(
new CostFunction(
invariant_intrinsics,
observed_x, observed_y,
weight)),
problem->AddResidualBlock(
new ceres::AutoDiffCostFunction<CostFunction,
2,
PackedIntrinsics::NUM_PARAMETERS,
6,
3>(new CostFunction(
invariant_intrinsics, observed_x, observed_y, weight)),
NULL,
intrinsics_block,
camera_R_t,
&point->X(0));
}
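
A note on the AutoDiffCostFunction template arguments used above: <CostFunction, 2, PackedIntrinsics::NUM_PARAMETERS, 6, 3> declares two residuals followed by the sizes of the three parameter blocks (the packed intrinsics, the 6-element angle-axis plus translation camera block, and the 3D point). A self-contained toy sketch of the same Ceres pattern; the functor and its block sizes here are hypothetical and not part of Libmv:

#include <ceres/ceres.h>

// Toy functor: two residuals, parameter blocks of size 1, 6 and 3.
struct ToyReprojectionError {
  template <typename T>
  bool operator()(const T* const intrinsics,  // E.g. focal length only.
                  const T* const camera_R_t,  // Angle-axis + translation.
                  const T* const point,       // 3D point.
                  T* residuals) const {
    // A real functor would project the point through the camera and the
    // intrinsics; here only the interface shape matters.
    residuals[0] = point[0] - camera_R_t[3];
    residuals[1] = point[1] - camera_R_t[4];
    return true;
  }
};

void AddToyResidualBlock(double* intrinsics,
                         double* camera_R_t,
                         double* point,
                         ceres::Problem* problem) {
  problem->AddResidualBlock(
      new ceres::AutoDiffCostFunction<ToyReprojectionError, 2, 1, 6, 3>(
          new ToyReprojectionError),
      NULL,  // No robust loss, matching the bundler above.
      intrinsics,
      camera_R_t,
      point);
}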
void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
const Marker &marker,
void AddResidualBlockToProblem(const CameraIntrinsics* invariant_intrinsics,
const Marker& marker,
double marker_weight,
double* intrinsics_block,
double *camera_R_t,
EuclideanPoint *point,
double* camera_R_t,
EuclideanPoint* point,
ceres::Problem* problem) {
if (NeedUseInvertIntrinsicsPipeline(invariant_intrinsics)) {
AddResidualBlockToProblemImpl<ReprojectionErrorInvertIntrinsics>(
invariant_intrinsics,
marker.x, marker.y,
marker_weight,
intrinsics_block,
camera_R_t,
point,
problem);
invariant_intrinsics,
marker.x,
marker.y,
marker_weight,
intrinsics_block,
camera_R_t,
point,
problem);
} else {
AddResidualBlockToProblemImpl<ReprojectionErrorApplyIntrinsics>(
invariant_intrinsics,
marker.x, marker.y,
marker_weight,
intrinsics_block,
camera_R_t,
point,
problem);
invariant_intrinsics,
marker.x,
marker.y,
marker_weight,
intrinsics_block,
camera_R_t,
point,
problem);
}
}
@ -566,25 +588,25 @@ void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
//
// At this point we only need to bundle point positions; the cameras
// are to stay totally still here.
void EuclideanBundlePointsOnly(const CameraIntrinsics *invariant_intrinsics,
const vector<Marker> &markers,
map<int, Vec6> &all_cameras_R_t,
void EuclideanBundlePointsOnly(const CameraIntrinsics* invariant_intrinsics,
const vector<Marker>& markers,
map<int, Vec6>& all_cameras_R_t,
double* intrinsics_block,
EuclideanReconstruction *reconstruction) {
EuclideanReconstruction* reconstruction) {
ceres::Problem::Options problem_options;
ceres::Problem problem(problem_options);
int num_residuals = 0;
for (int i = 0; i < markers.size(); ++i) {
const Marker &marker = markers[i];
EuclideanCamera *camera = reconstruction->CameraForImage(marker.image);
EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
const Marker& marker = markers[i];
EuclideanCamera* camera = reconstruction->CameraForImage(marker.image);
EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
if (camera == NULL || point == NULL) {
continue;
}
// Rotation of the camera denoted in angle axis, followed by
// the camera translation.
double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
double* current_camera_R_t = &all_cameras_R_t[camera->image](0);
AddResidualBlockToProblem(invariant_intrinsics,
marker,
@ -625,8 +647,8 @@ void EuclideanBundlePointsOnly(const CameraIntrinsics *invariant_intrinsics,
} // namespace
void EuclideanBundle(const Tracks &tracks,
EuclideanReconstruction *reconstruction) {
void EuclideanBundle(const Tracks& tracks,
EuclideanReconstruction* reconstruction) {
PolynomialCameraIntrinsics empty_intrinsics;
EuclideanBundleCommonIntrinsics(tracks,
BUNDLE_NO_INTRINSICS,
@ -636,13 +658,12 @@ void EuclideanBundle(const Tracks &tracks,
NULL);
}
void EuclideanBundleCommonIntrinsics(
const Tracks &tracks,
const int bundle_intrinsics,
const int bundle_constraints,
EuclideanReconstruction *reconstruction,
CameraIntrinsics *intrinsics,
BundleEvaluation *evaluation) {
void EuclideanBundleCommonIntrinsics(const Tracks& tracks,
const int bundle_intrinsics,
const int bundle_constraints,
EuclideanReconstruction* reconstruction,
CameraIntrinsics* intrinsics,
BundleEvaluation* evaluation) {
LG << "Original intrinsics: " << *intrinsics;
vector<Marker> markers = tracks.AllMarkers();
@ -661,19 +682,19 @@ void EuclideanBundleCommonIntrinsics(
// Block for minimization has got the following structure:
// <3 elements for angle-axis> <3 elements for translation>
map<int, Vec6> all_cameras_R_t =
PackCamerasRotationAndTranslation(*reconstruction);
PackCamerasRotationAndTranslation(*reconstruction);
// Parameterization used to restrict camera motion for modal solvers.
ceres::SubsetParameterization *constant_translation_parameterization = NULL;
ceres::SubsetParameterization* constant_translation_parameterization = NULL;
if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
std::vector<int> constant_translation;
std::vector<int> constant_translation;
// First three elements are rotation, last three are translation.
constant_translation.push_back(3);
constant_translation.push_back(4);
constant_translation.push_back(5);
// First three elements are rotation, last three are translation.
constant_translation.push_back(3);
constant_translation.push_back(4);
constant_translation.push_back(5);
constant_translation_parameterization =
constant_translation_parameterization =
new ceres::SubsetParameterization(6, constant_translation);
}
@ -683,16 +704,16 @@ void EuclideanBundleCommonIntrinsics(
int num_residuals = 0;
bool have_locked_camera = false;
for (int i = 0; i < markers.size(); ++i) {
const Marker &marker = markers[i];
EuclideanCamera *camera = reconstruction->CameraForImage(marker.image);
EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
const Marker& marker = markers[i];
EuclideanCamera* camera = reconstruction->CameraForImage(marker.image);
EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
if (camera == NULL || point == NULL) {
continue;
}
// Rotation of the camera denoted in angle axis, followed by
// the camera translation.
double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
double* current_camera_R_t = &all_cameras_R_t[camera->image](0);
// Skip the residual block for markers which have absolutely
// no effect on the final solution.
@ -706,7 +727,8 @@ void EuclideanBundleCommonIntrinsics(
point,
&problem);
// We lock the first camera to better deal with scene orientation ambiguity.
// We lock the first camera to better deal with scene orientation
// ambiguity.
if (!have_locked_camera) {
problem.SetParameterBlockConstant(current_camera_R_t);
have_locked_camera = true;
@ -729,7 +751,7 @@ void EuclideanBundleCommonIntrinsics(
}
if (intrinsics->GetDistortionModelType() == DISTORTION_MODEL_DIVISION &&
(bundle_intrinsics & BUNDLE_TANGENTIAL) != 0) {
(bundle_intrinsics & BUNDLE_TANGENTIAL) != 0) {
LOG(FATAL) << "Division model doesn't support bundling "
"of tangential distortion";
}
@ -745,29 +767,29 @@ void EuclideanBundleCommonIntrinsics(
// constant using some macro trickery.
std::vector<int> constant_intrinsics;
#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
if (!(bundle_intrinsics & bundle_enum) || \
!packed_intrinsics.IsParameterDefined(offset)) { \
constant_intrinsics.push_back(offset); \
}
#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
if (!(bundle_intrinsics & bundle_enum) || \
!packed_intrinsics.IsParameterDefined(offset)) { \
constant_intrinsics.push_back(offset); \
}
MAYBE_SET_CONSTANT(BUNDLE_FOCAL_LENGTH,
PackedIntrinsics::OFFSET_FOCAL_LENGTH);
MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT,
PackedIntrinsics::OFFSET_PRINCIPAL_POINT_X);
MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT,
PackedIntrinsics::OFFSET_PRINCIPAL_POINT_Y);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K1, PackedIntrinsics::OFFSET_K1);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K2, PackedIntrinsics::OFFSET_K2);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K3, PackedIntrinsics::OFFSET_K3);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K4, PackedIntrinsics::OFFSET_K4);
MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P1, PackedIntrinsics::OFFSET_P1);
MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P2, PackedIntrinsics::OFFSET_P2);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K1, PackedIntrinsics::OFFSET_K1);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K2, PackedIntrinsics::OFFSET_K2);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K3, PackedIntrinsics::OFFSET_K3);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K4, PackedIntrinsics::OFFSET_K4);
MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P1, PackedIntrinsics::OFFSET_P1);
MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P2, PackedIntrinsics::OFFSET_P2);
#undef MAYBE_SET_CONSTANT
if (!constant_intrinsics.empty()) {
ceres::SubsetParameterization *subset_parameterization =
new ceres::SubsetParameterization(PackedIntrinsics::NUM_PARAMETERS,
constant_intrinsics);
ceres::SubsetParameterization* subset_parameterization =
new ceres::SubsetParameterization(PackedIntrinsics::NUM_PARAMETERS,
constant_intrinsics);
problem.SetParameterization(intrinsics_block, subset_parameterization);
}
@ -800,8 +822,8 @@ void EuclideanBundleCommonIntrinsics(
LG << "Final intrinsics: " << *intrinsics;
if (evaluation) {
EuclideanBundlerPerformEvaluation(tracks, reconstruction, &all_cameras_R_t,
&problem, evaluation);
EuclideanBundlerPerformEvaluation(
tracks, reconstruction, &all_cameras_R_t, &problem, evaluation);
}
// Separate step to adjust positions of tracks which are
@ -828,8 +850,8 @@ void EuclideanBundleCommonIntrinsics(
}
}
void ProjectiveBundle(const Tracks & /*tracks*/,
ProjectiveReconstruction * /*reconstruction*/) {
void ProjectiveBundle(const Tracks& /*tracks*/,
ProjectiveReconstruction* /*reconstruction*/) {
// TODO(keir): Implement this! This can't work until we have a better bundler
// than SSBA, since SSBA has no support for projective bundling.
}

View File

@ -31,11 +31,8 @@ class ProjectiveReconstruction;
class Tracks;
struct BundleEvaluation {
BundleEvaluation() :
num_cameras(0),
num_points(0),
evaluate_jacobian(false) {
}
BundleEvaluation()
: num_cameras(0), num_points(0), evaluate_jacobian(false) {}
// Number of cameras appeared in bundle adjustment problem
int num_cameras;
@ -72,8 +69,8 @@ struct BundleEvaluation {
\sa EuclideanResect, EuclideanIntersect, EuclideanReconstructTwoFrames
*/
void EuclideanBundle(const Tracks &tracks,
EuclideanReconstruction *reconstruction);
void EuclideanBundle(const Tracks& tracks,
EuclideanReconstruction* reconstruction);
/*!
Refine camera poses and 3D coordinates using bundle adjustment.
@ -109,9 +106,7 @@ enum BundleIntrinsics {
BUNDLE_RADIAL_K2 = (1 << 3),
BUNDLE_RADIAL_K3 = (1 << 4),
BUNDLE_RADIAL_K4 = (1 << 5),
BUNDLE_RADIAL = (BUNDLE_RADIAL_K1 |
BUNDLE_RADIAL_K2 |
BUNDLE_RADIAL_K3 |
BUNDLE_RADIAL = (BUNDLE_RADIAL_K1 | BUNDLE_RADIAL_K2 | BUNDLE_RADIAL_K3 |
BUNDLE_RADIAL_K4),
BUNDLE_TANGENTIAL_P1 = (1 << 6),
@ -122,13 +117,12 @@ enum BundleConstraints {
BUNDLE_NO_CONSTRAINTS = 0,
BUNDLE_NO_TRANSLATION = 1,
};
void EuclideanBundleCommonIntrinsics(
const Tracks &tracks,
const int bundle_intrinsics,
const int bundle_constraints,
EuclideanReconstruction *reconstruction,
CameraIntrinsics *intrinsics,
BundleEvaluation *evaluation = NULL);
void EuclideanBundleCommonIntrinsics(const Tracks& tracks,
const int bundle_intrinsics,
const int bundle_constraints,
EuclideanReconstruction* reconstruction,
CameraIntrinsics* intrinsics,
BundleEvaluation* evaluation = NULL);
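
For orientation, a hypothetical call site combining the bit flags above; the tracks, reconstruction and intrinsics objects are assumed to be populated elsewhere, and only focal length plus the radial terms are refined while the jacobian is requested through the optional evaluation argument:

#include "libmv/simple_pipeline/bundle.h"

// Hypothetical helper, not part of the library.
void RefineFocalAndRadialSketch(const libmv::Tracks& tracks,
                                libmv::EuclideanReconstruction* reconstruction,
                                libmv::CameraIntrinsics* intrinsics) {
  libmv::BundleEvaluation evaluation;
  evaluation.evaluate_jacobian = true;  // Also request the jacobian.

  libmv::EuclideanBundleCommonIntrinsics(
      tracks,
      libmv::BUNDLE_FOCAL_LENGTH | libmv::BUNDLE_RADIAL,
      libmv::BUNDLE_NO_CONSTRAINTS,
      reconstruction,
      intrinsics,
      &evaluation);
}

Passing NULL (the default) for the last argument skips the jacobian evaluation entirely.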
/*!
Refine camera poses and 3D coordinates using bundle adjustment.
@ -147,10 +141,9 @@ void EuclideanBundleCommonIntrinsics(
\sa ProjectiveResect, ProjectiveIntersect, ProjectiveReconstructTwoFrames
*/
void ProjectiveBundle(const Tracks &tracks,
ProjectiveReconstruction *reconstruction);
void ProjectiveBundle(const Tracks& tracks,
ProjectiveReconstruction* reconstruction);
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_BUNDLE_H
#endif // LIBMV_SIMPLE_PIPELINE_BUNDLE_H

Some files were not shown because too many files have changed in this diff.