Cleanup: Libmv, clang-format

The formatting is based on the Google style, which was already used in
the Libmv project, but it is now applied consistently to the sources of
the library itself and to the C-API. The C-API will likely be removed
over time, and this makes it easier to have it follow the Libmv style,
hence the divergence from Blender's style.

There are quite a few exceptions (clang-format off) in the code around
Eigen matrix initialization. This is rather annoying, and there might be
a neater way to keep such initialization readable without the exceptions.
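
As an illustration (not taken from this diff; the function and variable
names are hypothetical), such an exception typically wraps an Eigen
comma-initializer so its visual row layout survives formatting:

#include <Eigen/Core>

// Build a hypothetical 3x3 calibration matrix; a minimal sketch only.
static Eigen::Matrix3d MakeCalibrationMatrix(double fx,
                                             double fy,
                                             double cx,
                                             double cy) {
  Eigen::Matrix3d K;
  // clang-format off
  K << fx,  0.0, cx,
       0.0, fy,  cy,
       0.0, 0.0, 1.0;
  // clang-format on
  return K;
}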

There could be some places where a loss of readability in matrix
initialization went unnoticed, as the change is quite big. If this has
happened, it is easier to address readability once actually working on
that code.

This change also made it possible to spot some missing header guards,
so that's nice.
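
For reference, the guard pattern used across these headers looks like
the following (the macro name here is only an example, not a file from
the diff):

#ifndef LIBMV_EXAMPLE_HEADER_H_
#define LIBMV_EXAMPLE_HEADER_H_

// Declarations go here.

#endif  // LIBMV_EXAMPLE_HEADER_H_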

This is done in the bundled version, as the upstream library first needs
some of the recent development ported over from the bundle to upstream.

There should be no functional changes.
Sergey Sharybin 2021-03-05 14:42:30 +01:00
parent e0442a955b
commit 0dd9a4a576
151 changed files with 4893 additions and 4469 deletions

View File

@ -1,2 +1,34 @@
DisableFormat: true
SortIncludes: false
BasedOnStyle: Google
ColumnLimit: 80
Standard: Cpp11
# Indent nested preprocessor.
# #ifdef Foo
# # include <nested>
# #endif
IndentPPDirectives: AfterHash
# For cases when a namespace is closed with a wrong comment.
FixNamespaceComments: true
AllowShortFunctionsOnASingleLine: InlineOnly
AllowShortBlocksOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: true
# No bin packing, every argument is on its own line.
BinPackArguments: false
BinPackParameters: false
# Ensure pointer alignment.
# ObjectType* object;
PointerAlignment: Left
DerivePointerAlignment: false
AlignEscapedNewlines: Right
IncludeBlocks: Preserve
SortIncludes: true
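
(Editorial illustration, not part of the committed file: a hypothetical
C++ snippet showing roughly what the options above produce, namely left
pointer alignment, one parameter per line, and short inline functions
kept on a single line. All names are invented.)

// Pointers bind to the type (PointerAlignment: Left), and parameters
// are not bin-packed, so each one gets its own line.
void libmv_exampleUndistortImage(const float* source_image,
                                 int width,
                                 int height,
                                 int channels,
                                 float* destination_image);

class ExampleCache {
 public:
  // AllowShortFunctionsOnASingleLine: InlineOnly keeps this on one line.
  int size() const { return size_; }

 private:
  int size_ = 0;
};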

View File

@ -22,11 +22,11 @@
#include "intern/utildefines.h"
#include "libmv/autotrack/autotrack.h"
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
using mv::AutoTrack;
using mv::FrameAccessor;
using mv::Marker;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* frame_accessor) {
return (libmv_AutoTrack*)LIBMV_OBJECT_NEW(AutoTrack,
@ -53,16 +53,13 @@ int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
libmv_Marker* libmv_tracked_marker,
libmv_TrackRegionResult* libmv_result) {
Marker tracked_marker;
TrackRegionOptions options;
TrackRegionResult result;
libmv_apiMarkerToMarker(*libmv_tracked_marker, &tracked_marker);
libmv_configureTrackRegionOptions(*libmv_options,
&options);
bool ok = (((AutoTrack*) libmv_autotrack)->TrackMarker(&tracked_marker,
&result,
&options));
libmv_configureTrackRegionOptions(*libmv_options, &options);
bool ok = (((AutoTrack*)libmv_autotrack)
->TrackMarker(&tracked_marker, &result, &options));
libmv_markerToApiMarker(tracked_marker, libmv_tracked_marker);
libmv_regionTrackergetResult(result, libmv_result);
return ok && result.is_usable();
@ -96,10 +93,8 @@ int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int track,
libmv_Marker* libmv_marker) {
Marker marker;
int ok = ((AutoTrack*) libmv_autotrack)->GetMarker(clip,
frame,
track,
&marker);
int ok =
((AutoTrack*)libmv_autotrack)->GetMarker(clip, frame, track, &marker);
if (ok) {
libmv_markerToApiMarker(marker, libmv_marker);
}

View File

@ -21,9 +21,9 @@
#define LIBMV_C_API_AUTOTRACK_H_
#include "intern/frame_accessor.h"
#include "intern/tracksN.h"
#include "intern/track_region.h"
#include "intern/region.h"
#include "intern/track_region.h"
#include "intern/tracksN.h"
#ifdef __cplusplus
extern "C" {

View File

@ -21,11 +21,11 @@
#include "intern/utildefines.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
using libmv::BrownCameraIntrinsics;
using libmv::CameraIntrinsics;
using libmv::DivisionCameraIntrinsics;
using libmv::PolynomialCameraIntrinsics;
using libmv::NukeCameraIntrinsics;
using libmv::BrownCameraIntrinsics;
using libmv::PolynomialCameraIntrinsics;
libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options) {
@ -41,40 +41,34 @@ libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
CameraIntrinsics* new_intrinsics = NULL;
switch (orig_intrinsics->GetDistortionModelType()) {
case libmv::DISTORTION_MODEL_POLYNOMIAL:
{
case libmv::DISTORTION_MODEL_POLYNOMIAL: {
const PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics,
*polynomial_intrinsics);
new_intrinsics =
LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics, *polynomial_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_DIVISION:
{
case libmv::DISTORTION_MODEL_DIVISION: {
const DivisionCameraIntrinsics* division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics,
*division_intrinsics);
new_intrinsics =
LIBMV_OBJECT_NEW(DivisionCameraIntrinsics, *division_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_NUKE:
{
case libmv::DISTORTION_MODEL_NUKE: {
const NukeCameraIntrinsics* nuke_intrinsics =
static_cast<const NukeCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics,
*nuke_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics, *nuke_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_BROWN:
{
case libmv::DISTORTION_MODEL_BROWN: {
const BrownCameraIntrinsics* brown_intrinsics =
static_cast<const BrownCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(BrownCameraIntrinsics,
*brown_intrinsics);
new_intrinsics =
LIBMV_OBJECT_NEW(BrownCameraIntrinsics, *brown_intrinsics);
break;
}
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
return (libmv_CameraIntrinsics*)new_intrinsics;
}
@ -115,8 +109,7 @@ void libmv_cameraIntrinsicsUpdate(
}
switch (libmv_camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
{
case LIBMV_DISTORTION_MODEL_POLYNOMIAL: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_POLYNOMIAL);
@ -135,8 +128,7 @@ void libmv_cameraIntrinsicsUpdate(
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION:
{
case LIBMV_DISTORTION_MODEL_DIVISION: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_DIVISION);
@ -146,16 +138,14 @@ void libmv_cameraIntrinsicsUpdate(
double k1 = libmv_camera_intrinsics_options->division_k1;
double k2 = libmv_camera_intrinsics_options->division_k2;
if (division_intrinsics->k1() != k1 ||
division_intrinsics->k2() != k2) {
if (division_intrinsics->k1() != k1 || division_intrinsics->k2() != k2) {
division_intrinsics->SetDistortion(k1, k2);
}
break;
}
case LIBMV_DISTORTION_MODEL_NUKE:
{
case LIBMV_DISTORTION_MODEL_NUKE: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_NUKE);
@ -165,16 +155,14 @@ void libmv_cameraIntrinsicsUpdate(
double k1 = libmv_camera_intrinsics_options->nuke_k1;
double k2 = libmv_camera_intrinsics_options->nuke_k2;
if (nuke_intrinsics->k1() != k1 ||
nuke_intrinsics->k2() != k2) {
if (nuke_intrinsics->k1() != k1 || nuke_intrinsics->k2() != k2) {
nuke_intrinsics->SetDistortion(k1, k2);
}
break;
}
case LIBMV_DISTORTION_MODEL_BROWN:
{
case LIBMV_DISTORTION_MODEL_BROWN: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_BROWN);
@ -186,10 +174,8 @@ void libmv_cameraIntrinsicsUpdate(
double k3 = libmv_camera_intrinsics_options->brown_k3;
double k4 = libmv_camera_intrinsics_options->brown_k4;
if (brown_intrinsics->k1() != k1 ||
brown_intrinsics->k2() != k2 ||
brown_intrinsics->k3() != k3 ||
brown_intrinsics->k4() != k4) {
if (brown_intrinsics->k1() != k1 || brown_intrinsics->k2() != k2 ||
brown_intrinsics->k3() != k3 || brown_intrinsics->k4() != k4) {
brown_intrinsics->SetRadialDistortion(k1, k2, k3, k4);
}
@ -202,8 +188,7 @@ void libmv_cameraIntrinsicsUpdate(
break;
}
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
}
@ -230,8 +215,7 @@ void libmv_cameraIntrinsicsExtractOptions(
camera_intrinsics_options->image_height = camera_intrinsics->image_height();
switch (camera_intrinsics->GetDistortionModelType()) {
case libmv::DISTORTION_MODEL_POLYNOMIAL:
{
case libmv::DISTORTION_MODEL_POLYNOMIAL: {
const PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
@ -244,8 +228,7 @@ void libmv_cameraIntrinsicsExtractOptions(
break;
}
case libmv::DISTORTION_MODEL_DIVISION:
{
case libmv::DISTORTION_MODEL_DIVISION: {
const DivisionCameraIntrinsics* division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
@ -255,19 +238,16 @@ void libmv_cameraIntrinsicsExtractOptions(
break;
}
case libmv::DISTORTION_MODEL_NUKE:
{
case libmv::DISTORTION_MODEL_NUKE: {
const NukeCameraIntrinsics* nuke_intrinsics =
static_cast<const NukeCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_NUKE;
camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_NUKE;
camera_intrinsics_options->nuke_k1 = nuke_intrinsics->k1();
camera_intrinsics_options->nuke_k2 = nuke_intrinsics->k2();
break;
}
case libmv::DISTORTION_MODEL_BROWN:
{
case libmv::DISTORTION_MODEL_BROWN: {
const BrownCameraIntrinsics* brown_intrinsics =
static_cast<const BrownCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
@ -281,8 +261,7 @@ void libmv_cameraIntrinsicsExtractOptions(
break;
}
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
}
@ -295,11 +274,8 @@ void libmv_cameraIntrinsicsUndistortByte(
int channels,
unsigned char* destination_image) {
CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
camera_intrinsics->UndistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
camera_intrinsics->UndistortBuffer(
source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsUndistortFloat(
@ -311,11 +287,8 @@ void libmv_cameraIntrinsicsUndistortFloat(
int channels,
float* destination_image) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->UndistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
intrinsics->UndistortBuffer(
source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsDistortByte(
@ -327,11 +300,8 @@ void libmv_cameraIntrinsicsDistortByte(
int channels,
unsigned char* destination_image) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->DistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
intrinsics->DistortBuffer(
source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsDistortFloat(
@ -343,11 +313,8 @@ void libmv_cameraIntrinsicsDistortFloat(
int channels,
float* destination_image) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->DistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
intrinsics->DistortBuffer(
source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsApply(
@ -384,8 +351,7 @@ static void libmv_cameraIntrinsicsFillFromOptions(
camera_intrinsics_options->image_height);
switch (camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
{
case LIBMV_DISTORTION_MODEL_POLYNOMIAL: {
PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<PolynomialCameraIntrinsics*>(camera_intrinsics);
@ -397,8 +363,7 @@ static void libmv_cameraIntrinsicsFillFromOptions(
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION:
{
case LIBMV_DISTORTION_MODEL_DIVISION: {
DivisionCameraIntrinsics* division_intrinsics =
static_cast<DivisionCameraIntrinsics*>(camera_intrinsics);
@ -408,19 +373,16 @@ static void libmv_cameraIntrinsicsFillFromOptions(
break;
}
case LIBMV_DISTORTION_MODEL_NUKE:
{
case LIBMV_DISTORTION_MODEL_NUKE: {
NukeCameraIntrinsics* nuke_intrinsics =
static_cast<NukeCameraIntrinsics*>(camera_intrinsics);
nuke_intrinsics->SetDistortion(
camera_intrinsics_options->nuke_k1,
nuke_intrinsics->SetDistortion(camera_intrinsics_options->nuke_k1,
camera_intrinsics_options->nuke_k2);
break;
}
case LIBMV_DISTORTION_MODEL_BROWN:
{
case LIBMV_DISTORTION_MODEL_BROWN: {
BrownCameraIntrinsics* brown_intrinsics =
static_cast<BrownCameraIntrinsics*>(camera_intrinsics);
@ -436,8 +398,7 @@ static void libmv_cameraIntrinsicsFillFromOptions(
break;
}
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
}
@ -457,8 +418,7 @@ CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
case LIBMV_DISTORTION_MODEL_BROWN:
camera_intrinsics = LIBMV_OBJECT_NEW(BrownCameraIntrinsics);
break;
default:
assert(!"Unknown distortion model");
default: assert(!"Unknown distortion model");
}
libmv_cameraIntrinsicsFillFromOptions(camera_intrinsics_options,
camera_intrinsics);

View File

@ -42,14 +42,12 @@ struct LibmvFrameAccessor : public FrameAccessor {
get_mask_for_track_callback_(get_mask_for_track_callback),
release_mask_callback_(release_mask_callback) {}
virtual ~LibmvFrameAccessor() {
}
virtual ~LibmvFrameAccessor() {}
libmv_InputMode get_libmv_input_mode(InputMode input_mode) {
switch (input_mode) {
#define CHECK_INPUT_MODE(mode) \
case mode: \
return LIBMV_IMAGE_MODE_ ## mode;
case mode: return LIBMV_IMAGE_MODE_##mode;
CHECK_INPUT_MODE(MONO)
CHECK_INPUT_MODE(RGBA)
#undef CHECK_INPUT_MODE
@ -59,8 +57,7 @@ struct LibmvFrameAccessor : public FrameAccessor {
return LIBMV_IMAGE_MODE_MONO;
}
void get_libmv_region(const Region& region,
libmv_Region* libmv_region) {
void get_libmv_region(const Region& region, libmv_Region* libmv_region) {
libmv_region->min[0] = region.min(0);
libmv_region->min[1] = region.min(1);
libmv_region->max[0] = region.max(0);
@ -93,18 +90,13 @@ struct LibmvFrameAccessor : public FrameAccessor {
&channels);
// TODO(sergey): Dumb code for until we can set data directly.
FloatImage temp_image(float_buffer,
height,
width,
channels);
FloatImage temp_image(float_buffer, height, width, channels);
destination->CopyFrom(temp_image);
return cache_key;
}
void ReleaseImage(Key cache_key) {
release_image_callback_(cache_key);
}
void ReleaseImage(Key cache_key) { release_image_callback_(cache_key); }
Key GetMaskForTrack(int clip,
int frame,
@ -117,8 +109,8 @@ struct LibmvFrameAccessor : public FrameAccessor {
if (region) {
get_libmv_region(*region, &libmv_region);
}
Key cache_key = get_mask_for_track_callback_(
user_data_,
Key cache_key =
get_mask_for_track_callback_(user_data_,
clip,
frame,
track,
@ -133,30 +125,21 @@ struct LibmvFrameAccessor : public FrameAccessor {
}
// TODO(sergey): Dumb code for until we can set data directly.
FloatImage temp_image(float_buffer,
height,
width,
1);
FloatImage temp_image(float_buffer, height, width, 1);
destination->CopyFrom(temp_image);
return cache_key;
}
void ReleaseMask(Key key) {
release_mask_callback_(key);
}
void ReleaseMask(Key key) { release_mask_callback_(key); }
bool GetClipDimensions(int /*clip*/, int* /*width*/, int* /*height*/) {
return false;
}
int NumClips() {
return 1;
}
int NumClips() { return 1; }
int NumFrames(int /*clip*/) {
return 0;
}
int NumFrames(int /*clip*/) { return 0; }
libmv_FrameAccessorUserData* user_data_;
libmv_GetImageCallback get_image_callback_;
@ -185,7 +168,8 @@ void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor) {
LIBMV_OBJECT_DELETE(frame_accessor, LibmvFrameAccessor);
}
int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform) {
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform* transform) {
return ((FrameAccessor::Transform*)transform)->key();
}
@ -198,8 +182,7 @@ void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
input_image->channels);
FloatImage output;
((FrameAccessor::Transform*) transform)->run(input,
&output);
((FrameAccessor::Transform*)transform)->run(input, &output);
int num_pixels = output.Width() * output.Height() * output.Depth();
output_image->buffer = new float[num_pixels];

View File

@ -73,7 +73,8 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_ReleaseMaskCallback release_mask_callback);
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor);
int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform);
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform* transform);
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* transform,
const libmv_FloatImage* input_image,

View File

@ -41,10 +41,8 @@ void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
LG << "x2: " << x2_mat;
libmv::EstimateHomographyOptions options;
libmv::EstimateHomography2DFromCorrespondences(x1_mat,
x2_mat,
options,
&H_mat);
libmv::EstimateHomography2DFromCorrespondences(
x1_mat, x2_mat, options, &H_mat);
LG << "H: " << H_mat;

View File

@ -21,8 +21,8 @@
#include "intern/utildefines.h"
#include "libmv/tracking/track_region.h"
#include <cassert>
#include <png.h>
#include <cassert>
using libmv::FloatImage;
using libmv::SamplePlanarPatch;
@ -63,8 +63,7 @@ void libmv_floatBufferToFloatImage(const float* buffer,
}
}
void libmv_floatImageToFloatBuffer(const FloatImage &image,
float* buffer) {
void libmv_floatImageToFloatBuffer(const FloatImage& image, float* buffer) {
for (int y = 0, a = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
@ -180,9 +179,8 @@ bool libmv_saveImage(const FloatImage& image,
static int image_counter = 0;
char file_name[128];
snprintf(file_name, sizeof(file_name),
"%s_%02d.png",
prefix, ++image_counter);
snprintf(
file_name, sizeof(file_name), "%s_%02d.png", prefix, ++image_counter);
bool result = savePNGImage(row_pointers,
image.Width(),
image.Height(),
@ -221,8 +219,10 @@ void libmv_samplePlanarPatchFloat(const float* image,
}
SamplePlanarPatch(libmv_image,
xs, ys,
num_samples_x, num_samples_y,
xs,
ys,
num_samples_x,
num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,
@ -254,8 +254,10 @@ void libmv_samplePlanarPatchByte(const unsigned char* image,
}
libmv::SamplePlanarPatch(libmv_image,
xs, ys,
num_samples_x, num_samples_y,
xs,
ys,
num_samples_x,
num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,

View File

@ -24,8 +24,8 @@
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/bundle.h"
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/simple_pipeline/initialize_reconstruction.h"
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/reconstruction_scale.h"
@ -39,12 +39,12 @@ using libmv::EuclideanScaleToUnity;
using libmv::Marker;
using libmv::ProgressUpdateCallback;
using libmv::PolynomialCameraIntrinsics;
using libmv::Tracks;
using libmv::EuclideanBundle;
using libmv::EuclideanCompleteReconstruction;
using libmv::EuclideanReconstructTwoFrames;
using libmv::EuclideanReprojectionError;
using libmv::PolynomialCameraIntrinsics;
using libmv::Tracks;
struct libmv_Reconstruction {
EuclideanReconstruction reconstruction;
@ -73,6 +73,7 @@ class ReconstructUpdateCallback : public ProgressUpdateCallback {
progress_update_callback_(callback_customdata_, progress, message);
}
}
protected:
reconstruct_progress_update_cb progress_update_callback_;
void* callback_customdata_;
@ -134,9 +135,8 @@ void finishReconstruction(
/* Reprojection error calculation. */
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = tracks;
libmv_reconstruction->error = EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
libmv_reconstruction->error =
EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
}
bool selectTwoKeyframesBasedOnGRICAndVariance(
@ -148,9 +148,8 @@ bool selectTwoKeyframesBasedOnGRICAndVariance(
libmv::vector<int> keyframes;
/* Get list of all keyframe candidates first. */
SelectKeyframesBasedOnGRICAndVariance(normalized_tracks,
camera_intrinsics,
keyframes);
SelectKeyframesBasedOnGRICAndVariance(
normalized_tracks, camera_intrinsics, keyframes);
if (keyframes.size() < 2) {
LG << "Not enough keyframes detected by GRIC";
@ -183,16 +182,12 @@ bool selectTwoKeyframesBasedOnGRICAndVariance(
/* get a solution from two keyframes only */
EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
EuclideanBundle(keyframe_tracks, &reconstruction);
EuclideanCompleteReconstruction(keyframe_tracks,
&reconstruction,
NULL);
EuclideanCompleteReconstruction(keyframe_tracks, &reconstruction, NULL);
double current_error = EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
double current_error =
EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
LG << "Error between " << previous_keyframe
<< " and " << current_keyframe
LG << "Error between " << previous_keyframe << " and " << current_keyframe
<< ": " << current_error;
if (current_error < best_error) {
@ -214,9 +209,8 @@ Marker libmv_projectMarker(const EuclideanPoint& point,
projected /= projected(2);
libmv::Marker reprojected_marker;
intrinsics.ApplyIntrinsics(projected(0), projected(1),
&reprojected_marker.x,
&reprojected_marker.y);
intrinsics.ApplyIntrinsics(
projected(0), projected(1), &reprojected_marker.x, &reprojected_marker.y);
reprojected_marker.image = camera.image;
reprojected_marker.track = point.track;
@ -229,12 +223,10 @@ void libmv_getNormalizedTracks(const Tracks &tracks,
libmv::vector<Marker> markers = tracks.AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
Marker& marker = markers[i];
camera_intrinsics.InvertIntrinsics(marker.x, marker.y,
&marker.x, &marker.y);
normalized_tracks->Insert(marker.image,
marker.track,
marker.x, marker.y,
marker.weight);
camera_intrinsics.InvertIntrinsics(
marker.x, marker.y, &marker.x, &marker.y);
normalized_tracks->Insert(
marker.image, marker.track, marker.x, marker.y, marker.weight);
}
}
@ -254,8 +246,7 @@ libmv_Reconstruction *libmv_solveReconstruction(
libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback,
callback_customdata);
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
CameraIntrinsics* camera_intrinsics;
@ -309,14 +300,12 @@ libmv_Reconstruction *libmv_solveReconstruction(
}
EuclideanBundle(normalized_tracks, &reconstruction);
EuclideanCompleteReconstruction(normalized_tracks,
&reconstruction,
&update_callback);
EuclideanCompleteReconstruction(
normalized_tracks, &reconstruction, &update_callback);
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(
tracks,
libmv_solveRefineIntrinsics(tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_CONSTRAINTS,
progress_update_callback,
@ -353,14 +342,12 @@ libmv_Reconstruction *libmv_solveModal(
libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback,
callback_customdata);
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
CameraIntrinsics* camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(
libmv_camera_intrinsics_options);
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
/* Invert the camera intrinsics. */
Tracks normalized_tracks;
@ -378,11 +365,11 @@ libmv_Reconstruction *libmv_solveModal(
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(
tracks,
libmv_solveRefineIntrinsics(tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_TRANSLATION,
progress_update_callback, callback_customdata,
progress_update_callback,
callback_customdata,
&reconstruction,
camera_intrinsics);
}
@ -413,8 +400,7 @@ int libmv_reprojectionPointForTrack(
double pos[3]) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanPoint *point =
reconstruction->PointForTrack(track);
const EuclideanPoint* point = reconstruction->PointForTrack(track);
if (point) {
pos[0] = point->X[0];
pos[1] = point->X[2];
@ -425,8 +411,7 @@ int libmv_reprojectionPointForTrack(
}
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction *libmv_reconstruction,
int track) {
const libmv_Reconstruction* libmv_reconstruction, int track) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics* intrinsics = libmv_reconstruction->intrinsics;
@ -461,8 +446,7 @@ double libmv_reprojectionErrorForTrack(
}
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction *libmv_reconstruction,
int image) {
const libmv_Reconstruction* libmv_reconstruction, int image) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics* intrinsics = libmv_reconstruction->intrinsics;
@ -503,8 +487,7 @@ int libmv_reprojectionCameraForImage(
double mat[4][4]) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanCamera *camera =
reconstruction->CameraForImage(image);
const EuclideanCamera* camera = reconstruction->CameraForImage(image);
if (camera) {
for (int j = 0; j < 3; ++j) {

View File

@ -38,10 +38,9 @@ enum {
LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 3),
LIBMV_REFINE_RADIAL_DISTORTION_K3 = (1 << 4),
LIBMV_REFINE_RADIAL_DISTORTION_K4 = (1 << 5),
LIBMV_REFINE_RADIAL_DISTORTION = (LIBMV_REFINE_RADIAL_DISTORTION_K1 |
LIBMV_REFINE_RADIAL_DISTORTION_K2 |
LIBMV_REFINE_RADIAL_DISTORTION_K3 |
LIBMV_REFINE_RADIAL_DISTORTION_K4),
LIBMV_REFINE_RADIAL_DISTORTION =
(LIBMV_REFINE_RADIAL_DISTORTION_K1 | LIBMV_REFINE_RADIAL_DISTORTION_K2 |
LIBMV_REFINE_RADIAL_DISTORTION_K3 | LIBMV_REFINE_RADIAL_DISTORTION_K4),
LIBMV_REFINE_TANGENTIAL_DISTORTION_P1 = (1 << 6),
LIBMV_REFINE_TANGENTIAL_DISTORTION_P2 = (1 << 7),
@ -78,24 +77,21 @@ int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction);
void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction);
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction* libmv_reconstruction,
int track,
double pos[3]);
const libmv_Reconstruction* libmv_reconstruction, int track, double pos[3]);
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction* libmv_reconstruction,
int track);
const libmv_Reconstruction* libmv_reconstruction, int track);
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction* libmv_reconstruction,
int image);
const libmv_Reconstruction* libmv_reconstruction, int image);
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction* libmv_reconstruction,
int image,
double mat[4][4]);
double libmv_reprojectionError(const libmv_Reconstruction* libmv_reconstruction);
double libmv_reprojectionError(
const libmv_Reconstruction* libmv_reconstruction);
struct libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction* libmv_Reconstruction);

View File

@ -82,7 +82,8 @@ void libmv_samplePlanarPatchByte(const unsigned char * /*image*/,
int /*channels*/,
const double* /*xs*/,
const double* /*ys*/,
int /*num_samples_x*/, int /*num_samples_y*/,
int /*num_samples_x*/,
int /*num_samples_y*/,
const float* /*mask*/,
unsigned char* /*patch*/,
double* /*warped_position_x*/,
@ -90,8 +91,7 @@ void libmv_samplePlanarPatchByte(const unsigned char * /*image*/,
/* TODO(sergey): implement */
}
void libmv_floatImageDestroy(libmv_FloatImage* /*image*/)
{
void libmv_floatImageDestroy(libmv_FloatImage* /*image*/) {
}
/* ************ Tracks ************ */
@ -131,7 +131,8 @@ libmv_Reconstruction *libmv_solveModal(
return NULL;
}
int libmv_reconstructionIsValid(libmv_Reconstruction * /*libmv_reconstruction*/) {
int libmv_reconstructionIsValid(
libmv_Reconstruction* /*libmv_reconstruction*/) {
return 0;
}
@ -143,14 +144,12 @@ int libmv_reprojectionPointForTrack(
}
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*track*/) {
const libmv_Reconstruction* /*libmv_reconstruction*/, int /*track*/) {
return 0.0;
}
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*image*/) {
const libmv_Reconstruction* /*libmv_reconstruction*/, int /*image*/) {
return 0.0;
}
@ -235,8 +234,7 @@ void libmv_cameraIntrinsicsUpdate(
}
void libmv_cameraIntrinsicsSetThreads(
libmv_CameraIntrinsics * /*libmv_intrinsics*/,
int /*threads*/) {
libmv_CameraIntrinsics* /*libmv_intrinsics*/, int /*threads*/) {
}
void libmv_cameraIntrinsicsExtractOptions(
@ -249,11 +247,13 @@ void libmv_cameraIntrinsicsExtractOptions(
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
const unsigned char* source_image,
int width, int height,
int width,
int height,
float /*overscan*/,
int channels,
unsigned char* destination_image) {
memcpy(destination_image, source_image,
memcpy(destination_image,
source_image,
channels * width * height * sizeof(unsigned char));
}
@ -265,7 +265,8 @@ void libmv_cameraIntrinsicsUndistortFloat(
float /*overscan*/,
int channels,
float* destination_image) {
memcpy(destination_image, source_image,
memcpy(destination_image,
source_image,
channels * width * height * sizeof(float));
}
@ -277,7 +278,8 @@ void libmv_cameraIntrinsicsDistortByte(
float /*overscan*/,
int channels,
unsigned char* destination_image) {
memcpy(destination_image, source_image,
memcpy(destination_image,
source_image,
channels * width * height * sizeof(unsigned char));
}
@ -289,7 +291,8 @@ void libmv_cameraIntrinsicsDistortFloat(
float /*overscan*/,
int channels,
float* destination_image) {
memcpy(destination_image, source_image,
memcpy(destination_image,
source_image,
channels * width * height * sizeof(float));
}
@ -327,45 +330,38 @@ void libmv_homography2DFromCorrespondencesEuc(/* const */ double (* /*x1*/)[2],
/* ************ autotrack ************ */
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/)
{
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/) {
return NULL;
}
void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/)
{
void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/) {
}
void libmv_autoTrackSetOptions(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_AutoTrackOptions* /*options*/)
{
const libmv_AutoTrackOptions* /*options*/) {
}
int libmv_autoTrackMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_TrackRegionOptions* /*libmv_options*/,
libmv_Marker* /*libmv_tracker_marker*/,
libmv_TrackRegionResult* /*libmv_result*/)
{
libmv_TrackRegionResult* /*libmv_result*/) {
return 0;
}
void libmv_autoTrackAddMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_Marker* /*libmv_marker*/)
{
const libmv_Marker* /*libmv_marker*/) {
}
void libmv_autoTrackSetMarkers(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_Marker* /*libmv_marker-*/,
size_t /*num_markers*/)
{
size_t /*num_markers*/) {
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* /*libmv_autotrack*/,
int /*clip*/,
int /*frame*/,
int /*track*/,
libmv_Marker* /*libmv_marker*/)
{
libmv_Marker* /*libmv_marker*/) {
return 0;
}
@ -376,24 +372,20 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_GetImageCallback /*get_image_callback*/,
libmv_ReleaseImageCallback /*release_image_callback*/,
libmv_GetMaskForTrackCallback /*get_mask_for_track_callback*/,
libmv_ReleaseMaskCallback /*release_mask_callback*/)
{
libmv_ReleaseMaskCallback /*release_mask_callback*/) {
return NULL;
}
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/)
{
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/) {
}
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform * /*transform*/)
{
const libmv_FrameTransform* /*transform*/) {
return 0;
}
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* /*transform*/,
void libmv_frameAccessorgetTransformRun(
const libmv_FrameTransform* /*transform*/,
const libmv_FloatImage* /*input_image*/,
libmv_FloatImage* /*output_image*/)
{
libmv_FloatImage* /*output_image*/) {
}

View File

@ -32,9 +32,9 @@
#undef DUMP_ALWAYS
using libmv::FloatImage;
using libmv::TrackRegion;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
using libmv::TrackRegion;
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
@ -66,7 +66,8 @@ void libmv_configureTrackRegionOptions(
* so disabling for now for until proper prediction model is landed.
*
* The thing is, currently blender sends input coordinates as the guess to
* region tracker and in case of fast motion such an early out ruins the track.
* region tracker and in case of fast motion such an early out ruins the
* track.
*/
track_region_options->attempt_refine_before_brute = false;
track_region_options->use_normalized_intensities = options.use_normalization;
@ -108,33 +109,27 @@ int libmv_trackRegion(const libmv_TrackRegionOptions* options,
libmv_configureTrackRegionOptions(*options, &track_region_options);
if (options->image1_mask) {
libmv_floatBufferToFloatImage(options->image1_mask,
image1_width,
image1_height,
1,
&image1_mask);
libmv_floatBufferToFloatImage(
options->image1_mask, image1_width, image1_height, 1, &image1_mask);
track_region_options.image1_mask = &image1_mask;
}
// Convert from raw float buffers to libmv's FloatImage.
FloatImage old_patch, new_patch;
libmv_floatBufferToFloatImage(image1,
image1_width,
image1_height,
1,
&old_patch);
libmv_floatBufferToFloatImage(image2,
image2_width,
image2_height,
1,
&new_patch);
libmv_floatBufferToFloatImage(
image1, image1_width, image1_height, 1, &old_patch);
libmv_floatBufferToFloatImage(
image2, image2_width, image2_height, 1, &new_patch);
TrackRegionResult track_region_result;
TrackRegion(old_patch, new_patch,
xx1, yy1,
TrackRegion(old_patch,
new_patch,
xx1,
yy1,
track_region_options,
xx2, yy2,
xx2,
yy2,
&track_region_result);
// Convert to floats for the blender api.

View File

@ -44,7 +44,7 @@ typedef struct libmv_TrackRegionResult {
namespace libmv {
struct TrackRegionOptions;
struct TrackRegionResult;
}
} // namespace libmv
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
libmv::TrackRegionOptions* track_region_options);

View File

@ -25,8 +25,7 @@
using mv::Marker;
using mv::Tracks;
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
Marker *marker) {
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker, Marker* marker) {
marker->clip = libmv_marker.clip;
marker->frame = libmv_marker.frame;
marker->track = libmv_marker.track;
@ -50,8 +49,7 @@ void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
marker->disabled_channels = libmv_marker.disabled_channels;
}
void libmv_markerToApiMarker(const Marker& marker,
libmv_Marker *libmv_marker) {
void libmv_markerToApiMarker(const Marker& marker, libmv_Marker* libmv_marker) {
libmv_marker->clip = marker.clip;
libmv_marker->frame = marker.frame;
libmv_marker->track = marker.track;
@ -109,8 +107,7 @@ void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
((Tracks*)libmv_tracks)->RemoveMarker(clip, frame, track);
}
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
int track) {
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks, int track) {
((Tracks*)libmv_tracks)->RemoveMarkersForTrack(track);
}

View File

@ -92,7 +92,6 @@ libmv_TracksN* libmv_tracksNewN(void);
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks);
void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker);
@ -107,8 +106,7 @@ void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int frame,
int track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
int track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks, int track);
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks);
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip);

View File

@ -48,9 +48,15 @@
((type*)(what))->~type(); \
free(what); \
} \
} (void)0
} \
(void)0
# define LIBMV_STRUCT_NEW(type, count) (type*)malloc(sizeof(type) * count)
# define LIBMV_STRUCT_DELETE(what) { if (what) free(what); } (void)0
# define LIBMV_STRUCT_DELETE(what) \
{ \
if (what) \
free(what); \
} \
(void)0
#endif
#endif // LIBMV_C_API_UTILDEFINES_H_

View File

@ -21,9 +21,9 @@
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/autotrack.h"
#include "libmv/autotrack/quad.h"
#include "libmv/autotrack/frame_accessor.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/quad.h"
#include "libmv/base/scoped_ptr.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
@ -37,19 +37,15 @@ class DisableChannelsTransform : public FrameAccessor::Transform {
DisableChannelsTransform(int disabled_channels)
: disabled_channels_(disabled_channels) {}
int64_t key() const {
return disabled_channels_;
}
int64_t key() const { return disabled_channels_; }
void run(const FloatImage& input, FloatImage* output) const {
bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
disable_green = (disabled_channels_ & Marker::CHANNEL_G) != 0,
disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
LG << "Disabling channels: "
<< (disable_red ? "R " : "")
<< (disable_green ? "G " : "")
<< (disable_blue ? "B" : "");
LG << "Disabling channels: " << (disable_red ? "R " : "")
<< (disable_green ? "G " : "") << (disable_blue ? "B" : "");
// It's important to rescale the result appropriately so that e.g. if only
// blue is selected, it's not zeroed out.
@ -115,11 +111,8 @@ FrameAccessor::Key GetMaskForMarker(const Marker& marker,
FrameAccessor* frame_accessor,
FloatImage* mask) {
Region region = marker.search_region.Rounded();
return frame_accessor->GetMaskForTrack(marker.clip,
marker.frame,
marker.track,
&region,
mask);
return frame_accessor->GetMaskForTrack(
marker.clip, marker.frame, marker.track, &region, mask);
}
} // namespace
@ -152,23 +145,20 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
// TODO(keir): Technically this could take a smaller slice from the source
// image instead of taking one the size of the search window.
FloatImage reference_image;
FrameAccessor::Key reference_key = GetImageForMarker(reference_marker,
frame_accessor_,
&reference_image);
FrameAccessor::Key reference_key =
GetImageForMarker(reference_marker, frame_accessor_, &reference_image);
if (!reference_key) {
LG << "Couldn't get frame for reference marker: " << reference_marker;
return false;
}
FloatImage reference_mask;
FrameAccessor::Key reference_mask_key = GetMaskForMarker(reference_marker,
frame_accessor_,
&reference_mask);
FrameAccessor::Key reference_mask_key =
GetMaskForMarker(reference_marker, frame_accessor_, &reference_mask);
FloatImage tracked_image;
FrameAccessor::Key tracked_key = GetImageForMarker(*tracked_marker,
frame_accessor_,
&tracked_image);
FrameAccessor::Key tracked_key =
GetImageForMarker(*tracked_marker, frame_accessor_, &tracked_image);
if (!tracked_key) {
frame_accessor_->ReleaseImage(reference_key);
LG << "Couldn't get frame for tracked marker: " << tracked_marker;
@ -191,9 +181,11 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
local_track_region_options.attempt_refine_before_brute = predicted_position;
TrackRegion(reference_image,
tracked_image,
x1, y1,
x1,
y1,
local_track_region_options,
x2, y2,
x2,
y2,
result);
// Copy results over the tracked marker.
@ -230,7 +222,9 @@ void AutoTrack::SetMarkers(vector<Marker>* markers) {
tracks_.SetMarkers(markers);
}
bool AutoTrack::GetMarker(int clip, int frame, int track,
bool AutoTrack::GetMarker(int clip,
int frame,
int track,
Marker* markers) const {
return tracks_.GetMarker(clip, frame, track, markers);
}
@ -242,7 +236,8 @@ void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
vector<Marker> previous_frame_markers;
// Q: How to decide track #s when detecting?
// Q: How to match markers from previous frame? set of prev frame tracks?
// Q: How to decide what markers should get tracked and which ones should not?
// Q: How to decide what markers should get tracked and which ones should
// not?
for (int frame = 0; frame < num_frames; ++frame) {
if (Cancelled()) {
LG << "Got cancel message while detecting and tracking...";
@ -271,8 +266,7 @@ void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
for (int i = 0; i < this_frame_markers.size(); ++i) {
tracks_in_this_frame.push_back(this_frame_markers[i].track);
}
std::sort(tracks_in_this_frame.begin(),
tracks_in_this_frame.end());
std::sort(tracks_in_this_frame.begin(), tracks_in_this_frame.end());
// Find tracks in the previous frame that are not in this one.
vector<Marker*> previous_frame_markers_to_track;

View File

@ -23,8 +23,8 @@
#ifndef LIBMV_AUTOTRACK_AUTOTRACK_H_
#define LIBMV_AUTOTRACK_AUTOTRACK_H_
#include "libmv/autotrack/tracks.h"
#include "libmv/autotrack/region.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/tracking/track_region.h"
namespace libmv {
@ -74,8 +74,7 @@ class AutoTrack {
Region search_region;
};
AutoTrack(FrameAccessor* frame_accessor)
: frame_accessor_(frame_accessor) {}
AutoTrack(FrameAccessor* frame_accessor) : frame_accessor_(frame_accessor) {}
// Marker manipulation.
// Clip manipulation.
@ -150,10 +149,9 @@ class AutoTrack {
};
void DetectAndTrack(const DetectAndTrackOptions& options);
struct DetectFeaturesInFrameOptions {
};
void DetectFeaturesInFrame(int clip, int frame,
const DetectFeaturesInFrameOptions* options=NULL) {
struct DetectFeaturesInFrameOptions {};
void DetectFeaturesInFrame(
int clip, int frame, const DetectFeaturesInFrameOptions* options = NULL) {
(void)clip;
(void)frame;
(void)options;

View File

@ -50,10 +50,7 @@ struct FrameAccessor {
virtual void run(const FloatImage& input, FloatImage* output) const = 0;
};
enum InputMode {
MONO,
RGBA
};
enum InputMode { MONO, RGBA };
typedef void* Key;
@ -100,6 +97,6 @@ struct FrameAccessor {
virtual int NumFrames(int clip) = 0;
};
} // namespace libmv
} // namespace mv
#endif // LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_

View File

@ -69,11 +69,7 @@ struct Marker {
// Markers may be inliers or outliers if the tracking fails; this allows
// visualizing the markers in the image.
enum Status {
UNKNOWN,
INLIER,
OUTLIER
};
enum Status { UNKNOWN, INLIER, OUTLIER };
Status status;
// When doing correlation tracking, where to search in the current frame for
@ -90,12 +86,7 @@ struct Marker {
// another primitive (a rectangular prism). This captures the information
// needed to say that for example a collection of markers belongs to model #2
// (and model #2 is a plane).
enum ModelType {
POINT,
PLANE,
LINE,
CUBE
};
enum ModelType { POINT, PLANE, LINE, CUBE };
ModelType model_type;
// The model ID this track (e.g. the second model, which is a plane).
@ -129,12 +120,8 @@ struct Marker {
};
inline std::ostream& operator<<(std::ostream& out, const Marker& marker) {
out << "{"
<< marker.clip << ", "
<< marker.frame << ", "
<< marker.track << ", ("
<< marker.center.x() << ", "
<< marker.center.y() << ")"
out << "{" << marker.clip << ", " << marker.frame << ", " << marker.track
<< ", (" << marker.center.x() << ", " << marker.center.y() << ")"
<< "}";
return out;
}

View File

@ -23,18 +23,13 @@
#ifndef LIBMV_AUTOTRACK_MODEL_H_
#define LIBMV_AUTOTRACK_MODEL_H_
#include "libmv/numeric/numeric.h"
#include "libmv/autotrack/quad.h"
#include "libmv/numeric/numeric.h"
namespace mv {
struct Model {
enum ModelType {
POINT,
PLANE,
LINE,
CUBE
};
enum ModelType { POINT, PLANE, LINE, CUBE };
// ???
};

View File

@ -20,8 +20,8 @@
//
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
@ -31,8 +31,8 @@ namespace mv {
namespace {
using libmv::vector;
using libmv::Vec2;
using libmv::vector;
// Implied time delta between steps. Set empirically by tweaking and seeing
// what numbers did best at prediction.
@ -57,6 +57,8 @@ const double dt = 3.8;
// For a typical system having constant velocity. This gives smooth-appearing
// predictions, but they are not always as accurate.
//
// clang-format off
const double velocity_state_transition_data[] = {
1, dt, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
@ -65,10 +67,13 @@ const double velocity_state_transition_data[] = {
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
// clang-format on
#if 0
// This 3rd-order system also models acceleration. This makes for "jerky"
// predictions, but that tend to be more accurate.
//
// clang-format off
const double acceleration_state_transition_data[] = {
1, dt, dt*dt/2, 0, 0, 0,
0, 1, dt, 0, 0, 0,
@ -77,9 +82,12 @@ const double acceleration_state_transition_data[] = {
0, 0, 0, 0, 1, dt,
0, 0, 0, 0, 0, 1
};
// clang-format on
// This system (attempts) to add an angular velocity component. However, it's
// total junk.
//
// clang-format off
const double angular_state_transition_data[] = {
1, dt, -dt, 0, 0, 0, // Position x
0, 1, 0, 0, 0, 0, // Velocity x
@ -88,17 +96,22 @@ const double angular_state_transition_data[] = {
0, 0, 0, 0, 1, 0, // Velocity y
0, 0, 0, 0, 0, 1 // Ignored
};
// clang-format on
#endif
const double* state_transition_data = velocity_state_transition_data;
// Observation matrix.
// clang-format off
const double observation_data[] = {
1., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 0., 0.
};
// clang-format on
// Process covariance.
//
// clang-format off
const double process_covariance_data[] = {
35, 0, 0, 0, 0, 0,
0, 5, 0, 0, 0, 0,
@ -107,14 +120,19 @@ const double process_covariance_data[] = {
0, 0, 0, 0, 5, 0,
0, 0, 0, 0, 0, 5
};
// clang-format on
// Process covariance.
const double measurement_covariance_data[] = {
0.01, 0.00,
0.00, 0.01,
0.01,
0.00,
0.00,
0.01,
};
// Initial covariance.
//
// clang-format off
const double initial_covariance_data[] = {
10, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
@ -123,6 +141,7 @@ const double initial_covariance_data[] = {
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
// clang-format on
typedef mv::KalmanFilter<double, 6, 2> TrackerKalman;
@ -147,8 +166,8 @@ void RunPrediction(const vector<Marker*> previous_markers,
TrackerKalman::State state;
state.mean << previous_markers[0]->center.x(), 0, 0,
previous_markers[0]->center.y(), 0, 0;
state.covariance = Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(
initial_covariance_data);
state.covariance =
Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(initial_covariance_data);
int current_frame = previous_markers[0]->frame;
int target_frame = predicted_marker->frame;
@ -159,19 +178,18 @@ void RunPrediction(const vector<Marker*> previous_markers,
for (int i = 1; i < previous_markers.size(); ++i) {
// Step forward predicting the state until it is on the current marker.
int predictions = 0;
for (;
current_frame != previous_markers[i]->frame;
for (; current_frame != previous_markers[i]->frame;
current_frame += frame_delta) {
filter.Step(&state);
predictions++;
LG << "Predicted point (frame " << current_frame << "): "
<< state.mean(0) << ", " << state.mean(3);
LG << "Predicted point (frame " << current_frame << "): " << state.mean(0)
<< ", " << state.mean(3);
}
// Log the error -- not actually used, but interesting.
Vec2 error = previous_markers[i]->center.cast<double>() -
Vec2(state.mean(0), state.mean(3));
LG << "Prediction error for " << predictions << " steps: ("
<< error.x() << ", " << error.y() << "); norm: " << error.norm();
LG << "Prediction error for " << predictions << " steps: (" << error.x()
<< ", " << error.y() << "); norm: " << error.norm();
// Now that the state is predicted in the current frame, update the state
// based on the measurement from the current frame.
filter.Update(previous_markers[i]->center.cast<double>(),
@ -184,8 +202,8 @@ void RunPrediction(const vector<Marker*> previous_markers,
// predict until the target frame.
for (; current_frame != target_frame; current_frame += frame_delta) {
filter.Step(&state);
LG << "Final predicted point (frame " << current_frame << "): "
<< state.mean(0) << ", " << state.mean(3);
LG << "Final predicted point (frame " << current_frame
<< "): " << state.mean(0) << ", " << state.mean(3);
}
// The x and y positions are at 0 and 3; ignore acceleration and velocity.
@ -253,13 +271,13 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
} else if (insert_at != -1) {
// Found existing marker; scan before and after it.
forward_scan_begin = insert_at + 1;
forward_scan_end = markers.size() - 1;;
forward_scan_end = markers.size() - 1;
backward_scan_begin = insert_at - 1;
backward_scan_end = 0;
} else {
// Didn't find existing marker but found an insertion point.
forward_scan_begin = insert_before;
forward_scan_end = markers.size() - 1;;
forward_scan_end = markers.size() - 1;
backward_scan_begin = insert_before - 1;
backward_scan_end = 0;
}
@ -301,9 +319,8 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
return false;
}
LG << "Predicting backward";
int predict_begin =
std::min(forward_scan_begin + max_frames_to_predict_from,
forward_scan_end);
int predict_begin = std::min(
forward_scan_begin + max_frames_to_predict_from, forward_scan_end);
int predict_end = forward_scan_begin;
vector<Marker*> previous_markers;
for (int i = predict_begin; i >= predict_end; --i) {
@ -312,7 +329,6 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
RunPrediction(previous_markers, marker);
return false;
}
}
} // namespace mv

View File

@ -35,10 +35,8 @@ static void AddMarker(int frame, float x, float y, Tracks* tracks) {
marker.frame = frame;
marker.center.x() = x;
marker.center.y() = y;
marker.patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
marker.patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
tracks->AddMarker(marker);
}
@ -66,10 +64,8 @@ TEST(PredictMarkerPosition, EasyLinearMotion) {
// Check the patch coordinates as well.
double x = 9, y = 40.0;
Quad2Df expected_patch;
expected_patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
expected_patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
@ -101,10 +97,8 @@ TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
// Check the patch coordinates as well.
double x = 9.0, y = 40.0;
Quad2Df expected_patch;
expected_patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
expected_patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;

View File

@ -23,8 +23,8 @@
#include "libmv/autotrack/tracks.h"
#include <algorithm>
#include <vector>
#include <iterator>
#include <vector>
#include "libmv/numeric/numeric.h"
@ -34,12 +34,12 @@ Tracks::Tracks(const Tracks& other) {
markers_ = other.markers_;
}
Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {}
Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {
}
bool Tracks::GetMarker(int clip, int frame, int track, Marker* marker) const {
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame &&
if (markers_[i].clip == clip && markers_[i].frame == frame &&
markers_[i].track == track) {
*marker = markers_[i];
return true;
@ -60,8 +60,7 @@ void Tracks::GetMarkersForTrackInClip(int clip,
int track,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (clip == markers_[i].clip &&
track == markers_[i].track) {
if (clip == markers_[i].clip && track == markers_[i].track) {
markers->push_back(markers_[i]);
}
}
@ -71,15 +70,16 @@ void Tracks::GetMarkersInFrame(int clip,
int frame,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame) {
if (markers_[i].clip == clip && markers_[i].frame == frame) {
markers->push_back(markers_[i]);
}
}
}
void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
int clip2, int frame2,
void Tracks::GetMarkersForTracksInBothImages(int clip1,
int frame1,
int clip2,
int frame2,
vector<Marker>* markers) const {
std::vector<int> image1_tracks;
std::vector<int> image2_tracks;
@ -99,20 +99,19 @@ void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
std::sort(image1_tracks.begin(), image1_tracks.end());
std::sort(image2_tracks.begin(), image2_tracks.end());
std::vector<int> intersection;
std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
image2_tracks.begin(), image2_tracks.end(),
std::set_intersection(image1_tracks.begin(),
image1_tracks.end(),
image2_tracks.begin(),
image2_tracks.end(),
std::back_inserter(intersection));
// Scan through and get the relevant tracks from the two images.
for (int i = 0; i < markers_.size(); ++i) {
// Save markers that are in either frame and are in our candidate set.
if (((markers_[i].clip == clip1 &&
markers_[i].frame == frame1) ||
(markers_[i].clip == clip2 &&
markers_[i].frame == frame2)) &&
std::binary_search(intersection.begin(),
intersection.end(),
markers_[i].track)) {
if (((markers_[i].clip == clip1 && markers_[i].frame == frame1) ||
(markers_[i].clip == clip2 && markers_[i].frame == frame2)) &&
std::binary_search(
intersection.begin(), intersection.end(), markers_[i].track)) {
markers->push_back(markers_[i]);
}
}
@ -122,8 +121,7 @@ void Tracks::AddMarker(const Marker& marker) {
// TODO(keir): This is quadratic for repeated insertions. Fix this by adding
// a smarter data structure like a set<>.
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == marker.clip &&
markers_[i].frame == marker.frame &&
if (markers_[i].clip == marker.clip && markers_[i].frame == marker.frame &&
markers_[i].track == marker.track) {
markers_[i] = marker;
return;
@ -139,8 +137,7 @@ void Tracks::SetMarkers(vector<Marker>* markers) {
bool Tracks::RemoveMarker(int clip, int frame, int track) {
int size = markers_.size();
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame &&
if (markers_[i].clip == clip && markers_[i].frame == frame &&
markers_[i].track == track) {
markers_[i] = markers_[size - 1];
markers_.resize(size - 1);

View File

@ -23,8 +23,8 @@
#ifndef LIBMV_AUTOTRACK_TRACKS_H_
#define LIBMV_AUTOTRACK_TRACKS_H_
#include "libmv/base/vector.h"
#include "libmv/autotrack/marker.h"
#include "libmv/base/vector.h"
namespace mv {
@ -51,8 +51,10 @@ class Tracks {
//
// This is not the same as the union of the markers in frame1 and
// frame2; each marker is for a track that appears in both images.
void GetMarkersForTracksInBothImages(int clip1, int frame1,
int clip2, int frame2,
void GetMarkersForTracksInBothImages(int clip1,
int frame1,
int clip2,
int frame2,
vector<Marker>* markers) const;
void AddMarker(const Marker& marker);

View File

@ -22,8 +22,8 @@
#include "libmv/autotrack/tracks.h"
#include "testing/testing.h"
#include "libmv/logging/logging.h"
#include "testing/testing.h"
namespace mv {

View File

@ -28,6 +28,7 @@ class IdGenerator {
public:
IdGenerator() : next_(0) {}
ID Generate() { return next_++; }
private:
ID next_;
};

View File

@ -26,8 +26,8 @@
namespace libmv {
using std::map;
using std::make_pair;
using std::map;
} // namespace libmv

View File

@ -92,8 +92,10 @@ class scoped_array {
T* array_;
// Forbid comparison of different scoped_array types.
template <typename T2> bool operator==(scoped_array<T2> const& p2) const;
template <typename T2> bool operator!=(scoped_array<T2> const& p2) const;
template <typename T2>
bool operator==(scoped_array<T2> const& p2) const;
template <typename T2>
bool operator!=(scoped_array<T2> const& p2) const;
// Disallow evil constructors
scoped_array(const scoped_array&);

View File

@ -19,9 +19,9 @@
// IN THE SOFTWARE.
#include "libmv/base/vector.h"
#include <algorithm>
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
#include <algorithm>
namespace {
using namespace libmv;

View File

@ -18,7 +18,6 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_BASE_VECTOR_UTILS_H_
#define LIBMV_BASE_VECTOR_UTILS_H_

View File

@ -18,16 +18,15 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/image/image.h"
#include <iostream>
#include <cmath>
#include <iostream>
#include "libmv/image/image.h"
namespace libmv {
void FloatArrayToScaledByteArray(const Array3Df& float_array,
Array3Du* byte_array,
bool automatic_range_detection
) {
bool automatic_range_detection) {
byte_array->ResizeLike(float_array);
float minval = HUGE_VAL;
float maxval = -HUGE_VAL;

View File

@ -81,13 +81,9 @@ class ArrayND : public BaseArray {
return *this;
}
const Index &Shapes() const {
return shape_;
}
const Index& Shapes() const { return shape_; }
const Index &Strides() const {
return strides_;
}
const Index& Strides() const { return strides_; }
/// Create an array of shape s.
void Resize(const Index& new_shape) {
@ -115,9 +111,7 @@ class ArrayND : public BaseArray {
}
/// Resizes the array to shape s. All data is lost.
void Resize(const int *new_shape_array) {
Resize(Index(new_shape_array));
}
void Resize(const int* new_shape_array) { Resize(Index(new_shape_array)); }
/// Resize a 1D array to length s0.
void Resize(int s0) {
@ -136,9 +130,7 @@ class ArrayND : public BaseArray {
}
// Match Eigen2's API.
void resize(int rows, int cols) {
Resize(rows, cols);
}
void resize(int rows, int cols) { Resize(rows, cols); }
/// Resize a 3D array to shape (s0,s1,s2).
void Resize(int s0, int s1, int s2) {
@ -171,19 +163,13 @@ class ArrayND : public BaseArray {
}
/// Return a tuple containing the length of each axis.
const Index &Shape() const {
return shape_;
}
const Index& Shape() const { return shape_; }
/// Return the length of an axis.
int Shape(int axis) const {
return shape_(axis);
}
int Shape(int axis) const { return shape_(axis); }
/// Return the distance between neighboring elements along axis.
int Stride(int axis) const {
return strides_(axis);
}
int Stride(int axis) const { return strides_(axis); }
/// Return the number of elements of the array.
int Size() const {
@ -194,9 +180,7 @@ class ArrayND : public BaseArray {
}
/// Return the total amount of memory used by the array.
int MemorySizeInBytes() const {
return sizeof(*this) + Size() * sizeof(T);
}
int MemorySizeInBytes() const { return sizeof(*this) + Size() * sizeof(T); }
/// Pointer to the first element of the array.
T* Data() { return data_; }
@ -237,9 +221,7 @@ class ArrayND : public BaseArray {
}
/// 1D specialization.
T &operator()(int i0) {
return *( Data() + Offset(i0) );
}
T& operator()(int i0) { return *(Data() + Offset(i0)); }
/// 2D specialization.
T& operator()(int i0, int i1) {
@ -262,9 +244,7 @@ class ArrayND : public BaseArray {
}
/// 1D specialization.
const T &operator()(int i0) const {
return *(Data() + Offset(i0));
}
const T& operator()(int i0) const { return *(Data() + Offset(i0)); }
/// 2D specialization.
const T& operator()(int i0, int i1) const {
@ -287,26 +267,24 @@ class ArrayND : public BaseArray {
}
/// 1D specialization.
bool Contains(int i0) const {
return 0 <= i0 && i0 < Shape(0);
}
bool Contains(int i0) const { return 0 <= i0 && i0 < Shape(0); }
/// 2D specialization.
bool Contains(int i0, int i1) const {
return 0 <= i0 && i0 < Shape(0)
&& 0 <= i1 && i1 < Shape(1);
return 0 <= i0 && i0 < Shape(0) && 0 <= i1 && i1 < Shape(1);
}
/// 3D specialization.
bool Contains(int i0, int i1, int i2) const {
return 0 <= i0 && i0 < Shape(0)
&& 0 <= i1 && i1 < Shape(1)
&& 0 <= i2 && i2 < Shape(2);
return 0 <= i0 && i0 < Shape(0) && 0 <= i1 && i1 < Shape(1) && 0 <= i2 &&
i2 < Shape(2);
}
bool operator==(const ArrayND<T, N>& other) const {
if (shape_ != other.shape_) return false;
if (strides_ != other.strides_) return false;
if (shape_ != other.shape_)
return false;
if (strides_ != other.strides_)
return false;
for (int i = 0; i < Size(); ++i) {
if (this->Data()[i] != other.Data()[i])
return false;
@ -346,30 +324,20 @@ class ArrayND : public BaseArray {
template <typename T>
class Array3D : public ArrayND<T, 3> {
typedef ArrayND<T, 3> Base;
public:
Array3D()
: Base() {
}
Array3D(int height, int width, int depth = 1)
: Base(height, width, depth) {
}
Array3D() : Base() {}
Array3D(int height, int width, int depth = 1) : Base(height, width, depth) {}
Array3D(T* data, int height, int width, int depth = 1)
: Base(data, height, width, depth) {
}
: Base(data, height, width, depth) {}
void Resize(int height, int width, int depth = 1) {
Base::Resize(height, width, depth);
}
int Height() const {
return Base::Shape(0);
}
int Width() const {
return Base::Shape(1);
}
int Depth() const {
return Base::Shape(2);
}
int Height() const { return Base::Shape(0); }
int Width() const { return Base::Shape(1); }
int Depth() const { return Base::Shape(2); }
// Match Eigen2's API so that Array3D's and Mat*'s can work together via
// template magic.
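As a quick reference for the container being reformatted here, a hedged usage sketch (not part of the change); the sizes and values are arbitrary.
libmv::Array3Df image(240, 320);  // Height and width; depth defaults to 1.
image(10, 20, 0) = 0.5f;          // (row, column, channel) indexing.
int height = image.Height();      // 240
int width = image.Width();        // 320
int depth = image.Depth();        // 1
image.Resize(480, 640, 3);        // Resizing discards the previous data.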
@ -420,9 +388,7 @@ void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
Array3Df* float_array);
template <typename AArrayType, typename BArrayType, typename CArrayType>
void MultiplyElements(const AArrayType &a,
const BArrayType &b,
CArrayType *c) {
void MultiplyElements(const AArrayType& a, const BArrayType& b, CArrayType* c) {
// This function does an element-wise multiply between
// the two Arrays A and B, and stores the result in C.
// A and B must have the same dimensions.

View File

@ -21,9 +21,9 @@
#include "libmv/image/array_nd.h"
#include "testing/testing.h"
using libmv::ArrayND;
using libmv::Array3D;
using libmv::Array3Df;
using libmv::ArrayND;
namespace {

View File

@ -64,9 +64,14 @@ void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
}
template <int size, bool vertical>
void FastConvolve(const Vec &kernel, int width, int height,
const float* src, int src_stride, int src_line_stride,
float* dst, int dst_stride) {
void FastConvolve(const Vec& kernel,
int width,
int height,
const float* src,
int src_stride,
int src_line_stride,
float* dst,
int dst_stride) {
double coefficients[2 * size + 1];
for (int k = 0; k < 2 * size + 1; ++k) {
coefficients[k] = kernel(2 * size - k);
@ -119,19 +124,22 @@ void Convolve(const Array3Df &in,
// fast path.
int half_width = kernel.size() / 2;
switch (half_width) {
#define static_convolution(size) case size: \
FastConvolve<size, vertical>(kernel, width, height, src, src_stride, \
src_line_stride, dst, dst_stride); break;
static_convolution(1)
static_convolution(2)
static_convolution(3)
static_convolution(4)
static_convolution(5)
static_convolution(6)
#define static_convolution(size) \
case size: \
FastConvolve<size, vertical>(kernel, \
width, \
height, \
src, \
src_stride, \
src_line_stride, \
dst, \
dst_stride); \
break;
static_convolution(1) static_convolution(2) static_convolution(3)
static_convolution(4) static_convolution(5) static_convolution(6)
static_convolution(7)
#undef static_convolution
default:
int dynamic_size = kernel.size() / 2;
default : int dynamic_size = kernel.size() / 2;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double sum = 0;
@ -171,9 +179,7 @@ void ConvolveVertical(const Array3Df &in,
Convolve<true>(in, kernel, out_pointer, plane);
}
void ConvolveGaussian(const Array3Df &in,
double sigma,
Array3Df *out_pointer) {
void ConvolveGaussian(const Array3Df& in, double sigma, Array3Df* out_pointer) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
@ -314,9 +320,7 @@ void BoxFilterVertical(const Array3Df &in,
}
}
void BoxFilter(const Array3Df &in,
int box_width,
Array3Df *out) {
void BoxFilter(const Array3Df& in, int box_width, Array3Df* out) {
Array3Df tmp;
BoxFilterHorizontal(in, box_width, &tmp);
BoxFilterVertical(tmp, box_width, out);
@ -330,13 +334,13 @@ void LaplaceFilter(unsigned char* src,
for (int y = 1; y < height - 1; y++)
for (int x = 1; x < width - 1; x++) {
const unsigned char* s = &src[y * width + x];
int l = 128 +
s[-width-1] + s[-width] + s[-width+1] +
s[1] - 8*s[0] + s[1] +
s[ width-1] + s[ width] + s[ width+1];
int l = 128 + s[-width - 1] + s[-width] + s[-width + 1] + s[1] -
8 * s[0] + s[1] + s[width - 1] + s[width] + s[width + 1];
int d = ((256 - strength) * s[0] + strength * l) / 256;
if (d < 0) d=0;
if (d > 255) d=255;
if (d < 0)
d = 0;
if (d > 255)
d = 255;
dst[y * width + x] = d;
}
}

View File

@ -35,7 +35,8 @@ inline double Gaussian(double x, double sigma) {
// 2D gaussian (zero mean)
// (9) in http://mathworld.wolfram.com/GaussianFunction.html
inline double Gaussian2D(double x, double y, double sigma) {
return 1.0/(2.0*M_PI*sigma*sigma) * exp( -(x*x+y*y)/(2.0*sigma*sigma));
return 1.0 / (2.0 * M_PI * sigma * sigma) *
exp(-(x * x + y * y) / (2.0 * sigma * sigma));
}
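To make the formula concrete, a small hedged sketch (not part of the header) that samples Gaussian2D into a normalized 3x3 kernel; the sigma value is arbitrary.
double kernel[3][3];
double sum = 0.0;
for (int y = -1; y <= 1; ++y) {
  for (int x = -1; x <= 1; ++x) {
    kernel[y + 1][x + 1] = Gaussian2D(x, y, /*sigma=*/1.0);
    sum += kernel[y + 1][x + 1];
  }
}
for (int y = 0; y < 3; ++y) {
  for (int x = 0; x < 3; ++x) {
    kernel[y][x] /= sum;  // Normalize so the sampled weights sum to one.
  }
}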
inline double GaussianDerivative(double x, double sigma) {
return -x / sigma / sigma * Gaussian(x, sigma);
@ -83,17 +84,17 @@ void BoxFilterVertical(const FloatImage &in,
int window_size,
FloatImage* out_pointer);
void BoxFilter(const FloatImage &in,
int box_width,
FloatImage *out);
void BoxFilter(const FloatImage& in, int box_width, FloatImage* out);
/*!
Convolve \a src into \a dst with the discrete laplacian operator.
\a src and \a dst should be \a width x \a height images.
\a strength is an interpolation coefficient (0-256) between original image and the laplacian.
\a strength is an interpolation coefficient (0-256) between original image
and the laplacian.
\note Make sure the search region is filtered with the same strength as the pattern.
\note Make sure the search region is filtered with the same strength as the
pattern.
*/
void LaplaceFilter(unsigned char* src,
unsigned char* dst,
@ -104,4 +105,3 @@ void LaplaceFilter(unsigned char* src,
} // namespace libmv
#endif // LIBMV_IMAGE_CONVOLVE_H_
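For reference, a hedged calling sketch of the filters declared in this header (not part of the change); the image is assumed to be filled with pixel data elsewhere, and the sigma and box width are arbitrary.
libmv::FloatImage image(480, 640);  // Single-channel float image.
libmv::FloatImage blurred, box;
libmv::ConvolveGaussian(image, /*sigma=*/1.5, &blurred);
libmv::BoxFilter(image, /*box_width=*/5, &box);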

View File

@ -21,8 +21,8 @@
#ifndef LIBMV_IMAGE_CORRELATION_H
#define LIBMV_IMAGE_CORRELATION_H
#include "libmv/logging/logging.h"
#include "libmv/image/image.h"
#include "libmv/logging/logging.h"
namespace libmv {
@ -63,9 +63,8 @@ inline double PearsonProductMomentCorrelation(
double covariance_xy = sXY - sX * sY;
double correlation = covariance_xy / sqrt(var_x * var_y);
LG << "Covariance xy: " << covariance_xy
<< ", var 1: " << var_x << ", var 2: " << var_y
<< ", correlation: " << correlation;
LG << "Covariance xy: " << covariance_xy << ", var 1: " << var_x
<< ", var 2: " << var_y << ", correlation: " << correlation;
return correlation;
}

View File

@ -39,14 +39,11 @@ typedef Array3Ds ShortImage;
// is the best solution after all.
class Image {
public:
// Create an image from an array. The image takes ownership of the array.
Image(Array3Du* array) : array_type_(BYTE), array_(array) {}
Image(Array3Df* array) : array_type_(FLOAT), array_(array) {}
Image(const Image &img): array_type_(NONE), array_(NULL) {
*this = img;
}
Image(const Image& img) : array_type_(NONE), array_(NULL) { *this = img; }
// Underlying data type.
enum DataType {
@ -73,9 +70,7 @@ class Image {
case SHORT:
size = reinterpret_cast<Array3Ds*>(array_)->MemorySizeInBytes();
break;
default :
size = 0;
assert(0);
default: size = 0; assert(0);
}
size += sizeof(*this);
return size;
@ -83,24 +78,11 @@ class Image {
~Image() {
switch (array_type_) {
case BYTE:
delete reinterpret_cast<Array3Du *>(array_);
break;
case FLOAT:
delete reinterpret_cast<Array3Df *>(array_);
break;
case INT:
delete reinterpret_cast<Array3Di *>(array_);
break;
case SHORT:
delete reinterpret_cast<Array3Ds *>(array_);
break;
default:
assert(0);
case BYTE: delete reinterpret_cast<Array3Du*>(array_); break;
case FLOAT: delete reinterpret_cast<Array3Df*>(array_); break;
case INT: delete reinterpret_cast<Array3Di*>(array_); break;
case SHORT: delete reinterpret_cast<Array3Ds*>(array_); break;
default: assert(0);
}
}
@ -124,8 +106,7 @@ class Image {
delete reinterpret_cast<Array3Ds*>(array_);
array_ = new Array3Ds(*(Array3Ds*)f.array_);
break;
default:
assert(0);
default: assert(0);
}
}
return *this;

View File

@ -53,7 +53,8 @@ void Rgb2Gray(const ImageIn &imaIn, ImageOut *imaOut) {
for (int j = 0; j < imaIn.Height(); ++j) {
for (int i = 0; i < imaIn.Width(); ++i) {
(*imaOut)(j, i) = RGB2GRAY(imaIn(j, i, 0) , imaIn(j, i, 1), imaIn(j, i, 2));
(*imaOut)(j, i) =
RGB2GRAY(imaIn(j, i, 0), imaIn(j, i, 1), imaIn(j, i, 2));
}
}
}

View File

@ -59,14 +59,18 @@ inline void safePutPixel(int yc, int xc, const Color *col, Image *pim) {
// Add the rotation of the ellipse.
// As the algorithm uses symmetry we must use 4 rotations.
template <class Image, class Color>
void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
const Color &col, Image *pim, double angle = 0.0) {
void DrawEllipse(int xc,
int yc,
int radiusA,
int radiusB,
const Color& col,
Image* pim,
double angle = 0.0) {
int a = radiusA;
int b = radiusB;
// Counter Clockwise rotation matrix.
double matXY[4] = { cos(angle), sin(angle),
-sin(angle), cos(angle)};
double matXY[4] = {cos(angle), sin(angle), -sin(angle), cos(angle)};
int x, y;
double d1, d2;
x = 0;
@ -139,10 +143,10 @@ void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
template <class Image, class Color>
void DrawCircle(int x, int y, int radius, const Color& col, Image* pim) {
Image& im = *pim;
if ( im.Contains(y + radius, x + radius)
|| im.Contains(y + radius, x - radius)
|| im.Contains(y - radius, x + radius)
|| im.Contains(y - radius, x - radius)) {
if (im.Contains(y + radius, x + radius) ||
im.Contains(y + radius, x - radius) ||
im.Contains(y - radius, x + radius) ||
im.Contains(y - radius, x - radius)) {
int x1 = 0;
int y1 = radius;
int d = radius - 1;
@ -185,13 +189,14 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
int width = pim->Width();
int height = pim->Height();
const bool xdir = xa < xb, ydir = ya < yb;
float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb,
&xleft = xdir?nx0:nx1, &yleft = xdir?ny0:ny1,
&xright = xdir?nx1:nx0, &yright = xdir?ny1:ny0,
&xup = ydir?nx0:nx1, &yup = ydir?ny0:ny1,
&xdown = ydir?nx1:nx0, &ydown = ydir?ny1:ny0;
float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb, &xleft = xdir ? nx0 : nx1,
&yleft = xdir ? ny0 : ny1, &xright = xdir ? nx1 : nx0,
&yright = xdir ? ny1 : ny0, &xup = ydir ? nx0 : nx1,
&yup = ydir ? ny0 : ny1, &xdown = ydir ? nx1 : nx0,
&ydown = ydir ? ny1 : ny0;
if (xright < 0 || xleft >= width) return;
if (xright < 0 || xleft >= width)
return;
if (xleft < 0) {
yleft -= xleft * (yright - yleft) / (xright - xleft);
xleft = 0;
@ -200,7 +205,8 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
yright -= (xright - width) * (yright - yleft) / (xright - xleft);
xright = width - 1;
}
if (ydown < 0 || yup >= height) return;
if (ydown < 0 || yup >= height)
return;
if (yup < 0) {
xup -= yup * (xdown - xup) / (ydown - yup);
yup = 0;

View File

@ -23,8 +23,8 @@
#include "libmv/image/image.h"
#include "testing/testing.h"
using libmv::Image;
using libmv::Array3Df;
using libmv::Image;
namespace {

View File

@ -27,16 +27,13 @@ namespace libmv {
/// Nearest neighbor interpolation.
template <typename T>
inline T SampleNearest(const Array3D<T> &image,
float y, float x, int v = 0) {
inline T SampleNearest(const Array3D<T>& image, float y, float x, int v = 0) {
const int i = int(round(y));
const int j = int(round(x));
return image(i, j, v);
}
inline void LinearInitAxis(float x, int size,
int *x1, int *x2,
float *dx) {
inline void LinearInitAxis(float x, int size, int* x1, int* x2, float* dx) {
const int ix = static_cast<int>(x);
if (ix < 0) {
*x1 = 0;
@ -106,10 +103,12 @@ inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
for (int r = 0; r < height; ++r) {
for (int c = 0; c < width; ++c) {
for (int k = 0; k < depth; ++k) {
// clang-format off
(*out)(r, c, k) = (in(2 * r, 2 * c, k) +
in(2 * r + 1, 2 * c, k) +
in(2 * r, 2 * c + 1, k) +
in(2 * r + 1, 2 * c + 1, k)) / 4.0f;
// clang-format on
}
}
}
@ -118,7 +117,8 @@ inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
// Sample a region centered at x,y in image with size extending by half_width
// from x,y. Channels specifies the number of channels to sample from.
inline void SamplePattern(const FloatImage& image,
double x, double y,
double x,
double y,
int half_width,
int channels,
FloatImage* sampled) {

View File

@ -34,10 +34,14 @@ class Tuple {
Tuple(T initial_value) { Reset(initial_value); }
template <typename D>
Tuple(D *values) { Reset(values); }
Tuple(D* values) {
Reset(values);
}
template <typename D>
Tuple(const Tuple<D, N> &b) { Reset(b); }
Tuple(const Tuple<D, N>& b) {
Reset(b);
}
template <typename D>
Tuple& operator=(const Tuple<D, N>& b) {
@ -46,7 +50,9 @@ class Tuple {
}
template <typename D>
void Reset(const Tuple<D, N>& b) { Reset(b.Data()); }
void Reset(const Tuple<D, N>& b) {
Reset(b.Data());
}
template <typename D>
void Reset(D* values) {
@ -77,9 +83,7 @@ class Tuple {
}
return true;
}
bool operator!=(const Tuple<T, N> &other) const {
return !(*this == other);
}
bool operator!=(const Tuple<T, N>& other) const { return !(*this == other); }
private:
T data_[N];

View File

@ -38,9 +38,11 @@ void PreconditionerFromPoints(const Mat &points, Mat3 *T) {
if (variance(1) < 1e-8)
yfactor = mean(1) = 1.0;
// clang-format off
*T << xfactor, 0, -xfactor * mean(0),
0, yfactor, -yfactor * mean(1),
0, 0, 1;
// clang-format on
}
// HZ 4.4.4 pag.107: Point conditioning (isotropic)
void IsotropicPreconditionerFromPoints(const Mat& points, Mat3* T) {
@ -57,9 +59,11 @@ void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T) {
mean.setOnes();
}
// clang-format off
*T << factor, 0, -factor * mean(0),
0, factor, -factor * mean(1),
0, 0, 1;
// clang-format on
}
void ApplyTransformationToPoints(const Mat& points,
@ -73,9 +77,7 @@ void ApplyTransformationToPoints(const Mat &points,
HomogeneousToEuclidean(p, transformed_points);
}
void NormalizePoints(const Mat &points,
Mat *normalized_points,
Mat3 *T) {
void NormalizePoints(const Mat& points, Mat* normalized_points, Mat3* T) {
PreconditionerFromPoints(points, T);
ApplyTransformationToPoints(points, *T, normalized_points);
}
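A short usage sketch of the conditioning helpers (an illustration, not part of the patch); the point values are arbitrary.
Mat points(2, 4);
points << 0, 1, 2, 3,
          0, 1, 4, 9;
Mat normalized_points;
Mat3 T;
NormalizePoints(points, &normalized_points, &T);
// Estimators run on normalized_points; their result is mapped back through T.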

View File

@ -34,9 +34,7 @@ void ApplyTransformationToPoints(const Mat &points,
const Mat3& T,
Mat* transformed_points);
void NormalizePoints(const Mat &points,
Mat *normalized_points,
Mat3 *T);
void NormalizePoints(const Mat& points, Mat* normalized_points, Mat3* T);
void NormalizeIsotropicPoints(const Mat& points,
Mat* normalized_points,

View File

@ -23,8 +23,8 @@
#include <cmath>
#include <limits>
#include <Eigen/SVD>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
@ -37,7 +37,8 @@ typedef unsigned int uint;
bool EuclideanResection(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3 *R, Vec3 *t,
Mat3* R,
Vec3* t,
ResectionMethod method) {
switch (method) {
case RESECTION_ANSAR_DANIILIDIS:
@ -49,8 +50,7 @@ bool EuclideanResection(const Mat2X &x_camera,
case RESECTION_PPNP:
return EuclideanResectionPPnP(x_camera, X_world, R, t);
break;
default:
LOG(FATAL) << "Unknown resection method.";
default: LOG(FATAL) << "Unknown resection method.";
}
return false;
}
@ -58,11 +58,12 @@ bool EuclideanResection(const Mat2X &x_camera,
bool EuclideanResection(const Mat& x_image,
const Mat3X& X_world,
const Mat3& K,
Mat3 *R, Vec3 *t,
Mat3* R,
Vec3* t,
ResectionMethod method) {
CHECK(x_image.rows() == 2 || x_image.rows() == 3)
<< "Invalid size for x_image: "
<< x_image.rows() << "x" << x_image.cols();
<< "Invalid size for x_image: " << x_image.rows() << "x"
<< x_image.cols();
Mat2X x_camera;
if (x_image.rows() == 2) {
@ -73,10 +74,7 @@ bool EuclideanResection(const Mat &x_image,
return EuclideanResection(x_camera, X_world, R, t, method);
}
void AbsoluteOrientation(const Mat3X &X,
const Mat3X &Xp,
Mat3 *R,
Vec3 *t) {
void AbsoluteOrientation(const Mat3X& X, const Mat3X& Xp, Mat3* R, Vec3* t) {
int num_points = X.cols();
Vec3 C = X.rowwise().sum() / num_points; // Centroid of X.
Vec3 Cp = Xp.rowwise().sum() / num_points; // Centroid of Xp.
@ -100,10 +98,12 @@ void AbsoluteOrientation(const Mat3X &X,
double Szy = Xn.row(2).dot(Xpn.row(1));
Mat4 N;
// clang-format off
N << Sxx + Syy + Szz, Syz - Szy, Szx - Sxz, Sxy - Syx,
Syz - Szy, Sxx - Syy - Szz, Sxy + Syx, Szx + Sxz,
Szx - Sxz, Sxy + Syx, -Sxx + Syy - Szz, Syz + Szy,
Sxy - Syx, Szx + Sxz, Syz + Szy, -Sxx - Syy + Szz;
// clang-format on
// Find the unit quaternion q that maximizes qNq. It is the eigenvector
// corresponding to the largest eigenvalue.
@ -118,6 +118,7 @@ void AbsoluteOrientation(const Mat3X &X,
double q1q3 = q(1) * q(3);
double q2q3 = q(2) * q(3);
// clang-format off
(*R) << qq(0) + qq(1) - qq(2) - qq(3),
2 * (q1q2 - q0q3),
2 * (q1q3 + q0q2),
@ -127,6 +128,7 @@ void AbsoluteOrientation(const Mat3X &X,
2 * (q1q3 - q0q2),
2 * (q2q3 + q0q1),
qq(0) - qq(1) - qq(2) + qq(3);
// clang-format on
// Fix the handedness of the R matrix.
if (R->determinant() < 0) {
@ -176,9 +178,7 @@ static int Sign(double value) {
// Lambda to create the constraints in equation (5) in "Linear Pose Estimation
// from Points or Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no.
// 5.
static Vec MatrixToConstraint(const Mat &A,
int num_k_columns,
int num_lambda) {
static Vec MatrixToConstraint(const Mat& A, int num_k_columns, int num_lambda) {
Vec C(num_k_columns);
C.setZero();
int idx = 0;
@ -246,17 +246,17 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
int num_lambda = num_points + 1; // Dimension of the null space of M.
Mat V = M.jacobiSvd(Eigen::ComputeFullV).matrixV().block(0,
num_m_rows,
num_m_columns,
num_lambda);
Mat V = M.jacobiSvd(Eigen::ComputeFullV)
.matrixV()
.block(0, num_m_rows, num_m_columns, num_lambda);
// TODO(vess): The number of constraint equations in K (num_k_rows) must be
// (num_points + 1) * (num_points + 2)/2. This creates a performance issue
// for more than 4 points. It is fine for 4 points at the moment with 18
// instead of 15 equations.
int num_k_rows = num_m_rows + num_points *
(num_points*(num_points-1)/2 - num_points+1);
int num_k_rows =
num_m_rows +
num_points * (num_points * (num_points - 1) / 2 - num_points + 1);
int num_k_columns = num_lambda * (num_lambda + 1) / 2;
Mat K(num_k_rows, num_k_columns);
K.setZero();
@ -317,14 +317,12 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
}
// Ensure positiveness of the largest value corresponding to lambda_ii.
L_sq = L_sq * Sign(L_sq(IJToIndex(max_L_sq_index,
max_L_sq_index,
num_lambda)));
L_sq =
L_sq * Sign(L_sq(IJToIndex(max_L_sq_index, max_L_sq_index, num_lambda)));
Vec L(num_lambda);
L(max_L_sq_index) = sqrt(L_sq(IJToIndex(max_L_sq_index,
max_L_sq_index,
num_lambda)));
L(max_L_sq_index) =
sqrt(L_sq(IJToIndex(max_L_sq_index, max_L_sq_index, num_lambda)));
for (int i = 0; i < num_lambda; ++i) {
if (i != max_L_sq_index) {
@ -408,7 +406,8 @@ static void ComputePointsCoordinatesInCameraFrame(
size_t num_points = alphas.cols();
// Estimates the control points in the camera reference frame.
Mat34 C2b; C2b.setZero();
Mat34 C2b;
C2b.setZero();
for (size_t cu = 0; cu < 4; cu++) {
for (size_t c = 0; c < 4; c++) {
C2b.col(c) += betas(cu) * U.block(11 - cu, c * 3, 1, 3).transpose();
@ -438,7 +437,8 @@ static void ComputePointsCoordinatesInCameraFrame(
bool EuclideanResectionEPnP(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3 *R, Vec3 *t) {
Mat3* R,
Vec3* t) {
CHECK(x_camera.cols() == X_world.cols());
CHECK(x_camera.cols() > 3);
size_t num_points = X_world.cols();
@ -462,6 +462,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
double a3 = alphas(3, c);
double ui = x_camera(0, c);
double vi = x_camera(1, c);
// clang-format off
M.block(2*c, 0, 2, 12) << a0, 0,
a0*(-ui), a1, 0,
a1*(-ui), a2, 0,
@ -471,6 +472,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
a1, a1*(-vi), 0,
a2, a2*(-vi), 0,
a3, a3*(-vi);
// clang-format on
}
// TODO(julien): Avoid the transpose by rewriting the u2.block() calls.
@ -510,6 +512,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
Eigen::Matrix<double, 6, 10> L;
for (size_t r = 0; r < 6; r++) {
// clang-format off
L.row(r) << dv1.row(r).dot(dv1.row(r)),
2.0 * dv1.row(r).dot(dv2.row(r)),
dv2.row(r).dot(dv2.row(r)),
@ -520,19 +523,23 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
2.0 * dv2.row(r).dot(dv4.row(r)),
2.0 * dv3.row(r).dot(dv4.row(r)),
dv4.row(r).dot(dv4.row(r));
// clang-format on
}
Vec6 rho;
// clang-format off
rho << (X_control_points.col(0) - X_control_points.col(1)).squaredNorm(),
(X_control_points.col(0) - X_control_points.col(2)).squaredNorm(),
(X_control_points.col(0) - X_control_points.col(3)).squaredNorm(),
(X_control_points.col(1) - X_control_points.col(2)).squaredNorm(),
(X_control_points.col(1) - X_control_points.col(3)).squaredNorm(),
(X_control_points.col(2) - X_control_points.col(3)).squaredNorm();
// clang-format on
// There are three possible solutions based on the three approximations of L
// (betas). Below, each one is solved for, then the best one is chosen.
Mat3X X_camera;
Mat3 K; K.setIdentity();
Mat3 K;
K.setIdentity();
vector<Mat3> Rs(3);
vector<Vec3> ts(3);
Vec rmse(3);
@ -715,7 +722,8 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
// other hand, it did work on the first try.
bool EuclideanResectionPPnP(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3 *R, Vec3 *t) {
Mat3* R,
Vec3* t) {
int n = x_camera.cols();
Mat Z = Mat::Zero(n, n);
Vec e = Vec::Ones(n);
@ -750,7 +758,8 @@ bool EuclideanResectionPPnP(const Mat2X &x_camera,
Mat PR = P * *R; // n x 3
c = (S - Z * PR).transpose() * II;
Mat Y = S - e * c.transpose(); // n x 3
Vec Zmindiag = (PR * Y.transpose()).diagonal()
Vec Zmindiag = (PR * Y.transpose())
.diagonal()
.cwiseQuotient(P.rowwise().squaredNorm());
for (int i = 0; i < n; ++i) {
Zmindiag[i] = std::max(Zmindiag[i], 0.0);
@ -769,6 +778,5 @@ bool EuclideanResectionPPnP(const Mat2X &x_camera,
return true;
}
} // namespace resection
} // namespace euclidean_resection
} // namespace libmv

View File

@ -21,8 +21,8 @@
#ifndef LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
#define LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
namespace euclidean_resection {
@ -52,7 +52,8 @@ enum ResectionMethod {
*/
bool EuclideanResection(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3 *R, Vec3 *t,
Mat3* R,
Vec3* t,
ResectionMethod method = RESECTION_EPNP);
/**
@ -71,7 +72,8 @@ bool EuclideanResection(const Mat2X &x_camera,
bool EuclideanResection(const Mat& x_image,
const Mat3X& X_world,
const Mat3& K,
Mat3 *R, Vec3 *t,
Mat3* R,
Vec3* t,
ResectionMethod method = RESECTION_EPNP);
/**
@ -84,10 +86,7 @@ bool EuclideanResection(const Mat &x_image,
* Horn, Hilden, "Closed-form solution of absolute orientation using
* orthonormal matrices"
*/
void AbsoluteOrientation(const Mat3X &X,
const Mat3X &Xp,
Mat3 *R,
Vec3 *t);
void AbsoluteOrientation(const Mat3X& X, const Mat3X& Xp, Mat3* R, Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
@ -104,7 +103,8 @@ void AbsoluteOrientation(const Mat3X &X,
*/
void EuclideanResectionAnsarDaniilidis(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3 *R, Vec3 *t);
Mat3* R,
Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
* more 3D points and their images.
@ -122,7 +122,8 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
*/
bool EuclideanResectionEPnP(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3 *R, Vec3 *t);
Mat3* R,
Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
@ -139,10 +140,10 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
*/
bool EuclideanResectionPPnP(const Mat2X& x_camera,
const Mat3X& X_world,
Mat3 *R, Vec3 *t);
Mat3* R,
Vec3* t);
} // namespace euclidean_resection
} // namespace libmv
#endif /* LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_ */
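For orientation, a minimal calling sketch for the resection entry point above (an assumption, not part of the change); the random data only demonstrates the signature, not a meaningful pose.
libmv::Mat2X x_camera(2, 6);  // Normalized camera coordinates of the points.
libmv::Mat3X X_world(3, 6);   // The corresponding 3D points.
x_camera.setRandom();
X_world.setRandom();
libmv::Mat3 R;
libmv::Vec3 t;
bool ok = libmv::euclidean_resection::EuclideanResection(
    x_camera, X_world, &R, &t);  // Defaults to RESECTION_EPNP.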

View File

@ -19,9 +19,9 @@
// IN THE SOFTWARE.
#include "libmv/multiview/euclidean_resection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv::euclidean_resection;
@ -76,9 +76,9 @@ TEST(AbsoluteOrientation, QuaternionSolution) {
// Create a random translation and rotation.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 t_input;
t_input.setRandom();
@ -109,26 +109,29 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
image_dimensions << 1600, 1200;
Mat3 KK;
// clang-format off
KK << 2796, 0, 804,
0 , 2796, 641,
0, 0, 1;
// clang-format on
// The real image points.
int num_points = 4;
Mat3X x_image(3, num_points);
// clang-format off
x_image << 1164.06, 734.948, 749.599, 430.727,
681.386, 844.59, 496.315, 580.775,
1, 1, 1, 1;
// clang-format on
// A vector of the 4 distances to the 3D points.
Vec X_distances = 100 * Vec::Random(num_points).array().abs();
// Create the random camera motion R and t that resection should recover.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 T_input;
T_input.setRandom();
@ -140,15 +143,21 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
Vec3 T_expected;
Mat3X X_world;
Mat2X x_camera;
CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
&x_camera, &X_world, &R_expected, &T_expected);
CreateCameraSystem(KK,
x_image,
X_distances,
R_input,
T_input,
&x_camera,
&X_world,
&R_expected,
&T_expected);
// Finally, run the code under test.
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_ANSAR_DANIILIDIS);
EuclideanResection(
x_camera, X_world, &R_output, &T_output, RESECTION_ANSAR_DANIILIDIS);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
@ -173,9 +182,11 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
// TODO(jmichot): Reduce the code duplication here with the code above.
TEST(EuclideanResection, Points6AllRandomInput) {
Mat3 KK;
// clang-format off
KK << 2796, 0, 804,
0 , 2796, 641,
0, 0, 1;
// clang-format on
// Create random image points for a 1600x1200 image.
int w = 1600;
@ -192,9 +203,9 @@ TEST(EuclideanResection, Points6AllRandomInput) {
// Create the random camera motion R and t that resection should recover.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 T_input;
T_input.setRandom();
@ -204,33 +215,36 @@ TEST(EuclideanResection, Points6AllRandomInput) {
Mat3 R_expected;
Vec3 T_expected;
Mat3X X_world;
CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
&x_camera, &X_world, &R_expected, &T_expected);
CreateCameraSystem(KK,
x_image,
X_distances,
R_input,
T_input,
&x_camera,
&X_world,
&R_expected,
&T_expected);
// Test each of the resection methods.
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_ANSAR_DANIILIDIS);
EuclideanResection(
x_camera, X_world, &R_output, &T_output, RESECTION_ANSAR_DANIILIDIS);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_EPNP);
EuclideanResection(x_camera, X_world, &R_output, &T_output, RESECTION_EPNP);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_image, X_world, KK,
&R_output, &T_output);
EuclideanResection(x_image, X_world, KK, &R_output, &T_output);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}

View File

@ -22,11 +22,11 @@
#include "ceres/ceres.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
namespace libmv {
@ -106,9 +106,7 @@ void EnforceFundamentalRank2Constraint(Mat3 *F) {
}
// HZ 11.2 pag.281 (x1 = x, x2 = x')
double NormalizedEightPointSolver(const Mat &x1,
const Mat &x2,
Mat3 *F) {
double NormalizedEightPointSolver(const Mat& x1, const Mat& x2, Mat3* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());
@ -169,24 +167,28 @@ double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
// Then, use the condition det(F) = 0 to determine F. In other words, solve
// det(F1 + a*F2) = 0 for a.
double a = F1(0, 0), j = F2(0, 0),
b = F1(0, 1), k = F2(0, 1),
c = F1(0, 2), l = F2(0, 2),
d = F1(1, 0), m = F2(1, 0),
e = F1(1, 1), n = F2(1, 1),
f = F1(1, 2), o = F2(1, 2),
g = F1(2, 0), p = F2(2, 0),
h = F1(2, 1), q = F2(2, 1),
i = F1(2, 2), r = F2(2, 2);
double a = F1(0, 0), j = F2(0, 0);
double b = F1(0, 1), k = F2(0, 1);
double c = F1(0, 2), l = F2(0, 2);
double d = F1(1, 0), m = F2(1, 0);
double e = F1(1, 1), n = F2(1, 1);
double f = F1(1, 2), o = F2(1, 2);
double g = F1(2, 0), p = F2(2, 0);
double h = F1(2, 1), q = F2(2, 1);
double i = F1(2, 2), r = F2(2, 2);
// Run fundamental_7point_coeffs.py to get the below coefficients.
// The coefficients are in ascending powers of alpha, i.e. P[N]*x^N.
double P[4] = {
a * e * i + b * f * g + c * d * h - a * f * h - b * d * i - c * e * g,
a*e*r + a*i*n + b*f*p + b*g*o + c*d*q + c*h*m + d*h*l + e*i*j + f*g*k -
a*f*q - a*h*o - b*d*r - b*i*m - c*e*p - c*g*n - d*i*k - e*g*l - f*h*j,
a*n*r + b*o*p + c*m*q + d*l*q + e*j*r + f*k*p + g*k*o + h*l*m + i*j*n -
a*o*q - b*m*r - c*n*p - d*k*r - e*l*p - f*j*q - g*l*n - h*j*o - i*k*m,
a * e * r + a * i * n + b * f * p + b * g * o + c * d * q + c * h * m +
d * h * l + e * i * j + f * g * k - a * f * q - a * h * o -
b * d * r - b * i * m - c * e * p - c * g * n - d * i * k -
e * g * l - f * h * j,
a * n * r + b * o * p + c * m * q + d * l * q + e * j * r + f * k * p +
g * k * o + h * l * m + i * j * n - a * o * q - b * m * r -
c * n * p - d * k * r - e * l * p - f * j * q - g * l * n -
h * j * o - i * k * m,
j * n * r + k * o * p + l * m * q - j * o * q - k * m * r - l * n * p,
};
@ -218,8 +220,8 @@ double FundamentalFromCorrespondences7Point(const Mat &x1,
ApplyTransformationToPoints(x2, T2, &x2_normalized);
// Estimate the fundamental matrix.
double smaller_singular_value =
FundamentalFrom7CorrespondencesLinear(x1_normalized, x2_normalized, &(*F));
double smaller_singular_value = FundamentalFrom7CorrespondencesLinear(
x1_normalized, x2_normalized, &(*F));
for (int k = 0; k < F->size(); ++k) {
Mat3& Fmat = (*F)[k];
@ -244,8 +246,8 @@ double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
return Square(y_F_x) / ( F_x.head<2>().squaredNorm()
+ Ft_y.head<2>().squaredNorm());
return Square(y_F_x) /
(F_x.head<2>().squaredNorm() + Ft_y.head<2>().squaredNorm());
}
double SymmetricEpipolarDistance(const Mat& F, const Vec2& x1, const Vec2& x2) {
@ -256,8 +258,8 @@ double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
return Square(y_F_x) * ( 1 / F_x.head<2>().squaredNorm()
+ 1 / Ft_y.head<2>().squaredNorm());
return Square(y_F_x) *
(1 / F_x.head<2>().squaredNorm() + 1 / Ft_y.head<2>().squaredNorm());
}
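As a usage note (a sketch, not from this change): both distances score how well one correspondence agrees with a fundamental matrix; the identity F below is only a stand-in for a real estimate.
libmv::Mat3 F;
F.setIdentity();  // Stand-in; a real F would come from an estimator.
libmv::Vec2 x1(12.0, 34.0);
libmv::Vec2 x2(15.0, 30.0);
double sampson = libmv::SampsonDistance(F, x1, x2);
double symmetric = libmv::SymmetricEpipolarDistance(F, x1, x2);
// Smaller values mean (x1, x2) is more consistent with F.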
// HZ 9.6 pag 257 (formula 9.12)
@ -288,11 +290,8 @@ void RelativeCameraMotion(const Mat3 &R1,
}
// HZ 9.6 pag 257
void EssentialFromRt(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *E) {
void EssentialFromRt(
const Mat3& R1, const Vec3& t1, const Mat3& R2, const Vec3& t2, Mat3* E) {
Mat3 R;
Vec3 t;
RelativeCameraMotion(R1, t1, R2, t2, &R, &t);
@ -318,9 +317,11 @@ void MotionFromEssential(const Mat3 &E,
}
Mat3 W;
// clang-format off
W << 0, -1, 0,
1, 0, 0,
0, 0, 1;
// clang-format on
Mat3 U_W_Vt = U * W * Vt;
Mat3 U_Wt_Vt = U * W.transpose() * Vt;
@ -399,8 +400,8 @@ void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
double s = (a + b) / 2.0;
LG << "Initial reconstruction's rotation is non-euclidean by "
<< (((a - b) / std::max(a, b)) * 100) << "%; singular values:"
<< svd.singularValues().transpose();
<< (((a - b) / std::max(a, b)) * 100)
<< "%; singular values:" << svd.singularValues().transpose();
Vec3 diag;
diag << s, s, 0;
@ -410,9 +411,8 @@ void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
// Default settings for fundamental estimation which should be suitable
// for a wide range of use cases.
EstimateFundamentalOptions::EstimateFundamentalOptions(void) :
max_num_iterations(50),
expected_average_symmetric_distance(1e-16) {
EstimateFundamentalOptions::EstimateFundamentalOptions(void)
: max_num_iterations(50), expected_average_symmetric_distance(1e-16) {
}
namespace {
@ -420,8 +420,7 @@ namespace {
// used for fundamental matrix refinement.
class FundamentalSymmetricEpipolarCostFunctor {
public:
FundamentalSymmetricEpipolarCostFunctor(const Vec2 &x,
const Vec2 &y)
FundamentalSymmetricEpipolarCostFunctor(const Vec2& x, const Vec2& y)
: x_(x), y_(y) {}
template <typename T>
@ -454,7 +453,8 @@ class FundamentalSymmetricEpipolarCostFunctor {
// average value.
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
TerminationCheckingCallback(const Mat &x1, const Mat &x2,
TerminationCheckingCallback(const Mat& x1,
const Mat& x2,
const EstimateFundamentalOptions& options,
Mat3* F)
: options_(options), x1_(x1), x2_(x2), F_(F) {}
@ -469,9 +469,7 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
// Calculate average of symmetric epipolar distance.
double average_distance = 0.0;
for (int i = 0; i < x1_.cols(); i++) {
average_distance = SymmetricEpipolarDistance(*F_,
x1_.col(i),
x2_.col(i));
average_distance = SymmetricEpipolarDistance(*F_, x1_.col(i), x2_.col(i));
}
average_distance /= x1_.cols();
@ -506,16 +504,15 @@ bool EstimateFundamentalFromCorrespondences(
// Step 2: Refine matrix using Ceres minimizer.
ceres::Problem problem;
for (int i = 0; i < x1.cols(); i++) {
FundamentalSymmetricEpipolarCostFunctor
*fundamental_symmetric_epipolar_cost_function =
new FundamentalSymmetricEpipolarCostFunctor(x1.col(i),
x2.col(i));
FundamentalSymmetricEpipolarCostFunctor*
fundamental_symmetric_epipolar_cost_function =
new FundamentalSymmetricEpipolarCostFunctor(x1.col(i), x2.col(i));
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<
FundamentalSymmetricEpipolarCostFunctor,
new ceres::AutoDiffCostFunction<FundamentalSymmetricEpipolarCostFunctor,
2, // num_residuals
9>(fundamental_symmetric_epipolar_cost_function),
9>(
fundamental_symmetric_epipolar_cost_function),
NULL,
F->data());
}

View File

@ -47,9 +47,7 @@ double FundamentalFromCorrespondences7Point(const Mat &x1,
/**
* 8 points (points coordinates must be in image space):
*/
double NormalizedEightPointSolver(const Mat &x1,
const Mat &x2,
Mat3 *F);
double NormalizedEightPointSolver(const Mat& x1, const Mat& x2, Mat3* F);
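A hedged calling sketch (not part of the patch); the random fill merely stands in for eight real image-space correspondences.
libmv::Mat x1(2, 8), x2(2, 8);
x1.setRandom();  // Stand-ins for matched image points in the two views.
x2.setRandom();
libmv::Mat3 F;
double smaller_singular_value = libmv::NormalizedEightPointSolver(x1, x2, &F);
// With real correspondences, x2^T * F * x1 is close to zero for every pair.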
/**
* Fundamental matrix utility function:
@ -98,11 +96,8 @@ void FundamentalFromEssential(const Mat3 &E,
const Mat3& K2,
Mat3* F);
void EssentialFromRt(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *E);
void EssentialFromRt(
const Mat3& R1, const Vec3& t1, const Mat3& R2, const Vec3& t2, Mat3* E);
void MotionFromEssential(const Mat3& E,
std::vector<Mat3>* Rs,

View File

@ -34,12 +34,14 @@ using namespace libmv;
TEST(Fundamental, FundamentalFromProjections) {
Mat34 P1_gt, P2_gt;
// clang-format off
P1_gt << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
P2_gt << 1, 1, 1, 3,
0, 2, 0, 3,
0, 1, 1, 0;
// clang-format on
Mat3 F_gt;
FundamentalFromProjections(P1_gt, P2_gt, &F_gt);
@ -55,8 +57,10 @@ TEST(Fundamental, FundamentalFromProjections) {
TEST(Fundamental, PreconditionerFromPoints) {
int n = 4;
Mat points(2, n);
// clang-format off
points << 0, 0, 1, 1,
0, 2, 1, 3;
// clang-format on
Mat3 T;
PreconditionerFromPoints(points, &T);
@ -152,8 +156,8 @@ TEST(Fundamental, MotionFromEssentialAndCorrespondence) {
Mat3 R_estimated;
Vec3 t_estimated;
MotionFromEssentialAndCorrespondence(E, d.K1, x1, d.K2, x2,
&R_estimated, &t_estimated);
MotionFromEssentialAndCorrespondence(
E, d.K1, x1, d.K2, x2, &R_estimated, &t_estimated);
EXPECT_LE(FrobeniusDistance(R_estimated, R), 1e-8);
EXPECT_LE(DistanceL2(t_estimated, t), 1e-8);

View File

@ -44,10 +44,7 @@ namespace libmv {
* (-x2*a+x1*d)*y1 + (-x2*b+x1*e)*y2 + -x2*c+x1*f |0|
*/
static bool Homography2DFromCorrespondencesLinearEuc(
const Mat &x1,
const Mat &x2,
Mat3 *H,
double expected_precision) {
const Mat& x1, const Mat& x2, Mat3* H, double expected_precision) {
assert(2 == x1.rows());
assert(4 <= x1.cols());
assert(x1.rows() == x2.rows());
@ -93,6 +90,7 @@ static bool Homography2DFromCorrespondencesLinearEuc(
}
}
// clang-format off
/** 2D Homography transformation estimation in the case that points are in
* homogeneous coordinates.
*
@ -101,13 +99,14 @@ static bool Homography2DFromCorrespondencesLinearEuc(
* |-x2 x1 0| |g h 1| |y3| -x2*a+x1*d -x2*b+x1*e -x2*c+x1*f |y3| (-x2*a+x1*d)*y1 (-x2*b+x1*e)*y2 (-x2*c+x1*f)*y3 |0|
* X = |a b c d e f g h|^t
*/
// clang-format on
bool Homography2DFromCorrespondencesLinear(const Mat& x1,
const Mat& x2,
Mat3* H,
double expected_precision) {
if (x1.rows() == 2) {
return Homography2DFromCorrespondencesLinearEuc(x1, x2, H,
expected_precision);
return Homography2DFromCorrespondencesLinearEuc(
x1, x2, H, expected_precision);
}
assert(3 == x1.rows());
assert(4 <= x1.cols());
@ -158,8 +157,8 @@ bool Homography2DFromCorrespondencesLinear(const Mat &x1,
// Default settings for homography estimation which should be suitable
// for a wide range of use cases.
EstimateHomographyOptions::EstimateHomographyOptions(void) :
use_normalization(true),
EstimateHomographyOptions::EstimateHomographyOptions(void)
: use_normalization(true),
max_num_iterations(50),
expected_average_symmetric_distance(1e-16) {
}
@ -169,17 +168,15 @@ void GetNormalizedPoints(const Mat &original_points,
Mat* normalized_points,
Mat3* normalization_matrix) {
IsotropicPreconditionerFromPoints(original_points, normalization_matrix);
ApplyTransformationToPoints(original_points,
*normalization_matrix,
normalized_points);
ApplyTransformationToPoints(
original_points, *normalization_matrix, normalized_points);
}
// Cost functor which computes symmetric geometric distance
// used for homography matrix refinement.
class HomographySymmetricGeometricCostFunctor {
public:
HomographySymmetricGeometricCostFunctor(const Vec2 &x,
const Vec2 &y)
HomographySymmetricGeometricCostFunctor(const Vec2& x, const Vec2& y)
: x_(x), y_(y) {}
template <typename T>
@ -221,7 +218,8 @@ class HomographySymmetricGeometricCostFunctor {
// average value.
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
TerminationCheckingCallback(const Mat &x1, const Mat &x2,
TerminationCheckingCallback(const Mat& x1,
const Mat& x2,
const EstimateHomographyOptions& options,
Mat3* H)
: options_(options), x1_(x1), x2_(x2), H_(H) {}
@ -236,9 +234,8 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
// Calculate average of symmetric geometric distance.
double average_distance = 0.0;
for (int i = 0; i < x1_.cols(); i++) {
average_distance = SymmetricGeometricDistance(*H_,
x1_.col(i),
x2_.col(i));
average_distance =
SymmetricGeometricDistance(*H_, x1_.col(i), x2_.col(i));
}
average_distance /= x1_.cols();
@ -272,8 +269,7 @@ bool EstimateHomography2DFromCorrespondences(
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
Mat3 T1 = Mat3::Identity(),
T2 = Mat3::Identity();
Mat3 T1 = Mat3::Identity(), T2 = Mat3::Identity();
// Step 1: Algebraic homography estimation.
Mat x1_normalized, x2_normalized;
@ -300,16 +296,15 @@ bool EstimateHomography2DFromCorrespondences(
// Step 2: Refine matrix using Ceres minimizer.
ceres::Problem problem;
for (int i = 0; i < x1.cols(); i++) {
HomographySymmetricGeometricCostFunctor
*homography_symmetric_geometric_cost_function =
new HomographySymmetricGeometricCostFunctor(x1.col(i),
x2.col(i));
HomographySymmetricGeometricCostFunctor*
homography_symmetric_geometric_cost_function =
new HomographySymmetricGeometricCostFunctor(x1.col(i), x2.col(i));
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<
HomographySymmetricGeometricCostFunctor,
new ceres::AutoDiffCostFunction<HomographySymmetricGeometricCostFunctor,
4, // num_residuals
9>(homography_symmetric_geometric_cost_function),
9>(
homography_symmetric_geometric_cost_function),
NULL,
H->data());
}
@ -335,6 +330,7 @@ bool EstimateHomography2DFromCorrespondences(
return summary.IsSolutionUsable();
}
// clang-format off
/**
* x2 ~ A * x1
* x2^t * Hi * A *x1 = 0
@ -362,6 +358,7 @@ bool EstimateHomography2DFromCorrespondences(
*
* X = |a b c d e f g h i j k l m n o|^t
*/
// clang-format on
bool Homography3DFromCorrespondencesLinear(const Mat& x1,
const Mat& x2,
Mat4* H,

View File

@ -49,11 +49,11 @@ namespace libmv {
* \return True if the transformation estimation has succeeded.
* \note There must be at least 4 non-colinear points.
*/
bool Homography2DFromCorrespondencesLinear(const Mat &x1,
bool Homography2DFromCorrespondencesLinear(
const Mat& x1,
const Mat& x2,
Mat3* H,
double expected_precision =
EigenDouble::dummy_precision());
double expected_precision = EigenDouble::dummy_precision());
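To illustrate the call (a sketch, not part of the change): four corners of a unit square matched against the same square translated by (1, 1).
libmv::Mat x1(2, 4), x2(2, 4);
x1 << 0, 1, 1, 0,
      0, 0, 1, 1;
x2 << 1, 2, 2, 1,
      1, 1, 2, 2;
libmv::Mat3 H;
if (libmv::Homography2DFromCorrespondencesLinear(x1, x2, &H)) {
  // H maps each column of x1 onto the matching column of x2 (up to scale).
}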
/**
* This structure contains options that control how the homography
@ -129,11 +129,11 @@ bool EstimateHomography2DFromCorrespondences(
* \note Need at least 5 non coplanar points
* \note Points coordinates must be in homogeneous coordinates
*/
bool Homography3DFromCorrespondencesLinear(const Mat &x1,
bool Homography3DFromCorrespondencesLinear(
const Mat& x1,
const Mat& x2,
Mat4* H,
double expected_precision =
EigenDouble::dummy_precision());
double expected_precision = EigenDouble::dummy_precision());
/**
* Calculate symmetric geometric cost:

View File

@ -47,8 +47,7 @@ struct AsymmetricError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[out] dx A 2xN matrix of column vectors of residual errors
*/
static void Residuals(const Mat &H, const Mat &x1,
const Mat &x2, Mat2X *dx) {
static void Residuals(const Mat& H, const Mat& x1, const Mat& x2, Mat2X* dx) {
dx->resize(2, x1.cols());
Mat3X x2h_est;
if (x1.rows() == 2)
@ -74,8 +73,7 @@ struct AsymmetricError {
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[out] dx A vector of size 2 of the residual error
*/
static void Residuals(const Mat &H, const Vec &x1,
const Vec &x2, Vec2 *dx) {
static void Residuals(const Mat& H, const Vec& x1, const Vec& x2, Vec2* dx) {
Vec3 x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
@ -171,8 +169,7 @@ struct AlgebraicError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[out] dx A 3xN matrix of column vectors of residual errors
*/
static void Residuals(const Mat &H, const Mat &x1,
const Mat &x2, Mat3X *dx) {
static void Residuals(const Mat& H, const Mat& x1, const Mat& x2, Mat3X* dx) {
dx->resize(3, x1.cols());
Vec3 col;
for (int i = 0; i < x1.cols(); ++i) {
@ -191,8 +188,7 @@ struct AlgebraicError {
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[out] dx A vector of size 3 of the residual error
*/
static void Residuals(const Mat &H, const Vec &x1,
const Vec &x2, Vec3 *dx) {
static void Residuals(const Mat& H, const Vec& x1, const Vec& x2, Vec3* dx) {
Vec3 x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));

View File

@ -41,16 +41,20 @@ class Homography2DNormalizedParameterization {
/// Convert from the 8 parameters to a H matrix.
static void To(const Parameters& p, Parameterized* h) {
// clang-format off
*h << p(0), p(1), p(2),
p(3), p(4), p(5),
p(6), p(7), 1.0;
// clang-format on
}
/// Convert from a H matrix to the 8 parameters.
static void From(const Parameterized& h, Parameters* p) {
// clang-format off
*p << h(0, 0), h(0, 1), h(0, 2),
h(1, 0), h(1, 1), h(1, 2),
h(2, 0), h(2, 1);
// clang-format on
}
};
@ -71,18 +75,22 @@ class Homography3DNormalizedParameterization {
/// Convert from the 15 parameters to a H matrix.
static void To(const Parameters& p, Parameterized* h) {
// clang-format off
*h << p(0), p(1), p(2), p(3),
p(4), p(5), p(6), p(7),
p(8), p(9), p(10), p(11),
p(12), p(13), p(14), 1.0;
// clang-format on
}
/// Convert from a H matrix to the 15 parameters.
static void From(const Parameterized& h, Parameters* p) {
// clang-format off
*p << h(0, 0), h(0, 1), h(0, 2), h(0, 3),
h(1, 0), h(1, 1), h(1, 2), h(1, 3),
h(2, 0), h(2, 1), h(2, 2), h(2, 3),
h(3, 0), h(3, 1), h(3, 2);
// clang-format on
}
};

View File

@ -18,10 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/multiview/homography.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/homography.h"
#include "testing/testing.h"
namespace {
using namespace libmv;
@ -34,9 +34,7 @@ namespace {
// TODO(sergey): Consider using this in all tests since possible homography
// matrix is not fixed to a single value and different-looking matrices
// might actually correspond to the same exact transform.
void CheckHomography2DTransform(const Mat3 &H,
const Mat &x1,
const Mat &x2) {
void CheckHomography2DTransform(const Mat3& H, const Mat& x1, const Mat& x2) {
for (int i = 0; i < x2.cols(); ++i) {
Vec3 x2_expected = x2.col(i);
Vec3 x2_observed = H * x1.col(i);
@ -49,15 +47,19 @@ void CheckHomography2DTransform(const Mat3 &H,
TEST(Homography2DTest, Rotation45AndTranslationXY) {
Mat x1(3, 4);
// clang-format off
x1 << 0, 1, 0, 5,
0, 0, 2, 3,
1, 1, 1, 1;
// clang-format on
double angle = 45.0;
Mat3 m;
// clang-format off
m << cos(angle), -sin(angle), -2,
sin(angle), cos(angle), 5,
0, 0, 1;
// clang-format on
Mat x2 = x1;
// Transform point from ground truth matrix
@ -76,13 +78,17 @@ TEST(Homography2DTest, Rotation45AndTranslationXY) {
TEST(Homography2DTest, AffineGeneral4) {
// TODO(julien) find why it doesn't work with 4 points!!!
Mat x1(3, 4);
// clang-format off
x1 << 0, 1, 0, 2,
0, 0, 1, 2,
1, 1, 1, 1;
// clang-format on
Mat3 m;
// clang-format off
m << 3, -1, 4,
6, -2, -3,
0, 0, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@ -109,13 +115,17 @@ TEST(Homography2DTest, AffineGeneral4) {
TEST(Homography2DTest, AffineGeneral5) {
Mat x1(3, 5);
// clang-format off
x1 << 0, 1, 0, 2, 5,
0, 0, 1, 2, 2,
1, 1, 1, 1, 1;
// clang-format on
Mat3 m;
// clang-format off
m << 3, -1, 4,
6, -2, -3,
0, 0, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i)
@ -142,13 +152,17 @@ TEST(Homography2DTest, AffineGeneral5) {
TEST(Homography2DTest, HomographyGeneral) {
Mat x1(3, 4);
// clang-format off
x1 << 0, 1, 0, 5,
0, 0, 2, 3,
1, 1, 1, 1;
// clang-format on
Mat3 m;
// clang-format off
m << 3, -1, 4,
6, -2, -3,
1, -3, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i)
@ -164,10 +178,12 @@ TEST(Homography2DTest, HomographyGeneral) {
TEST(Homography3DTest, RotationAndTranslationXYZ) {
Mat x1(4, 5);
// clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
// clang-format on
Mat4 M;
M.setIdentity();
/*
@ -178,24 +194,30 @@ TEST(Homography3DTest, RotationAndTranslationXYZ) {
// Rotation on x + translation
double angle = 45.0;
Mat4 rot;
// clang-format off
rot << 1, 0, 0, 1,
0, cos(angle), -sin(angle), 3,
0, sin(angle), cos(angle), -2,
0, 0, 0, 1;
// clang-format on
M *= rot;
// Rotation on y
angle = 25.0;
// clang-format off
rot << cos(angle), 0, sin(angle), 0,
0, 1, 0, 0,
-sin(angle), 0, cos(angle), 0,
0, 0, 0, 1;
// clang-format on
M *= rot;
// Rotation on z
angle = 5.0;
// clang-format off
rot << cos(angle), -sin(angle), 0, 0,
sin(angle), cos(angle), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1;
// clang-format on
M *= rot;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@ -212,15 +234,19 @@ TEST(Homography3DTest, RotationAndTranslationXYZ) {
TEST(Homography3DTest, AffineGeneral) {
Mat x1(4, 5);
// clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
// clang-format on
Mat4 m;
// clang-format off
m << 3, -1, 4, 1,
6, -2, -3, -6,
1, 0, 1, 2,
0, 0, 0, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@ -236,15 +262,19 @@ TEST(Homography3DTest, AffineGeneral) {
TEST(Homography3DTest, HomographyGeneral) {
Mat x1(4, 5);
// clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
// clang-format on
Mat4 m;
// clang-format off
m << 3, -1, 4, 1,
6, -2, -3, -6,
1, 0, 1, 2,
-3, 1, 0, 1;
// clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {

View File

@ -24,7 +24,8 @@
namespace libmv {
static bool Build_Minimal2Point_PolynomialFactor(
const Mat & x1, const Mat & x2,
const Mat& x1,
const Mat& x2,
double* P) { // P must be a double[4]
assert(2 == x1.rows());
assert(2 == x1.cols());
@ -53,8 +54,10 @@ static bool Build_Minimal2Point_PolynomialFactor(
// Coefficients in ascending powers of alpha, i.e. P[N]*x^N.
// Run panography_coeffs.py to get the below coefficients.
P[0] = b1 * b2 * a12 * a12 - a1 * a2 * b12 * b12;
P[1] = -2*a1*a2*b12+2*a12*b1*b2+b1*a12*a12+b2*a12*a12-a1*b12*b12-a2*b12*b12;
P[2] = b1*b2-a1*a2-2*a1*b12-2*a2*b12+2*a12*b1+2*a12*b2+a12*a12-b12*b12;
P[1] = -2 * a1 * a2 * b12 + 2 * a12 * b1 * b2 + b1 * a12 * a12 +
b2 * a12 * a12 - a1 * b12 * b12 - a2 * b12 * b12;
P[2] = b1 * b2 - a1 * a2 - 2 * a1 * b12 - 2 * a2 * b12 + 2 * a12 * b1 +
2 * a12 * b2 + a12 * a12 - b12 * b12;
P[3] = b1 + b2 - 2 * b12 - a1 - a2 + 2 * a12;
// If P[3] is equal to 0 we get ill-conditioned data
@ -67,7 +70,8 @@ static bool Build_Minimal2Point_PolynomialFactor(
//
// [1] M. Brown and R. Hartley and D. Nister. Minimal Solutions for Panoramic
// Stitching. CVPR07.
void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
void F_FromCorrespondance_2points(const Mat& x1,
const Mat& x2,
vector<double>* fs) {
// Build Polynomial factor to get squared focal value.
double P[4];
@ -92,7 +96,8 @@ void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
// K. Arun, T. Huang and D. Blostein. Least-squares fitting of 2 3-D point
// sets. IEEE Transactions on Pattern Analysis and Machine Intelligence,
// 9:698-700, 1987.
void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
void GetR_FixedCameraCenter(const Mat& x1,
const Mat& x2,
const double focal,
Mat3* R) {
assert(3 == x1.rows());
@ -115,8 +120,8 @@ void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
// Solve for rotation. Equations (24) and (25) in [1].
Eigen::JacobiSVD<Mat> svd(C, Eigen::ComputeThinU | Eigen::ComputeThinV);
Mat3 scale = Mat3::Identity();
scale(2, 2) = ((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0)
? 1.0
scale(2, 2) =
((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0) ? 1.0
: -1.0;
(*R) = svd.matrixU() * scale * svd.matrixV().transpose();

View File

@ -22,9 +22,9 @@
#ifndef LIBMV_MULTIVIEW_PANOGRAPHY_H
#define LIBMV_MULTIVIEW_PANOGRAPHY_H
#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/base/vector.h"
namespace libmv {
@ -53,7 +53,8 @@ namespace libmv {
// K = [0 f 0]
// [0 0 1]
//
void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
void F_FromCorrespondance_2points(const Mat& x1,
const Mat& x2,
vector<double>* fs);
// Compute the 3x3 rotation matrix that fits two 3D point clouds in the least
@ -90,7 +91,8 @@ void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
//
// R = arg min || X2 - R * x1 ||
//
void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
void GetR_FixedCameraCenter(const Mat& x1,
const Mat& x2,
const double focal,
Mat3* R);

View File

@ -23,9 +23,9 @@
#include "libmv/base/vector.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/homography_error.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/two_view_kernel.h"
#include "libmv/multiview/homography_error.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -37,8 +37,8 @@ struct TwoPointSolver {
static void Solve(const Mat& x1, const Mat& x2, vector<Mat3>* Hs);
};
typedef two_view::kernel::Kernel<
TwoPointSolver, homography::homography2D::AsymmetricError, Mat3>
typedef two_view::kernel::
Kernel<TwoPointSolver, homography::homography2D::AsymmetricError, Mat3>
UnnormalizedKernel;
typedef two_view::kernel::Kernel<

View File

@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/logging/logging.h"
#include "libmv/multiview/panography.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/panography_kernel.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
@ -30,10 +30,8 @@ namespace {
TEST(Panography, PrintSomeSharedFocalEstimationValues) {
Mat x1(2, 2), x2(2, 2);
x1<< 158, 78,
124, 113;
x2<< 300, 214,
125, 114;
x1 << 158, 78, 124, 113;
x2 << 300, 214, 125, 114;
// Normalize data (set principal point 0,0 and image border to 1.0).
x1.block<1, 2>(0, 0) /= 320;
@ -53,9 +51,11 @@ TEST(Panography, PrintSomeSharedFocalEstimationValues) {
TEST(Panography, GetR_FixedCameraCenterWithIdentity) {
Mat x1(3, 3);
// clang-format off
x1 << 0.5, 0.6, 0.7,
0.5, 0.5, 0.4,
10.0, 10.0, 10.0;
// clang-format on
Mat3 R;
GetR_FixedCameraCenter(x1, x1, 1.0, &R);
@ -68,16 +68,20 @@ TEST(Panography, Homography_GetR_Test_PitchY30) {
int n = 3;
Mat x1(3, n);
// clang-format off
x1 << 0.5, 0.6, 0.7,
0.5, 0.5, 0.4,
10, 10, 10;
// clang-format on
Mat x2 = x1;
const double alpha = 30.0 * M_PI / 180.0;
Mat3 rotY;
// clang-format off
rotY << cos(alpha), 0, -sin(alpha),
0, 1, 0,
sin(alpha), 0, cos(alpha);
// clang-format on
for (int i = 0; i < n; ++i) {
x2.block<3, 1>(0, i) = rotY * x1.col(i);
@ -101,17 +105,23 @@ TEST(Panography, Homography_GetR_Test_PitchY30) {
TEST(MinimalPanoramic, Real_Case_Kernel) {
const int n = 2;
Mat x1(2, n); // From image 0.jpg
// clang-format off
x1<< 158, 78,
124, 113;
// clang-format on
Mat x2(2, n); // From image 3.jpg
// clang-format off
x2<< 300, 214,
125, 114;
// clang-format on
Mat3 Ground_TruthHomography;
// clang-format off
Ground_TruthHomography<< 1, 0.02, 129.83,
-0.02, 1.012, 0.07823,
0, 0, 1;
// clang-format on
vector<Mat3> Hs;

View File

@ -44,9 +44,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qx;
// clang-format off
Qx << 1, 0, 0,
0, c, -s,
0, s, c;
// clang-format on
K = K * Qx;
Q = Qx.transpose() * Q;
}
@ -58,9 +60,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qy;
// clang-format off
Qy << c, 0, s,
0, 1, 0,
-s, 0, c;
// clang-format on
K = K * Qy;
Q = Qy.transpose() * Q;
}
@ -72,9 +76,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qz;
// clang-format off
Qz << c, -s, 0,
s, c, 0,
0, 0, 1;
// clang-format on
K = K * Qz;
Q = Qz.transpose() * Q;
}
@ -92,17 +98,21 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
}
if (K(1, 1) < 0) {
Mat3 S;
// clang-format off
S << 1, 0, 0,
0, -1, 0,
0, 0, 1;
// clang-format on
K = K * S;
R = S * R;
}
if (K(0, 0) < 0) {
Mat3 S;
// clang-format off
S << -1, 0, 0,
0, 1, 0,
0, 0, 1;
// clang-format on
K = K * S;
R = S * R;
}
@ -127,9 +137,11 @@ void ProjectionShiftPrincipalPoint(const Mat34 &P,
const Vec2& principal_point_new,
Mat34* P_new) {
Mat3 T;
// clang-format off
T << 1, 0, principal_point_new(0) - principal_point(0),
0, 1, principal_point_new(1) - principal_point(1),
0, 0, 1;
// clang-format on
*P_new = T * P;
}
@ -139,9 +151,11 @@ void ProjectionChangeAspectRatio(const Mat34 &P,
double aspect_ratio_new,
Mat34* P_new) {
Mat3 T;
// clang-format off
T << 1, 0, 0,
0, aspect_ratio_new / aspect_ratio, 0,
0, 0, 1;
// clang-format on
Mat34 P_temp;
ProjectionShiftPrincipalPoint(P, principal_point, Vec2(0, 0), &P_temp);

View File

@ -29,14 +29,18 @@ using namespace libmv;
TEST(Projection, P_From_KRt) {
Mat3 K, Kp;
// clang-format off
K << 10, 1, 30,
0, 20, 40,
0, 0, 1;
// clang-format on
Mat3 R, Rp;
// clang-format off
R << 1, 0, 0,
0, 1, 0,
0, 0, 1;
// clang-format on
Vec3 t, tp;
t << 1, 2, 3;
@ -62,9 +66,11 @@ Vec4 GetRandomPoint() {
TEST(Projection, isInFrontOfCamera) {
Mat34 P;
// clang-format off
P << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
// clang-format on
Vec4 X_front = GetRandomPoint();
Vec4 X_back = GetRandomPoint();
@ -82,12 +88,14 @@ TEST(Projection, isInFrontOfCamera) {
TEST(AutoCalibration, ProjectionShiftPrincipalPoint) {
Mat34 P1, P2;
// clang-format off
P1 << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
P2 << 1, 0, 3, 0,
0, 1, 4, 0,
0, 0, 1, 0;
// clang-format on
Mat34 P1_computed, P2_computed;
ProjectionShiftPrincipalPoint(P1, Vec2(0, 0), Vec2(3, 4), &P2_computed);
ProjectionShiftPrincipalPoint(P2, Vec2(3, 4), Vec2(0, 0), &P1_computed);
@ -98,12 +106,14 @@ TEST(AutoCalibration, ProjectionShiftPrincipalPoint) {
TEST(AutoCalibration, ProjectionChangeAspectRatio) {
Mat34 P1, P2;
// clang-format off
P1 << 1, 0, 3, 0,
0, 1, 4, 0,
0, 0, 1, 0;
P2 << 1, 0, 3, 0,
0, 2, 4, 0,
0, 0, 1, 0;
// clang-format on
Mat34 P1_computed, P2_computed;
ProjectionChangeAspectRatio(P1, Vec2(3, 4), 1, 2, &P2_computed);
ProjectionChangeAspectRatio(P2, Vec2(3, 4), 2, 1, &P1_computed);

View File

@ -20,12 +20,12 @@
#include <iostream>
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/resection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
#include "libmv/logging/logging.h"
namespace {

View File

@ -22,24 +22,28 @@
#include <cmath>
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
TwoViewDataSet TwoRealisticCameras(bool same_K) {
TwoViewDataSet d;
// clang-format off
d.K1 << 320, 0, 160,
0, 320, 120,
0, 0, 1;
// clang-format on
if (same_K) {
d.K2 = d.K1;
} else {
// clang-format off
d.K2 << 360, 0, 170,
0, 360, 110,
0, 0, 1;
// clang-format on
}
d.R1 = RotationAroundZ(-0.1);
d.R2 = RotationAroundX(-0.1);
@ -59,10 +63,8 @@ TwoViewDataSet TwoRealisticCameras(bool same_K) {
return d;
}
nViewDatasetConfigator::nViewDatasetConfigator(int fx , int fy,
int cx, int cy,
double distance,
double jitter_amount) {
nViewDatasetConfigator::nViewDatasetConfigator(
int fx, int fy, int cx, int cy, double distance, double jitter_amount) {
_fx = fx;
_fy = fy;
_cx = cx;
@ -71,7 +73,8 @@ nViewDatasetConfigator::nViewDatasetConfigator(int fx , int fy,
_jitter_amount = jitter_amount;
}
NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
NViewDataSet NRealisticCamerasFull(int nviews,
int npoints,
const nViewDatasetConfigator config) {
NViewDataSet d;
d.n = nviews;
@ -102,9 +105,11 @@ NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
jitter *= config._jitter_amount / camera_center.norm();
lookdir = -camera_center + jitter;
// clang-format off
d.K[i] << config._fx, 0, config._cx,
0, config._fy, config._cy,
0, 0, 1;
// clang-format on
d.R[i] = LookAt(lookdir);
d.t[i] = -d.R[i] * camera_center;
d.x[i] = Project(d.P(i), d.X);
@ -113,9 +118,10 @@ NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
return d;
}
NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
float view_ratio, unsigned min_projections,
NViewDataSet NRealisticCamerasSparse(int nviews,
int npoints,
float view_ratio,
unsigned min_projections,
const nViewDatasetConfigator config) {
assert(view_ratio <= 1.0);
assert(view_ratio > 0.0);
@ -174,9 +180,11 @@ NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
jitter *= config._jitter_amount / camera_center.norm();
lookdir = -camera_center + jitter;
// clang-format off
d.K[i] << config._fx, 0, config._cx,
0, config._fy, config._cy,
0, 0, 1;
// clang-format on
d.R[i] = LookAt(lookdir);
d.t[i] = -d.R[i] * camera_center;
j_visible = 0;
@ -192,5 +200,4 @@ NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
return d;
}
} // namespace libmv

View File

@ -83,22 +83,26 @@ struct nViewDatasetConfigator {
double _dist;
double _jitter_amount;
nViewDatasetConfigator(int fx = 1000, int fy = 1000,
int cx = 500, int cy = 500,
nViewDatasetConfigator(int fx = 1000,
int fy = 1000,
int cx = 500,
int cy = 500,
double distance = 1.5,
double jitter_amount = 0.01);
};
NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
const nViewDatasetConfigator
config = nViewDatasetConfigator());
NViewDataSet NRealisticCamerasFull(
int nviews,
int npoints,
const nViewDatasetConfigator config = nViewDatasetConfigator());
// Generates sparse projections (not all points are projected)
NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
NViewDataSet NRealisticCamerasSparse(
int nviews,
int npoints,
float view_ratio = 0.6,
unsigned min_projections = 3,
const nViewDatasetConfigator
config = nViewDatasetConfigator());
const nViewDatasetConfigator config = nViewDatasetConfigator());
} // namespace libmv
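A minimal usage sketch for the factories declared in this header; the member names follow the .cc hunks above and the types are my reading of the headers, so treat it as a sketch rather than reference usage:

#include "libmv/multiview/test_data_sets.h"

void SyntheticSceneSketch() {
  // Four synthetic cameras observing twenty 3D points, default configurator.
  libmv::NViewDataSet d = libmv::NRealisticCamerasFull(4, 20);
  libmv::Mat34 P0 = d.P(0);   // Projection matrix of the first camera.
  libmv::Mat2X x0 = d.x[0];   // Its 2D projections of the points in d.X.
  (void)P0;
  (void)x0;
}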

View File

@ -20,14 +20,16 @@
#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
// HZ 12.2, page 312
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
void TriangulateDLT(const Mat34& P1,
const Vec2& x1,
const Mat34& P2,
const Vec2& x2,
Vec4* X_homogeneous) {
Mat4 design;
for (int i = 0; i < 4; ++i) {
@ -39,8 +41,10 @@ void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
Nullspace(&design, X_homogeneous);
}
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
void TriangulateDLT(const Mat34& P1,
const Vec2& x1,
const Mat34& P2,
const Vec2& x2,
Vec3* X_euclidean) {
Vec4 X_homogeneous;
TriangulateDLT(P1, x1, P2, x2, &X_homogeneous);
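A hypothetical call of the Euclidean overload above; the projection matrices and image points are made up so that both cameras observe the same 3D point (0.2, 0.4, 2):

#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"

void TriangulateSketch() {
  libmv::Mat34 P1, P2;
  // clang-format off
  P1 << 1, 0, 0,  0,
        0, 1, 0,  0,
        0, 0, 1,  0;
  P2 << 1, 0, 0, -1,   // Same camera, translated by one unit along X.
        0, 1, 0,  0,
        0, 0, 1,  0;
  // clang-format on
  libmv::Vec2 x1(0.1, 0.2), x2(-0.4, 0.2);  // Projections of (0.2, 0.4, 2).
  libmv::Vec3 X;
  libmv::TriangulateDLT(P1, x1, P2, x2, &X);
  // X should come back close to (0.2, 0.4, 2) up to numerical noise.
}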

View File

@ -25,12 +25,16 @@
namespace libmv {
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
void TriangulateDLT(const Mat34& P1,
const Vec2& x1,
const Mat34& P2,
const Vec2& x2,
Vec4* X_homogeneous);
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
void TriangulateDLT(const Mat34& P1,
const Vec2& x1,
const Mat34& P2,
const Vec2& x2,
Vec3* X_euclidean);
} // namespace libmv

View File

@ -20,10 +20,10 @@
#include <iostream>
#include "libmv/multiview/triangulation.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"

View File

@ -99,9 +99,7 @@ struct IsotropicNormalizedSolver {
//
// The fit routine must not clear existing entries in the vector of models; it
// should append new solutions to the end.
template<typename SolverArg,
typename ErrorArg,
typename ModelArg = Mat3>
template <typename SolverArg, typename ErrorArg, typename ModelArg = Mat3>
class Kernel {
public:
Kernel(const Mat& x1, const Mat& x2) : x1_(x1), x2_(x2) {}
@ -118,13 +116,12 @@ class Kernel {
static_cast<Vec>(x1_.col(sample)),
static_cast<Vec>(x2_.col(sample)));
}
int NumSamples() const {
return x1_.cols();
}
int NumSamples() const { return x1_.cols(); }
static void Solve(const Mat& x1, const Mat& x2, vector<Model>* models) {
// By offering this, Kernel types can be passed to templates.
Solver::Solve(x1, x2, models);
}
protected:
const Mat& x1_;
const Mat& x2_;
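A minimal solver sketch that honors the fit-routine contract documented in this header; the struct name and the identity "hypothesis" are placeholders for illustration, not libmv code:

#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"

struct TwoPointSolverSketch {
  enum { MINIMUM_SAMPLES = 2 };
  static void Solve(const libmv::Mat& x1,
                    const libmv::Mat& x2,
                    libmv::vector<libmv::Mat3>* models) {
    (void)x1;
    (void)x2;
    // Append new hypotheses; never clear what the caller already collected.
    models->push_back(libmv::Mat3::Identity());
  }
};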

View File

@ -32,9 +32,9 @@
#include <cmath>
#include <cstdio>
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -51,10 +51,12 @@ class Dogleg {
typedef typename Function::XMatrixType Parameters;
typedef Matrix<typename Function::FMatrixType::RealScalar,
Function::FMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime> JMatrixType;
Function::XMatrixType::RowsAtCompileTime>
JMatrixType;
typedef Matrix<typename JMatrixType::RealScalar,
JMatrixType::ColsAtCompileTime,
JMatrixType::ColsAtCompileTime> AMatrixType;
JMatrixType::ColsAtCompileTime>
AMatrixType;
enum Status {
RUNNING,
@ -71,8 +73,7 @@ class Dogleg {
STEEPEST_DESCENT,
};
Dogleg(const Function &f)
: f_(f), df_(f) {}
Dogleg(const Function& f) : f_(f), df_(f) {}
struct SolverParameters {
SolverParameters()
@ -95,10 +96,15 @@ class Dogleg {
Status status;
};
Status Update(const Parameters &x, const SolverParameters &params,
JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
Status Update(const Parameters& x,
const SolverParameters& params,
JMatrixType* J,
AMatrixType* A,
FVec* error,
Parameters* g) {
*J = df_(x);
// TODO(keir): In the case of m = n, avoid computing A and just do J^-1 directly.
// TODO(keir): In the case of m = n, avoid computing A and just do J^-1
// directly.
*A = (*J).transpose() * (*J);
*error = f_(x);
*g = (*J).transpose() * *error;
@ -137,8 +143,7 @@ class Dogleg {
if (c <= 0) {
*beta = (-c + sqrt(c * c + Mbma2 * (radius2 - Ma2))) / (Mbma2);
} else {
*beta = (radius2 - Ma2) /
(c + sqrt(c*c + Mbma2*(radius2 - Ma2)));
*beta = (radius2 - Ma2) / (c + sqrt(c * c + Mbma2 * (radius2 - Ma2)));
}
*dx_dl = alpha * dx_sd + (*beta) * (dx_gn - alpha * dx_sd);
return DOGLEG;
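A note on the two branches above (a sketch using the code's own names, reading a = alpha * dx_sd, b = dx_gn, Ma2 = $\|a\|^2$, Mbma2 = $\|b - a\|^2$, radius2 = $r^2$ and c = $a^\top (b - a)$): beta is the positive root of the trust-region boundary condition $\|a + \beta (b - a)\|^2 = r^2$, i.e. of $\mathrm{Mbma2}\,\beta^2 + 2c\beta + (\mathrm{Ma2} - \mathrm{radius2}) = 0$; the two formulas are algebraically equivalent forms of that root, selected by the sign of c so that the subtraction never suffers from cancellation.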
@ -171,7 +176,10 @@ class Dogleg {
int i = 0;
for (; results.status == RUNNING && i < params.max_iterations; ++i) {
printf("%9d %12g %12g %12g",
i, f_(x).norm(), g.array().abs().maxCoeff(), radius);
i,
f_(x).norm(),
g.array().abs().maxCoeff(),
radius);
// LG << "iteration: " << i;
// LG << "||f(x)||: " << f_(x).norm();
@ -199,8 +207,8 @@ class Dogleg {
// Solve for dogleg direction dx_dl.
Scalar beta = 0;
Step step = SolveDoglegDirection(dx_sd, dx_gn, radius, alpha,
&dx_dl, &beta);
Step step =
SolveDoglegDirection(dx_sd, dx_gn, radius, alpha, &dx_dl, &beta);
Scalar e3 = params.relative_step_threshold;
if (dx_dl.norm() < e3 * (x.norm() + e3)) {
@ -221,9 +229,12 @@ class Dogleg {
}
Scalar rho = actual / predicted;
if (step == GAUSS_NEWTON) printf(" GAUSS");
if (step == STEEPEST_DESCENT) printf(" STEE");
if (step == DOGLEG) printf(" DOGL");
if (step == GAUSS_NEWTON)
printf(" GAUSS");
if (step == STEEPEST_DESCENT)
printf(" STEE");
if (step == DOGLEG)
printf(" DOGL");
printf(" %12g %12g %12g\n", rho, actual, predicted);
@ -256,6 +267,6 @@ class Dogleg {
Jacobian df_;
};
} // namespace mv
} // namespace libmv
#endif // LIBMV_NUMERIC_DOGLEG_H

View File

@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/numeric/dogleg.h"
#include "testing/testing.h"
using namespace libmv;
@ -33,22 +33,22 @@ class F {
double x1 = x.x() - 2;
double y1 = x.y() - 5;
double z1 = x.z();
Vec4 fx; fx << x1*x1 + z1*z1,
y1*y1 + z1*z1,
z1*z1,
x1*x1;
Vec4 fx;
fx << x1 * x1 + z1 * z1, y1 * y1 + z1 * z1, z1 * z1, x1 * x1;
return fx;
}
};
TEST(Dogleg, SimpleCase) {
Vec3 x; x << 0.76026643, -30.01799744, 0.55192142;
Vec3 x;
x << 0.76026643, -30.01799744, 0.55192142;
F f;
Dogleg<F>::SolverParameters params;
Dogleg<F> lm(f);
/* TODO(sergey): Better error handling. */
/* Dogleg<F>::Results results = */ lm.minimize(params, &x);
Vec3 expected_min_x; expected_min_x << 2, 5, 0;
Vec3 expected_min_x;
expected_min_x << 2, 5, 0;
EXPECT_MATRIX_NEAR(expected_min_x, x, 1e-5);
}
@ -62,7 +62,8 @@ class F32 {
Vec2 operator()(const Vec2& x) const {
double x1 = x(0);
double x2 = 10 * x(0) / (x(0) + 0.1) + 2 * x(1) * x(1);
Vec2 fx; fx << x1, x2;
Vec2 fx;
fx << x1, x2;
return fx;
}
};
@ -71,8 +72,8 @@ class JF32 {
public:
JF32(const F32& f) { (void)f; }
Mat2 operator()(const Vec2& x) {
Mat2 J; J << 1, 0,
1./pow(x(0) + 0.1, 2), 4*x(1)*x(1);
Mat2 J;
J << 1, 0, 1. / pow(x(0) + 0.1, 2), 4 * x(1) * x(1);
return J;
}
};

View File

@ -23,8 +23,8 @@
#include <cmath>
#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -87,6 +87,7 @@ class NumericJacobian {
}
return jacobian;
}
private:
const Function& f_;
};

View File

@ -18,9 +18,9 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv;
@ -38,14 +38,14 @@ class F {
}
Mat23 J(const Vec3& x) const {
Mat23 jacobian;
jacobian << 0.19, 2*0.19*x(1), 1.0,
3*cos(x(0)), -2*sin(x(1)), 0;
jacobian << 0.19, 2 * 0.19 * x(1), 1.0, 3 * cos(x(0)), -2 * sin(x(1)), 0;
return jacobian;
}
};
TEST(FunctionDerivative, SimpleCase) {
Vec3 x; x << 0.76026643, 0.01799744, 0.55192142;
Vec3 x;
x << 0.76026643, 0.01799744, 0.55192142;
F f;
NumericJacobian<F, CENTRAL> J(f);
EXPECT_MATRIX_NEAR(f.J(x), J(x), 1e-8);

View File

@ -31,9 +31,9 @@
#include <cmath>
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/function_derivative.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -50,10 +50,12 @@ class LevenbergMarquardt {
typedef typename Function::XMatrixType Parameters;
typedef Matrix<typename Function::FMatrixType::RealScalar,
Function::FMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime> JMatrixType;
Function::XMatrixType::RowsAtCompileTime>
JMatrixType;
typedef Matrix<typename JMatrixType::RealScalar,
JMatrixType::ColsAtCompileTime,
JMatrixType::ColsAtCompileTime> AMatrixType;
JMatrixType::ColsAtCompileTime>
AMatrixType;
// TODO(keir): Some of these knobs can be derived from each other and
// removed, instead of requiring the user to set them.
@ -65,8 +67,7 @@ class LevenbergMarquardt {
HIT_MAX_ITERATIONS,
};
LevenbergMarquardt(const Function &f)
: f_(f), df_(f) {}
LevenbergMarquardt(const Function& f) : f_(f), df_(f) {}
struct SolverParameters {
SolverParameters()
@ -89,8 +90,12 @@ class LevenbergMarquardt {
Status status;
};
Status Update(const Parameters &x, const SolverParameters &params,
JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
Status Update(const Parameters& x,
const SolverParameters& params,
JMatrixType* J,
AMatrixType* A,
FVec* error,
Parameters* g) {
*J = df_(x);
*A = (*J).transpose() * (*J);
*error = -f_(x);
@ -130,7 +135,8 @@ class LevenbergMarquardt {
VLOG(3) << "u: " << u;
VLOG(3) << "v: " << v;
AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
AMatrixType A_augmented =
A + u * AMatrixType::Identity(J.cols(), J.cols());
Solver solver(A_augmented);
dx = solver.solve(g);
bool solved = (A_augmented * dx).isApprox(g);
@ -146,8 +152,8 @@ class LevenbergMarquardt {
// Rho is the ratio of the actual reduction in error to the reduction
// in error that would be obtained if the problem was linear.
// See [1] for details.
Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
/ dx.dot(u*dx + g));
Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm()) /
dx.dot(u * dx + g));
if (rho > 0) {
// Accept the Gauss-Newton step because the linear model fits well.
x = x_new;
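For reference, rho above is the standard Levenberg-Marquardt gain ratio, sketched here with the names used by Update() earlier in this class (error = -f(x), g = J^T error, and u playing the role of the damping parameter $\mu$):

$\rho = \dfrac{\|f(x)\|^2 - \|f(x + dx)\|^2}{dx^\top (u\,dx + g)}$

Numerator and denominator are twice the actual and the linearly predicted decrease of $F = \tfrac{1}{2}\|f\|^2$, so the factors of two cancel and the ratio matches the textbook definition.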
@ -178,6 +184,6 @@ class LevenbergMarquardt {
Jacobian df_;
};
} // namespace mv
} // namespace libmv
#endif // LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H

View File

@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/numeric/levenberg_marquardt.h"
#include "testing/testing.h"
using namespace libmv;
@ -33,10 +33,8 @@ class F {
double x1 = x.x() - 2;
double y1 = x.y() - 5;
double z1 = x.z();
Vec4 fx; fx << x1*x1 + z1*z1,
y1*y1 + z1*z1,
z1*z1,
x1*x1;
Vec4 fx;
fx << x1 * x1 + z1 * z1, y1 * y1 + z1 * z1, z1 * z1, x1 * x1;
return fx;
}
};

View File

@ -18,7 +18,6 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -27,9 +26,11 @@ Mat3 RotationAroundX(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
// clang-format off
R << 1, 0, 0,
0, c, -s,
0, s, c;
// clang-format on
return R;
}
@ -37,9 +38,11 @@ Mat3 RotationAroundY(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
// clang-format off
R << c, 0, s,
0, 1, 0,
-s, 0, c;
// clang-format on
return R;
}
@ -47,13 +50,14 @@ Mat3 RotationAroundZ(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
// clang-format off
R << c, -s, 0,
s, c, 0,
0, 0, 1;
// clang-format on
return R;
}
Mat3 RotationRodrigues(const Vec3& axis) {
double theta = axis.norm();
Vec3 w = axis / theta;
@ -62,7 +66,6 @@ Mat3 RotationRodrigues(const Vec3 &axis) {
return Mat3::Identity() + sin(theta) * W + (1 - cos(theta)) * W * W;
}
Mat3 LookAt(Vec3 center) {
Vec3 zc = center.normalized();
Vec3 xc = Vec3::UnitY().cross(zc).normalized();
@ -76,9 +79,11 @@ Mat3 LookAt(Vec3 center) {
Mat3 CrossProductMatrix(const Vec3& x) {
Mat3 X;
// clang-format off
X << 0, -x(2), x(1),
x(2), 0, -x(0),
-x(1), x(0), 0;
// clang-format on
return X;
}
@ -133,4 +138,3 @@ void MatrixColumn(const Mat &A, int i, Vec4 *v) {
}
} // namespace libmv

View File

@ -34,9 +34,8 @@
#include <Eigen/SVD>
#if !defined(__MINGW64__)
# if defined(_WIN32) || defined(__APPLE__) || \
defined(__FreeBSD__) || defined(__NetBSD__) || \
defined(__HAIKU__)
# if defined(_WIN32) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(__NetBSD__) || defined(__HAIKU__)
inline void sincos(double x, double* sinx, double* cosx) {
*sinx = sin(x);
*cosx = cos(x);
@ -133,15 +132,13 @@ typedef Eigen::Vector2i Vec2i;
typedef Eigen::Vector3i Vec3i;
typedef Eigen::Vector4i Vec4i;
typedef Eigen::Matrix<float,
Eigen::Dynamic,
Eigen::Dynamic,
Eigen::RowMajor> RMatf;
typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
RMatf;
typedef Eigen::NumTraits<double> EigenDouble;
using Eigen::Map;
using Eigen::Dynamic;
using Eigen::Map;
using Eigen::Matrix;
// Find U, s, and VT such that
@ -294,7 +291,8 @@ void MeanAndVarianceAlongRows(const Mat &A,
// TODO(bomboze): un-#if this for both platforms once tested under Windows
/* This solution was extensively discussed here
http://forum.kde.org/viewtopic.php?f=74&t=61940 */
#define SUM_OR_DYNAMIC(x, y) (x == Eigen::Dynamic || y == Eigen::Dynamic) ? Eigen::Dynamic : (x+y)
# define SUM_OR_DYNAMIC(x, y) \
(x == Eigen::Dynamic || y == Eigen::Dynamic) ? Eigen::Dynamic : (x + y)
template <typename Derived1, typename Derived2>
struct hstack_return {
@ -313,12 +311,13 @@ void MeanAndVarianceAlongRows(const Mat &A,
ColsAtCompileTime,
Options,
MaxRowsAtCompileTime,
MaxColsAtCompileTime> type;
MaxColsAtCompileTime>
type;
};
template <typename Derived1, typename Derived2>
typename hstack_return<Derived1, Derived2>::type
HStack(const Eigen::MatrixBase<Derived1>& lhs,
typename hstack_return<Derived1, Derived2>::type HStack(
const Eigen::MatrixBase<Derived1>& lhs,
const Eigen::MatrixBase<Derived2>& rhs) {
typename hstack_return<Derived1, Derived2>::type res;
res.resize(lhs.rows(), lhs.cols() + rhs.cols());
@ -326,7 +325,6 @@ void MeanAndVarianceAlongRows(const Mat &A,
return res;
};
template <typename Derived1, typename Derived2>
struct vstack_return {
typedef typename Derived1::Scalar Scalar;
@ -344,12 +342,13 @@ void MeanAndVarianceAlongRows(const Mat &A,
ColsAtCompileTime,
Options,
MaxRowsAtCompileTime,
MaxColsAtCompileTime> type;
MaxColsAtCompileTime>
type;
};
template <typename Derived1, typename Derived2>
typename vstack_return<Derived1, Derived2>::type
VStack(const Eigen::MatrixBase<Derived1>& lhs,
typename vstack_return<Derived1, Derived2>::type VStack(
const Eigen::MatrixBase<Derived1>& lhs,
const Eigen::MatrixBase<Derived2>& rhs) {
typename vstack_return<Derived1, Derived2>::type res;
res.resize(lhs.rows() + rhs.rows(), lhs.cols());
@ -357,29 +356,25 @@ void MeanAndVarianceAlongRows(const Mat &A,
return res;
};
#else // _WIN32
// Since it is not possible to typedef privately here, use a macro.
// Always take dynamic columns if either side is dynamic.
# define COLS \
((ColsLeft == Eigen::Dynamic || ColsRight == Eigen::Dynamic) \
? Eigen::Dynamic : (ColsLeft + ColsRight))
? Eigen::Dynamic \
: (ColsLeft + ColsRight))
// Same as above, except prefer a fixed size if either side is fixed.
# define ROWS \
((RowsLeft == Eigen::Dynamic && RowsRight == Eigen::Dynamic) \
? Eigen::Dynamic \
: ((RowsLeft == Eigen::Dynamic) \
? RowsRight \
: RowsLeft \
) \
)
: ((RowsLeft == Eigen::Dynamic) ? RowsRight : RowsLeft))
// TODO(keir): Add a static assert if both rows are at compiletime.
template <typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
Eigen::Matrix<T, ROWS, COLS>
HStack(const Eigen::Matrix<T, RowsLeft, ColsLeft> &left,
Eigen::Matrix<T, ROWS, COLS> HStack(
const Eigen::Matrix<T, RowsLeft, ColsLeft>& left,
const Eigen::Matrix<T, RowsRight, ColsRight>& right) {
assert(left.rows() == right.rows());
int n = left.rows();
@ -398,8 +393,8 @@ void MeanAndVarianceAlongRows(const Mat &A,
// TODO(keir): Mail eigen list about making this work for general expressions
// rather than only matrix types.
template <typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
Eigen::Matrix<T, COLS, ROWS>
VStack(const Eigen::Matrix<T, ColsLeft, RowsLeft> &top,
Eigen::Matrix<T, COLS, ROWS> VStack(
const Eigen::Matrix<T, ColsLeft, RowsLeft>& top,
const Eigen::Matrix<T, ColsRight, RowsRight>& bottom) {
assert(top.cols() == bottom.cols());
int n1 = top.rows();
@ -415,8 +410,6 @@ void MeanAndVarianceAlongRows(const Mat &A,
# undef ROWS
#endif // _WIN32
void HorizontalStack(const Mat& left, const Mat& right, Mat* stacked);
template <typename TTop, typename TBot, typename TStacked>
@ -474,17 +467,14 @@ FloatType ceil0(const FloatType& value) {
/// Returns the skew anti-symmetric matrix of a vector
inline Mat3 SkewMat(const Vec3& x) {
Mat3 skew;
skew << 0 , -x(2), x(1),
x(2), 0 , -x(0),
-x(1), x(0), 0;
skew << 0, -x(2), x(1), x(2), 0, -x(0), -x(1), x(0), 0;
return skew;
}
/// Returns the skew anti-symmetric matrix of a vector with only
/// the first two (independent) lines
inline Mat23 SkewMatMinimal(const Vec2& x) {
Mat23 skew;
skew << 0, -1, x(1),
1, 0, -x(0);
skew << 0, -1, x(1), 1, 0, -x(0);
return skew;
}
@ -496,7 +486,8 @@ inline Mat3 RotationFromEulerVector(Vec3 euler_vector) {
}
Vec3 w = euler_vector / theta;
Mat3 w_hat = CrossProductMatrix(w);
return Mat3::Identity() + w_hat*sin(theta) + w_hat*w_hat*(1 - cos(theta));
return Mat3::Identity() + w_hat * sin(theta) +
w_hat * w_hat * (1 - cos(theta));
}
} // namespace libmv
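For reference, RotationFromEulerVector() above is Rodrigues' formula written with the cross-product matrix: for an axis-angle vector $v$ with $\theta = \|v\|$ and unit axis $\hat{w} = v / \theta$,

$R = I + \sin\theta\,[\hat{w}]_\times + (1 - \cos\theta)\,[\hat{w}]_\times^2,$

where $[\hat{w}]_\times$ is the skew matrix produced by CrossProductMatrix() and SkewMat().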

View File

@ -27,9 +27,11 @@ namespace {
TEST(Numeric, DynamicSizedNullspace) {
Mat A(3, 4);
// clang-format off
A << 0.76026643, 0.01799744, 0.55192142, 0.8699745,
0.42016166, 0.97863392, 0.33711682, 0.14479271,
0.51016811, 0.66528302, 0.54395496, 0.57794893;
// clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(0.0, s, 1e-15);
@ -39,9 +41,11 @@ TEST(Numeric, DynamicSizedNullspace) {
TEST(Numeric, FixedSizeMatrixNullspace) {
Mat34 A;
// clang-format off
A << 0.76026643, 0.01799744, 0.55192142, 0.8699745,
0.42016166, 0.97863392, 0.33711682, 0.14479271,
0.51016811, 0.66528302, 0.54395496, 0.57794893;
// clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(0.0, s, 1e-15);
@ -51,10 +55,12 @@ TEST(Numeric, FixedSizeMatrixNullspace) {
TEST(Numeric, NullspaceMatchesLapackSVD) {
Mat43 A;
// clang-format off
A << 0.76026643, 0.01799744, 0.55192142,
0.8699745, 0.42016166, 0.97863392,
0.33711682, 0.14479271, 0.51016811,
0.66528302, 0.54395496, 0.57794893;
// clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(1.0, x.norm(), 1e-15);
@ -68,10 +74,12 @@ TEST(Numeric, NullspaceMatchesLapackSVD) {
TEST(Numeric, Nullspace2) {
Mat43 A;
// clang-format off
A << 0.76026643, 0.01799744, 0.55192142,
0.8699745, 0.42016166, 0.97863392,
0.33711682, 0.14479271, 0.51016811,
0.66528302, 0.54395496, 0.57794893;
// clang-format on
Vec3 x1, x2;
double s = Nullspace2(&A, &x1, &x2);
EXPECT_NEAR(1.0, x1.norm(), 1e-15);
@ -130,31 +138,32 @@ TEST(Numeric, Diag) {
TEST(Numeric, Determinant) {
Mat A(2, 2);
A << 1, 2,
-1, 3;
A << 1, 2, -1, 3;
double detA = A.determinant();
EXPECT_NEAR(5, detA, 1e-8);
Mat B(4, 4);
// clang-format off
B << 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15;
// clang-format on
double detB = B.determinant();
EXPECT_NEAR(0, detB, 1e-8);
Mat3 C;
C << 0, 1, 2,
3, 4, 5,
6, 7, 1;
C << 0, 1, 2, 3, 4, 5, 6, 7, 1;
double detC = C.determinant();
EXPECT_NEAR(21, detC, 1e-8);
}
TEST(Numeric, Inverse) {
Mat A(2, 2), A1;
// clang-format off
A << 1, 2,
-1, 3;
// clang-format on
Mat I = A * A.inverse();
EXPECT_NEAR(1, I(0, 0), 1e-8);
@ -163,10 +172,12 @@ TEST(Numeric, Inverse) {
EXPECT_NEAR(1, I(1, 1), 1e-8);
Mat B(4, 4), B1;
// clang-format off
B << 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 2, 11,
12, 13, 14, 4;
// clang-format on
Mat I2 = B * B.inverse();
EXPECT_NEAR(1, I2(0, 0), 1e-8);
EXPECT_NEAR(0, I2(0, 1), 1e-8);
@ -182,8 +193,10 @@ TEST(Numeric, Inverse) {
TEST(Numeric, MeanAndVarianceAlongRows) {
int n = 4;
Mat points(2, n);
// clang-format off
points << 0, 0, 1, 1,
0, 2, 1, 3;
// clang-format on
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
@ -213,8 +226,10 @@ TEST(Numeric, HStack) {
Mat x(2, 1), y(2, 1), z(2, 2);
x << 1, 2;
y << 3, 4;
// clang-format off
z << 1, 3,
2, 4;
// clang-format on
Vec2 xC = x, yC = y;
Mat2 xy = HStack(x, y);
@ -230,6 +245,7 @@ TEST(Numeric, HStack) {
// resulting stacked matrices properly propagate the fixed dimensions.
TEST(Numeric, VStack) {
Mat x(2, 2), y(2, 2), z(4, 2);
// clang-format off
x << 1, 2,
3, 4;
y << 10, 20,
@ -238,6 +254,7 @@ TEST(Numeric, VStack) {
3, 4,
10, 20,
30, 40;
// clang-format on
Mat2 xC = x, yC = y;
Mat xy = VStack(x, y);
@ -293,17 +310,21 @@ TEST(Numeric, CrossProductMatrix) {
TEST(Numeric, MatrixColumn) {
Mat A2(2, 3);
Vec2 v2;
// clang-format off
A2 << 1, 2, 3,
4, 5, 6;
// clang-format on
MatrixColumn(A2, 1, &v2);
EXPECT_EQ(2, v2(0));
EXPECT_EQ(5, v2(1));
Mat A3(3, 3);
Vec3 v3;
// clang-format off
A3 << 1, 2, 3,
4, 5, 6,
7, 8, 9;
// clang-format on
MatrixColumn(A3, 1, &v3);
EXPECT_EQ(2, v3(0));
EXPECT_EQ(5, v3(1));
@ -311,10 +332,12 @@ TEST(Numeric, MatrixColumn) {
Mat A4(4, 3);
Vec4 v4;
// clang-format off
A4 << 1, 2, 3,
4, 5, 6,
7, 8, 9,
10, 11, 12;
// clang-format on
MatrixColumn(A4, 1, &v4);
EXPECT_EQ(2, v4(0));
EXPECT_EQ(5, v4(1));
@ -337,7 +360,8 @@ TEST(Numeric, Mat3MatProduct) {
// This gives a compile error.
TEST(Numeric, Vec3Negative) {
Vec3 y; y << 1, 2, 3;
Vec3 y;
y << 1, 2, 3;
Vec3 x = -y;
EXPECT_EQ(-1, x(0));
EXPECT_EQ(-2, x(1));
@ -357,19 +381,23 @@ TEST(Numeric, Vec3VecInteroperability) {
// This segfaults inside lapack.
TEST(Numeric, DeterminantLU7) {
Mat A(5, 5);
// clang-format off
A << 1, 0, 0, 0, 0,
0, 1, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 1, 0,
0, 0, 0, 0, 1;
// clang-format on
EXPECT_NEAR(1, A.determinant(), 1e-8);
}
// This segfaults inside lapack.
TEST(Numeric, DeterminantLU) {
Mat A(2, 2);
// clang-format off
A << 1, 2,
-1, 3;
// clang-format on
EXPECT_NEAR(5, A.determinant(), 1e-8);
}
@ -377,19 +405,24 @@ TEST(Numeric, DeterminantLU) {
// Keir: Not with eigen2!
TEST(Numeric, InplaceProduct) {
Mat2 K, S;
// clang-format off
K << 1, 0,
0, 1;
S << 1, 0,
0, 1;
K = K * S;
// clang-format on
EXPECT_MATRIX_NEAR(Mat2::Identity(), K, 1e-8);
}
TEST(Numeric, ExtractColumns) {
Mat2X A(2, 5);
// clang-format off
A << 1, 2, 3, 4, 5,
6, 7, 8, 9, 10;
Vec2i columns; columns << 0, 2;
// clang-format on
Vec2i columns;
columns << 0, 2;
Mat2X extracted = ExtractColumns(A, columns);
EXPECT_NEAR(1, extracted(0, 0), 1e-15);
EXPECT_NEAR(3, extracted(0, 1), 1e-15);
@ -418,7 +451,8 @@ TEST(Numeric, RotationRodrigues) {
TEST(Numeric, LookAt) {
// Simple orthogonality check.
Vec3 e; e << 1, 2, 3;
Vec3 e;
e << 1, 2, 3;
Mat3 R = LookAt(e), I = Mat3::Identity();
Mat3 RRT = R * R.transpose();
Mat3 RTR = R.transpose() * R;
@ -428,11 +462,11 @@ TEST(Numeric, LookAt) {
}
TEST(Numeric, Reshape) {
Vec4 x; x << 1, 2, 3, 4;
Vec4 x;
x << 1, 2, 3, 4;
Mat2 M, M_expected;
reshape(x, 2, 2, &M);
M_expected << 1, 2,
3, 4;
M_expected << 1, 2, 3, 4;
EXPECT_MATRIX_NEAR(M_expected, M, 1e-15);
}

View File

@ -21,8 +21,8 @@
#ifndef LIBMV_NUMERIC_POLY_H_
#define LIBMV_NUMERIC_POLY_H_
#include <cmath>
#include <stdio.h>
#include <cmath>
namespace libmv {
@ -36,8 +36,7 @@ namespace libmv {
//
// The GSL cubic solver was used as a reference for this routine.
template <typename Real>
int SolveCubicPolynomial(Real a, Real b, Real c,
Real *x0, Real *x1, Real *x2) {
int SolveCubicPolynomial(Real a, Real b, Real c, Real* x0, Real* x1, Real* x2) {
Real q = a * a - 3 * b;
Real r = 2 * a * a * a - 9 * a * b + 27 * c;
@ -114,10 +113,8 @@ int SolveCubicPolynomial(const Real *coeffs, Real *solutions) {
Real a = coeffs[2] / coeffs[3];
Real b = coeffs[1] / coeffs[3];
Real c = coeffs[0] / coeffs[3];
return SolveCubicPolynomial(a, b, c,
solutions + 0,
solutions + 1,
solutions + 2);
return SolveCubicPolynomial(
a, b, c, solutions + 0, solutions + 1, solutions + 2);
}
} // namespace libmv
#endif // LIBMV_NUMERIC_POLY_H_
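A hypothetical usage of the three-coefficient overload above, on a cubic with known roots 1, 2 and 3, i.e. x^3 - 6 x^2 + 11 x - 6 = 0, so a = -6, b = 11, c = -6:

#include "libmv/numeric/poly.h"

void CubicSketch() {
  double x0, x1, x2;
  const int num_roots =
      libmv::SolveCubicPolynomial(-6.0, 11.0, -6.0, &x0, &x1, &x2);
  // Expect num_roots == 3 and {x0, x1, x2} close to {1, 2, 3}.
  (void)num_roots;
}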

View File

@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv;
@ -34,8 +34,8 @@ namespace {
//
// x^3 - (c+b+a) * x^2 + (a*b+(b+a)*c) * x - a*b*c = 0.
// with p = -(c+b+a), q = a*b + (b+a)*c, r = -a*b*c
void CoeffsForCubicZeros(double a, double b, double c,
double *p, double *q, double *r) {
void CoeffsForCubicZeros(
double a, double b, double c, double* p, double* q, double* r) {
*p = -(c + b + a);
*q = (a * b + (b + a) * c);
*r = -a * b * c;
@ -45,35 +45,45 @@ TEST(Poly, SolveCubicPolynomial) {
double a, b, c, aa, bb, cc;
double p, q, r;
a = 1; b = 2; c = 3;
a = 1;
b = 2;
c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
a = 0; b = 1; c = 3;
a = 0;
b = 1;
c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
a = -10; b = 0; c = 1;
a = -10;
b = 0;
c = 1;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
a = -8; b = 1; c = 3;
a = -8;
b = 1;
c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
a = 28; b = 28; c = 105;
a = 28;
b = 28;
c = 105;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);

View File

@ -32,10 +32,10 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/distortion_models.h"
#include "libmv/simple_pipeline/packed_intrinsics.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
@ -63,8 +63,10 @@ template<typename T>
void ApplyDistortionModelUsingIntrinsicsBlock(
const CameraIntrinsics* invariant_intrinsics,
const T* const intrinsics_block,
const T& normalized_x, const T& normalized_y,
T* distorted_x, T* distorted_y) {
const T& normalized_x,
const T& normalized_y,
T* distorted_x,
T* distorted_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@ -76,8 +78,7 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
// TODO(keir): Do early bailouts for zero distortion; these are expensive
// jet operations.
switch (invariant_intrinsics->GetDistortionModelType()) {
case DISTORTION_MODEL_POLYNOMIAL:
{
case DISTORTION_MODEL_POLYNOMIAL: {
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
@ -88,15 +89,19 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
focal_length,
principal_point_x,
principal_point_y,
k1, k2, k3,
p1, p2,
normalized_x, normalized_y,
distorted_x, distorted_y);
k1,
k2,
k3,
p1,
p2,
normalized_x,
normalized_y,
distorted_x,
distorted_y);
return;
}
case DISTORTION_MODEL_DIVISION:
{
case DISTORTION_MODEL_DIVISION: {
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
@ -104,20 +109,21 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
focal_length,
principal_point_x,
principal_point_y,
k1, k2,
normalized_x, normalized_y,
distorted_x, distorted_y);
k1,
k2,
normalized_x,
normalized_y,
distorted_x,
distorted_y);
return;
}
case DISTORTION_MODEL_NUKE:
{
case DISTORTION_MODEL_NUKE: {
LOG(FATAL) << "Unsupported distortion model.";
return;
}
case DISTORTION_MODEL_BROWN:
{
case DISTORTION_MODEL_BROWN: {
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
@ -129,10 +135,16 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
focal_length,
principal_point_x,
principal_point_y,
k1, k2, k3, k4,
p1, p2,
normalized_x, normalized_y,
distorted_x, distorted_y);
k1,
k2,
k3,
k4,
p1,
p2,
normalized_x,
normalized_y,
distorted_x,
distorted_y);
return;
}
}
@ -156,8 +168,10 @@ template<typename T>
void InvertDistortionModelUsingIntrinsicsBlock(
const CameraIntrinsics* invariant_intrinsics,
const T* const intrinsics_block,
const T& image_x, const T& image_y,
T* normalized_x, T* normalized_y) {
const T& image_x,
const T& image_y,
T* normalized_x,
T* normalized_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@ -175,8 +189,7 @@ void InvertDistortionModelUsingIntrinsicsBlock(
LOG(FATAL) << "Unsupported distortion model.";
return;
case DISTORTION_MODEL_NUKE:
{
case DISTORTION_MODEL_NUKE: {
const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
@ -186,9 +199,12 @@ void InvertDistortionModelUsingIntrinsicsBlock(
principal_point_y,
invariant_intrinsics->image_width(),
invariant_intrinsics->image_height(),
k1, k2,
image_x, image_y,
normalized_x, normalized_y);
k1,
k2,
image_x,
image_y,
normalized_x,
normalized_y);
return;
}
}
@ -198,8 +214,10 @@ void InvertDistortionModelUsingIntrinsicsBlock(
template <typename T>
void NormalizedToImageSpace(const T* const intrinsics_block,
const T& normalized_x, const T& normalized_y,
T* image_x, T* image_y) {
const T& normalized_x,
const T& normalized_y,
T* image_x,
T* image_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@ -219,8 +237,7 @@ void NormalizedToImageSpace(const T* const intrinsics_block,
// This functor can only be used for distortion models which have analytically
// defined Apply() function.
struct ReprojectionErrorApplyIntrinsics {
ReprojectionErrorApplyIntrinsics(
const CameraIntrinsics *invariant_intrinsics,
ReprojectionErrorApplyIntrinsics(const CameraIntrinsics* invariant_intrinsics,
const double observed_distorted_x,
const double observed_distorted_y,
const double weight)
@ -253,11 +270,12 @@ struct ReprojectionErrorApplyIntrinsics {
T yn = x[1] / x[2];
T predicted_distorted_x, predicted_distorted_y;
ApplyDistortionModelUsingIntrinsicsBlock(
invariant_intrinsics_,
ApplyDistortionModelUsingIntrinsicsBlock(invariant_intrinsics_,
intrinsics,
xn, yn,
&predicted_distorted_x, &predicted_distorted_y);
xn,
yn,
&predicted_distorted_x,
&predicted_distorted_y);
// The error is the difference between the predicted and observed position.
residuals[0] = (predicted_distorted_x - T(observed_distorted_x_)) * weight_;
@ -295,8 +313,7 @@ struct ReprojectionErrorInvertIntrinsics {
const T* const X, // Point coordinates 3x1.
T* residuals) const {
// Unpack the intrinsics.
const T& focal_length =
intrinsics[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
const T& focal_length = intrinsics[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
const T& principal_point_x =
intrinsics[PackedIntrinsics::OFFSET_PRINCIPAL_POINT_X];
const T& principal_point_y =
@ -327,14 +344,17 @@ struct ReprojectionErrorInvertIntrinsics {
InvertDistortionModelUsingIntrinsicsBlock(
invariant_intrinsics_,
intrinsics,
T(observed_distorted_x_), T(observed_distorted_y_),
&observed_undistorted_normalized_x, &observed_undistorted_normalized_y);
T(observed_distorted_x_),
T(observed_distorted_y_),
&observed_undistorted_normalized_x,
&observed_undistorted_normalized_y);
T observed_undistorted_image_x, observed_undistorted_image_y;
NormalizedToImageSpace(
intrinsics,
observed_undistorted_normalized_x, observed_undistorted_normalized_y,
&observed_undistorted_image_x, &observed_undistorted_image_y);
NormalizedToImageSpace(intrinsics,
observed_undistorted_normalized_x,
observed_undistorted_normalized_y,
&observed_undistorted_image_x,
&observed_undistorted_image_y);
// The error is the difference between the predicted and observed position.
residuals[0] = (predicted_x - observed_undistorted_image_x) * weight_;
@ -362,7 +382,8 @@ void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
bundling_message += ", "; \
} \
bundling_message += name; \
} (void)0
} \
(void)0
APPEND_BUNDLING_INTRINSICS("f", BUNDLE_FOCAL_LENGTH);
APPEND_BUNDLING_INTRINSICS("px, py", BUNDLE_PRINCIPAL_POINT);
@ -401,7 +422,6 @@ map<int, Vec6> PackCamerasRotationAndTranslation(
void UnpackCamerasRotationAndTranslation(
const map<int, Vec6>& all_cameras_R_t,
EuclideanReconstruction* reconstruction) {
for (map<int, Vec6>::value_type image_and_camera_R_T : all_cameras_R_t) {
const int image = image_and_camera_R_T.first;
const Vec6& camera_R_t = image_and_camera_R_T.second;
@ -501,9 +521,7 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
eval_options.parameter_blocks.push_back(&point->X(0));
}
problem->Evaluate(eval_options,
NULL, NULL, NULL,
&evaluated_jacobian);
problem->Evaluate(eval_options, NULL, NULL, NULL, &evaluated_jacobian);
CRSMatrixToEigenMatrix(evaluated_jacobian, &evaluation->jacobian);
}
@ -511,18 +529,20 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
template <typename CostFunction>
void AddResidualBlockToProblemImpl(const CameraIntrinsics* invariant_intrinsics,
double observed_x, double observed_y,
double observed_x,
double observed_y,
double weight,
double* intrinsics_block,
double* camera_R_t,
EuclideanPoint* point,
ceres::Problem* problem) {
problem->AddResidualBlock(new ceres::AutoDiffCostFunction<
CostFunction, 2, PackedIntrinsics::NUM_PARAMETERS, 6, 3>(
new CostFunction(
invariant_intrinsics,
observed_x, observed_y,
weight)),
problem->AddResidualBlock(
new ceres::AutoDiffCostFunction<CostFunction,
2,
PackedIntrinsics::NUM_PARAMETERS,
6,
3>(new CostFunction(
invariant_intrinsics, observed_x, observed_y, weight)),
NULL,
intrinsics_block,
camera_R_t,
@ -539,7 +559,8 @@ void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
if (NeedUseInvertIntrinsicsPipeline(invariant_intrinsics)) {
AddResidualBlockToProblemImpl<ReprojectionErrorInvertIntrinsics>(
invariant_intrinsics,
marker.x, marker.y,
marker.x,
marker.y,
marker_weight,
intrinsics_block,
camera_R_t,
@ -548,7 +569,8 @@ void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
} else {
AddResidualBlockToProblemImpl<ReprojectionErrorApplyIntrinsics>(
invariant_intrinsics,
marker.x, marker.y,
marker.x,
marker.y,
marker_weight,
intrinsics_block,
camera_R_t,
@ -636,8 +658,7 @@ void EuclideanBundle(const Tracks &tracks,
NULL);
}
void EuclideanBundleCommonIntrinsics(
const Tracks &tracks,
void EuclideanBundleCommonIntrinsics(const Tracks& tracks,
const int bundle_intrinsics,
const int bundle_constraints,
EuclideanReconstruction* reconstruction,
@ -706,7 +727,8 @@ void EuclideanBundleCommonIntrinsics(
point,
&problem);
// We lock the first camera to better deal with scene orientation ambiguity.
// We lock the first camera to better deal with scene orientation
// ambiguity.
if (!have_locked_camera) {
problem.SetParameterBlockConstant(current_camera_R_t);
have_locked_camera = true;
@ -800,8 +822,8 @@ void EuclideanBundleCommonIntrinsics(
LG << "Final intrinsics: " << *intrinsics;
if (evaluation) {
EuclideanBundlerPerformEvaluation(tracks, reconstruction, &all_cameras_R_t,
&problem, evaluation);
EuclideanBundlerPerformEvaluation(
tracks, reconstruction, &all_cameras_R_t, &problem, evaluation);
}
// Separate step to adjust positions of tracks which are

View File

@ -31,11 +31,8 @@ class ProjectiveReconstruction;
class Tracks;
struct BundleEvaluation {
BundleEvaluation() :
num_cameras(0),
num_points(0),
evaluate_jacobian(false) {
}
BundleEvaluation()
: num_cameras(0), num_points(0), evaluate_jacobian(false) {}
// Number of cameras appeared in bundle adjustment problem
int num_cameras;
@ -109,9 +106,7 @@ enum BundleIntrinsics {
BUNDLE_RADIAL_K2 = (1 << 3),
BUNDLE_RADIAL_K3 = (1 << 4),
BUNDLE_RADIAL_K4 = (1 << 5),
BUNDLE_RADIAL = (BUNDLE_RADIAL_K1 |
BUNDLE_RADIAL_K2 |
BUNDLE_RADIAL_K3 |
BUNDLE_RADIAL = (BUNDLE_RADIAL_K1 | BUNDLE_RADIAL_K2 | BUNDLE_RADIAL_K3 |
BUNDLE_RADIAL_K4),
BUNDLE_TANGENTIAL_P1 = (1 << 6),
@ -122,8 +117,7 @@ enum BundleConstraints {
BUNDLE_NO_CONSTRAINTS = 0,
BUNDLE_NO_TRANSLATION = 1,
};
void EuclideanBundleCommonIntrinsics(
const Tracks &tracks,
void EuclideanBundleCommonIntrinsics(const Tracks& tracks,
const int bundle_intrinsics,
const int bundle_constraints,
EuclideanReconstruction* reconstruction,
@ -153,4 +147,3 @@ void ProjectiveBundle(const Tracks &tracks,
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_BUNDLE_H

View File

@ -29,11 +29,8 @@ namespace libmv {
namespace internal {
LookupWarpGrid::LookupWarpGrid()
: offset_(NULL),
width_(0),
height_(0),
overscan_(0.0),
threads_(1) {}
: offset_(NULL), width_(0), height_(0), overscan_(0.0), threads_(1) {
}
LookupWarpGrid::LookupWarpGrid(const LookupWarpGrid& from)
: offset_(NULL),
@ -64,16 +61,16 @@ void LookupWarpGrid::SetThreads(int threads) {
} // namespace internal
CameraIntrinsics::CameraIntrinsics()
: image_width_(0),
image_height_(0),
K_(Mat3::Identity()) {}
: image_width_(0), image_height_(0), K_(Mat3::Identity()) {
}
CameraIntrinsics::CameraIntrinsics(const CameraIntrinsics& from)
: image_width_(from.image_width_),
image_height_(from.image_height_),
K_(from.K_),
distort_(from.distort_),
undistort_(from.undistort_) {}
undistort_(from.undistort_) {
}
// Set the image size in pixels.
void CameraIntrinsics::SetImageSize(int width, int height) {
@ -89,16 +86,14 @@ void CameraIntrinsics::SetK(const Mat3 new_k) {
}
// Set both x and y focal length in pixels.
void CameraIntrinsics::SetFocalLength(double focal_x,
double focal_y) {
void CameraIntrinsics::SetFocalLength(double focal_x, double focal_y) {
K_(0, 0) = focal_x;
K_(1, 1) = focal_y;
ResetLookupGrids();
}
// Set principal point in pixels.
void CameraIntrinsics::SetPrincipalPoint(double cx,
double cy) {
void CameraIntrinsics::SetPrincipalPoint(double cx, double cy) {
K_(0, 2) = cx;
K_(1, 2) = cy;
ResetLookupGrids();
@ -148,8 +143,7 @@ void CameraIntrinsics::Unpack(const PackedIntrinsics& packed_intrinsics) {
// Polynomial model.
PolynomialCameraIntrinsics::PolynomialCameraIntrinsics()
: CameraIntrinsics() {
PolynomialCameraIntrinsics::PolynomialCameraIntrinsics() : CameraIntrinsics() {
SetRadialDistortion(0.0, 0.0, 0.0);
SetTangentialDistortion(0.0, 0.0);
}
@ -170,8 +164,7 @@ void PolynomialCameraIntrinsics::SetRadialDistortion(double k1,
ResetLookupGrids();
}
void PolynomialCameraIntrinsics::SetTangentialDistortion(double p1,
double p2) {
void PolynomialCameraIntrinsics::SetTangentialDistortion(double p1, double p2) {
parameters_[OFFSET_P1] = p1;
parameters_[OFFSET_P2] = p2;
ResetLookupGrids();
@ -185,16 +178,18 @@ void PolynomialCameraIntrinsics::ApplyIntrinsics(double normalized_x,
focal_length_y(),
principal_point_x(),
principal_point_y(),
k1(), k2(), k3(),
p1(), p2(),
k1(),
k2(),
k3(),
p1(),
p2(),
normalized_x,
normalized_y,
image_x,
image_y);
}
void PolynomialCameraIntrinsics::InvertIntrinsics(
double image_x,
void PolynomialCameraIntrinsics::InvertIntrinsics(double image_x,
double image_y,
double* normalized_x,
double* normalized_y) const {
@ -202,8 +197,11 @@ void PolynomialCameraIntrinsics::InvertIntrinsics(
focal_length_y(),
principal_point_x(),
principal_point_y(),
k1(), k2(), k3(),
p1(), p2(),
k1(),
k2(),
k3(),
p1(),
p2(),
image_x,
image_y,
normalized_x,
@ -230,14 +228,12 @@ void PolynomialCameraIntrinsics::Unpack(
packed_intrinsics.GetK2(),
packed_intrinsics.GetK3());
SetTangentialDistortion(packed_intrinsics.GetP1(),
packed_intrinsics.GetP2());
SetTangentialDistortion(packed_intrinsics.GetP1(), packed_intrinsics.GetP2());
}
// Division model.
DivisionCameraIntrinsics::DivisionCameraIntrinsics()
: CameraIntrinsics() {
DivisionCameraIntrinsics::DivisionCameraIntrinsics() : CameraIntrinsics() {
SetDistortion(0.0, 0.0);
}
@ -247,8 +243,7 @@ DivisionCameraIntrinsics::DivisionCameraIntrinsics(
SetDistortion(from.k1(), from.k2());
}
void DivisionCameraIntrinsics::SetDistortion(double k1,
double k2) {
void DivisionCameraIntrinsics::SetDistortion(double k1, double k2) {
parameters_[OFFSET_K1] = k1;
parameters_[OFFSET_K2] = k2;
ResetLookupGrids();
@ -262,7 +257,8 @@ void DivisionCameraIntrinsics::ApplyIntrinsics(double normalized_x,
focal_length_y(),
principal_point_x(),
principal_point_y(),
k1(), k2(),
k1(),
k2(),
normalized_x,
normalized_y,
image_x,
@ -277,15 +273,15 @@ void DivisionCameraIntrinsics::InvertIntrinsics(double image_x,
focal_length_y(),
principal_point_x(),
principal_point_y(),
k1(), k2(),
k1(),
k2(),
image_x,
image_y,
normalized_x,
normalized_y);
}
void DivisionCameraIntrinsics::Pack(
PackedIntrinsics* packed_intrinsics) const {
void DivisionCameraIntrinsics::Pack(PackedIntrinsics* packed_intrinsics) const {
CameraIntrinsics::Pack(packed_intrinsics);
packed_intrinsics->SetK1(k1());
@ -301,13 +297,11 @@ void DivisionCameraIntrinsics::Unpack(
// Nuke model.
NukeCameraIntrinsics::NukeCameraIntrinsics()
: CameraIntrinsics() {
NukeCameraIntrinsics::NukeCameraIntrinsics() : CameraIntrinsics() {
SetDistortion(0.0, 0.0);
}
NukeCameraIntrinsics::NukeCameraIntrinsics(
const NukeCameraIntrinsics &from)
NukeCameraIntrinsics::NukeCameraIntrinsics(const NukeCameraIntrinsics& from)
: CameraIntrinsics(from) {
SetDistortion(from.k1(), from.k2());
}
@ -326,8 +320,10 @@ void NukeCameraIntrinsics::ApplyIntrinsics(double normalized_x,
focal_length_y(),
principal_point_x(),
principal_point_y(),
image_width(), image_height(),
k1(), k2(),
image_width(),
image_height(),
k1(),
k2(),
normalized_x,
normalized_y,
image_x,
@ -342,24 +338,24 @@ void NukeCameraIntrinsics::InvertIntrinsics(double image_x,
focal_length_y(),
principal_point_x(),
principal_point_y(),
image_width(), image_height(),
k1(), k2(),
image_width(),
image_height(),
k1(),
k2(),
image_x,
image_y,
normalized_x,
normalized_y);
}
void NukeCameraIntrinsics::Pack(
PackedIntrinsics* packed_intrinsics) const {
void NukeCameraIntrinsics::Pack(PackedIntrinsics* packed_intrinsics) const {
CameraIntrinsics::Pack(packed_intrinsics);
packed_intrinsics->SetK1(k1());
packed_intrinsics->SetK2(k2());
}
void NukeCameraIntrinsics::Unpack(
const PackedIntrinsics& packed_intrinsics) {
void NukeCameraIntrinsics::Unpack(const PackedIntrinsics& packed_intrinsics) {
CameraIntrinsics::Unpack(packed_intrinsics);
SetDistortion(packed_intrinsics.GetK1(), packed_intrinsics.GetK2());
@ -367,14 +363,12 @@ void NukeCameraIntrinsics::Unpack(
// Brown model.
BrownCameraIntrinsics::BrownCameraIntrinsics()
: CameraIntrinsics() {
BrownCameraIntrinsics::BrownCameraIntrinsics() : CameraIntrinsics() {
SetRadialDistortion(0.0, 0.0, 0.0, 0.0);
SetTangentialDistortion(0.0, 0.0);
}
BrownCameraIntrinsics::BrownCameraIntrinsics(
const BrownCameraIntrinsics &from)
BrownCameraIntrinsics::BrownCameraIntrinsics(const BrownCameraIntrinsics& from)
: CameraIntrinsics(from) {
SetRadialDistortion(from.k1(), from.k2(), from.k3(), from.k4());
SetTangentialDistortion(from.p1(), from.p2());
@ -391,8 +385,7 @@ void BrownCameraIntrinsics::SetRadialDistortion(double k1,
ResetLookupGrids();
}
void BrownCameraIntrinsics::SetTangentialDistortion(double p1,
double p2) {
void BrownCameraIntrinsics::SetTangentialDistortion(double p1, double p2) {
parameters_[OFFSET_P1] = p1;
parameters_[OFFSET_P2] = p2;
ResetLookupGrids();
@ -406,16 +399,19 @@ void BrownCameraIntrinsics::ApplyIntrinsics(double normalized_x,
focal_length_y(),
principal_point_x(),
principal_point_y(),
k1(), k2(), k3(), k4(),
p1(), p2(),
k1(),
k2(),
k3(),
k4(),
p1(),
p2(),
normalized_x,
normalized_y,
image_x,
image_y);
}
void BrownCameraIntrinsics::InvertIntrinsics(
double image_x,
void BrownCameraIntrinsics::InvertIntrinsics(double image_x,
double image_y,
double* normalized_x,
double* normalized_y) const {
@ -423,16 +419,19 @@ void BrownCameraIntrinsics::InvertIntrinsics(
focal_length_y(),
principal_point_x(),
principal_point_y(),
k1(), k2(), k3(), k4(),
p1(), p2(),
k1(),
k2(),
k3(),
k4(),
p1(),
p2(),
image_x,
image_y,
normalized_x,
normalized_y);
}
void BrownCameraIntrinsics::Pack(
PackedIntrinsics* packed_intrinsics) const {
void BrownCameraIntrinsics::Pack(PackedIntrinsics* packed_intrinsics) const {
CameraIntrinsics::Pack(packed_intrinsics);
packed_intrinsics->SetK1(k1());
@ -444,8 +443,7 @@ void BrownCameraIntrinsics::Pack(
packed_intrinsics->SetP2(p2());
}
void BrownCameraIntrinsics::Unpack(
const PackedIntrinsics& packed_intrinsics) {
void BrownCameraIntrinsics::Unpack(const PackedIntrinsics& packed_intrinsics) {
CameraIntrinsics::Unpack(packed_intrinsics);
SetRadialDistortion(packed_intrinsics.GetK1(),
@ -453,12 +451,10 @@ void BrownCameraIntrinsics::Unpack(
packed_intrinsics.GetK3(),
packed_intrinsics.GetK4());
SetTangentialDistortion(packed_intrinsics.GetP1(),
packed_intrinsics.GetP2());
SetTangentialDistortion(packed_intrinsics.GetP1(), packed_intrinsics.GetP2());
}
std::ostream& operator <<(std::ostream &os,
const CameraIntrinsics &intrinsics) {
std::ostream& operator<<(std::ostream& os, const CameraIntrinsics& intrinsics) {
if (intrinsics.focal_length_x() == intrinsics.focal_length_y()) {
os << "f=" << intrinsics.focal_length();
} else {
@ -467,19 +463,18 @@ std::ostream& operator <<(std::ostream &os,
}
os << " cx=" << intrinsics.principal_point_x()
<< " cy=" << intrinsics.principal_point_y()
<< " w=" << intrinsics.image_width()
<< " h=" << intrinsics.image_height();
<< " w=" << intrinsics.image_width() << " h=" << intrinsics.image_height();
#define PRINT_NONZERO_COEFFICIENT(intrinsics, coeff) \
{ \
if (intrinsics->coeff() != 0.0) { \
os << " " #coeff "=" << intrinsics->coeff(); \
} \
} (void) 0
} \
(void)0
switch (intrinsics.GetDistortionModelType()) {
case DISTORTION_MODEL_POLYNOMIAL:
{
case DISTORTION_MODEL_POLYNOMIAL: {
const PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(&intrinsics);
PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k1);
@ -489,24 +484,21 @@ std::ostream& operator <<(std::ostream &os,
PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p2);
break;
}
case DISTORTION_MODEL_DIVISION:
{
case DISTORTION_MODEL_DIVISION: {
const DivisionCameraIntrinsics* division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(&intrinsics);
PRINT_NONZERO_COEFFICIENT(division_intrinsics, k1);
PRINT_NONZERO_COEFFICIENT(division_intrinsics, k2);
break;
}
case DISTORTION_MODEL_NUKE:
{
case DISTORTION_MODEL_NUKE: {
const NukeCameraIntrinsics* nuke_intrinsics =
static_cast<const NukeCameraIntrinsics*>(&intrinsics);
PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k1);
PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k2);
break;
}
case DISTORTION_MODEL_BROWN:
{
case DISTORTION_MODEL_BROWN: {
const BrownCameraIntrinsics* brown_intrinsics =
static_cast<const BrownCameraIntrinsics*>(&intrinsics);
PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k1);
@ -517,8 +509,7 @@ std::ostream& operator <<(std::ostream &os,
PRINT_NONZERO_COEFFICIENT(brown_intrinsics, p2);
break;
}
default:
LOG(FATAL) << "Unknown distortion model.";
default: LOG(FATAL) << "Unknown distortion model.";
}
#undef PRINT_NONZERO_COEFFICIENT

View File

@ -507,10 +507,8 @@ class BrownCameraIntrinsics : public CameraIntrinsics {
double parameters_[NUM_PARAMETERS];
};
/// A human-readable representation of the camera intrinsic parameters.
std::ostream& operator <<(std::ostream &os,
const CameraIntrinsics &intrinsics);
std::ostream& operator<<(std::ostream& os, const CameraIntrinsics& intrinsics);
} // namespace libmv

View File

@ -44,7 +44,8 @@ struct InvertIntrinsicsFunction {
double* warp_y) {
double normalized_x, normalized_y;
intrinsics.InvertIntrinsics(x, y, &normalized_x, &normalized_y);
intrinsics.NormalizedToImageSpace(normalized_x, normalized_y, warp_x, warp_y);
intrinsics.NormalizedToImageSpace(
normalized_x, normalized_y, warp_x, warp_y);
}
};
@ -63,8 +64,8 @@ void LookupWarpGrid::Compute(const CameraIntrinsics &intrinsics,
double aspx = (double)w / intrinsics.image_width();
double aspy = (double)h / intrinsics.image_height();
#if defined(_OPENMP)
# pragma omp parallel for schedule(static) num_threads(threads_) \
if (threads_ > 1 && height > 100)
# pragma omp parallel for schedule(static) \
num_threads(threads_) if (threads_ > 1 && height > 100)
#endif
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
@ -76,13 +77,25 @@ void LookupWarpGrid::Compute(const CameraIntrinsics &intrinsics,
warp_y = warp_y * aspy + 0.5 * overscan * h;
int ix = int(warp_x), iy = int(warp_y);
int fx = round((warp_x - ix) * 256), fy = round((warp_y - iy) * 256);
if (fx == 256) { fx = 0; ix++; } // NOLINT
if (fy == 256) { fy = 0; iy++; } // NOLINT
if (fx == 256) {
fx = 0;
ix++;
} // NOLINT
if (fy == 256) {
fy = 0;
iy++;
} // NOLINT
// Use nearest border pixel
if (ix < 0) { ix = 0, fx = 0; } // NOLINT
if (iy < 0) { iy = 0, fy = 0; } // NOLINT
if (ix >= width - 2) ix = width - 2;
if (iy >= height - 2) iy = height - 2;
if (ix < 0) {
ix = 0, fx = 0;
} // NOLINT
if (iy < 0) {
iy = 0, fy = 0;
} // NOLINT
if (ix >= width - 2)
ix = width - 2;
if (iy >= height - 2)
iy = height - 2;
Offset offset = {(short)(ix - x),
(short)(iy - y),
@ -98,18 +111,13 @@ void LookupWarpGrid::Update(const CameraIntrinsics &intrinsics,
int width,
int height,
double overscan) {
if (width_ != width ||
height_ != height ||
overscan_ != overscan) {
if (width_ != width || height_ != height || overscan_ != overscan) {
Reset();
}
if (offset_ == NULL) {
offset_ = new Offset[width * height];
Compute<WarpFunction>(intrinsics,
width,
height,
overscan);
Compute<WarpFunction>(intrinsics, width, height, overscan);
}
width_ = width;
@ -125,22 +133,23 @@ void LookupWarpGrid::Apply(const PixelType *input_buffer,
int channels,
PixelType* output_buffer) {
#if defined(_OPENMP)
# pragma omp parallel for schedule(static) num_threads(threads_) \
if (threads_ > 1 && height > 100)
# pragma omp parallel for schedule(static) \
num_threads(threads_) if (threads_ > 1 && height > 100)
#endif
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
Offset offset = offset_[y * width + x];
const int pixel_index = ((y + offset.iy) * width +
(x + offset.ix)) * channels;
const int pixel_index =
((y + offset.iy) * width + (x + offset.ix)) * channels;
const PixelType* s = &input_buffer[pixel_index];
for (int i = 0; i < channels; i++) {
output_buffer[(y * width + x) * channels + i] =
((s[i] * (256 - offset.fx) +
s[channels + i] * offset.fx) * (256 - offset.fy) +
((s[i] * (256 - offset.fx) + s[channels + i] * offset.fx) *
(256 - offset.fy) +
(s[width * channels + i] * (256 - offset.fx) +
s[width * channels + channels + i] * offset.fx) * offset.fy)
/ (256 * 256);
s[width * channels + channels + i] * offset.fx) *
offset.fy) /
(256 * 256);
}
}
}
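The Apply() loop above blends the four neighbouring input pixels with 8.8 fixed-point weights (fx and fy are the fractional offsets scaled to 0..255). A standalone single-channel sketch of that blend, with a tiny illustrative 2x2 test image:

#include <cstdio>

// Same fixed-point bilinear blend as LookupWarpGrid::Apply, reduced to a
// single-channel buffer.
static unsigned char SampleBilinear(const unsigned char* buffer,
                                    int width,
                                    int ix,
                                    int iy,
                                    int fx,
                                    int fy) {
  const unsigned char* s = &buffer[iy * width + ix];
  return ((s[0] * (256 - fx) + s[1] * fx) * (256 - fy) +
          (s[width] * (256 - fx) + s[width + 1] * fx) * fy) /
         (256 * 256);
}

int main() {
  const unsigned char image[4] = {0, 100, 100, 200};  // 2x2 image.
  // Sampling halfway between all four pixels gives their average, 100.
  std::printf("%d\n", SampleBilinear(image, 2, 0, 0, 128, 128));
  return 0;
}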
@ -157,15 +166,9 @@ void CameraIntrinsics::DistortBuffer(const PixelType *input_buffer,
PixelType* output_buffer) {
assert(channels >= 1);
assert(channels <= 4);
distort_.Update<InvertIntrinsicsFunction>(*this,
width,
height,
overscan);
distort_.Apply<PixelType>(input_buffer,
width,
height,
channels,
output_buffer);
distort_.Update<InvertIntrinsicsFunction>(*this, width, height, overscan);
distort_.Apply<PixelType>(
input_buffer, width, height, channels, output_buffer);
}
template <typename PixelType>
@ -177,16 +180,10 @@ void CameraIntrinsics::UndistortBuffer(const PixelType *input_buffer,
PixelType* output_buffer) {
assert(channels >= 1);
assert(channels <= 4);
undistort_.Update<ApplyIntrinsicsFunction>(*this,
width,
height,
overscan);
undistort_.Update<ApplyIntrinsicsFunction>(*this, width, height, overscan);
undistort_.Apply<PixelType>(input_buffer,
width,
height,
channels,
output_buffer);
undistort_.Apply<PixelType>(
input_buffer, width, height, channels, output_buffer);
}
} // namespace libmv

View File

@ -22,10 +22,10 @@
#include <iostream>
#include "testing/testing.h"
#include "libmv/image/image.h"
#include "libmv/image/image_drawing.h"
#include "libmv/logging/logging.h"
#include "testing/testing.h"
namespace libmv {
@ -59,25 +59,35 @@ TEST(PolynomialCameraIntrinsics, ApplyIntrinsics) {
const int N = 5;
double expected[N][N][2] = {
{ {75.312500, -24.687500}, {338.982239, -62.035522},
{640.000000, -72.929688}, {941.017761, -62.035522},
{{75.312500, -24.687500},
{338.982239, -62.035522},
{640.000000, -72.929688},
{941.017761, -62.035522},
{1204.687500, -24.687500}},
{ {37.964478, 238.982239}, {323.664551, 223.664551},
{640.000000, 219.193420}, {956.335449, 223.664551},
{{37.964478, 238.982239},
{323.664551, 223.664551},
{640.000000, 219.193420},
{956.335449, 223.664551},
{1242.035522, 238.982239}},
{ {27.070312, 540.000000}, {319.193420, 540.000000},
{640.000000, 540.000000}, {960.806580, 540.000000},
{{27.070312, 540.000000},
{319.193420, 540.000000},
{640.000000, 540.000000},
{960.806580, 540.000000},
{1252.929688, 540.000000}},
{ {37.964478, 841.017761}, {323.664551, 856.335449},
{640.000000, 860.806580}, {956.335449, 856.335449},
{{37.964478, 841.017761},
{323.664551, 856.335449},
{640.000000, 860.806580},
{956.335449, 856.335449},
{1242.035522, 841.017761}},
{ {75.312500, 1104.687500}, {338.982239, 1142.035522},
{640.000000, 1152.929688}, {941.017761, 1142.035522},
{1204.687500, 1104.687500}}
{{75.312500, 1104.687500},
{338.982239, 1142.035522},
{640.000000, 1152.929688},
{941.017761, 1142.035522},
{1204.687500, 1104.687500}},
};
PolynomialCameraIntrinsics intrinsics;
@ -89,12 +99,11 @@ TEST(PolynomialCameraIntrinsics, ApplyIntrinsics) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
double normalized_x = j * step - 0.5,
normalized_y = i * step - 0.5;
double normalized_x = j * step - 0.5, normalized_y = i * step - 0.5;
double distorted_x, distorted_y;
intrinsics.ApplyIntrinsics(normalized_x, normalized_y,
&distorted_x, &distorted_y);
intrinsics.ApplyIntrinsics(
normalized_x, normalized_y, &distorted_x, &distorted_y);
EXPECT_NEAR(expected[i][j][0], distorted_x, 1e-6);
EXPECT_NEAR(expected[i][j][1], distorted_y, 1e-6);
@ -106,25 +115,35 @@ TEST(PolynomialCameraIntrinsics, InvertIntrinsics) {
const int N = 5;
double expected[N][N][2] = {
{ {-0.524482, -0.437069}, {-0.226237, -0.403994},
{ 0.031876, -0.398446}, { 0.293917, -0.408218},
{{-0.524482, -0.437069},
{-0.226237, -0.403994},
{0.031876, -0.398446},
{0.293917, -0.408218},
{0.632438, -0.465028}},
{ {-0.493496, -0.189173}, {-0.219052, -0.179936},
{ 0.030975, -0.178107}, { 0.283742, -0.181280},
{{-0.493496, -0.189173},
{-0.219052, -0.179936},
{0.030975, -0.178107},
{0.283742, -0.181280},
{0.574557, -0.194335}},
{ {-0.488013, 0.032534}, {-0.217537, 0.031077},
{ 0.030781, 0.030781}, { 0.281635, 0.031293},
{{-0.488013, 0.032534},
{-0.217537, 0.031077},
{0.030781, 0.030781},
{0.281635, 0.031293},
{0.566344, 0.033314}},
{ {-0.498696, 0.257660}, {-0.220424, 0.244041},
{ 0.031150, 0.241409}, { 0.285660, 0.245985},
{{-0.498696, 0.257660},
{-0.220424, 0.244041},
{0.031150, 0.241409},
{0.285660, 0.245985},
{0.582670, 0.265629}},
{ {-0.550617, 0.532263}, {-0.230399, 0.477255},
{ 0.032380, 0.469510}, { 0.299986, 0.483311},
{ 0.684740, 0.584043}}
{{-0.550617, 0.532263},
{-0.230399, 0.477255},
{0.032380, 0.469510},
{0.299986, 0.483311},
{0.684740, 0.584043}},
};
PolynomialCameraIntrinsics intrinsics;
@ -132,17 +151,15 @@ TEST(PolynomialCameraIntrinsics, InvertIntrinsics) {
intrinsics.SetPrincipalPoint(600.0, 500.0);
intrinsics.SetRadialDistortion(-0.2, -0.1, -0.05);
double step_x = 1280.0 / (N - 1),
step_y = 1080.0 / (N - 1);
double step_x = 1280.0 / (N - 1), step_y = 1080.0 / (N - 1);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
double distorted_x = j * step_x,
distorted_y = i * step_y;
double distorted_x = j * step_x, distorted_y = i * step_y;
double normalized_x, normalized_y;
intrinsics.InvertIntrinsics(distorted_x, distorted_y,
&normalized_x, &normalized_y);
intrinsics.InvertIntrinsics(
distorted_x, distorted_y, &normalized_x, &normalized_y);
EXPECT_NEAR(expected[i][j][0], normalized_x, 1e-6);
EXPECT_NEAR(expected[i][j][1], normalized_y, 1e-6);
@ -193,7 +210,8 @@ TEST(PolynomialCameraIntrinsics, IdentityDistortBuffer) {
intrinsics.SetPrincipalPoint((double)w / 2.0, (double)h / 2.0);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
intrinsics.DistortBuffer(image.Data(),
image.Width(), image.Height(),
image.Width(),
image.Height(),
0.0,
image.Depth(),
distorted_image.Data());
@ -224,7 +242,8 @@ TEST(PolynomialCameraIntrinsics, IdentityUndistortBuffer) {
intrinsics.SetPrincipalPoint((double)w / 2.0, (double)h / 2.0);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
intrinsics.UndistortBuffer(image.Data(),
image.Width(), image.Height(),
image.Width(),
image.Height(),
0.0,
image.Depth(),
distorted_image.Data());

View File

@ -22,14 +22,14 @@
**
****************************************************************************/
#include <stdlib.h>
#include <memory.h>
#include <stdlib.h>
#include <queue>
#include "libmv/base/scoped_ptr.h"
#include "libmv/image/array_nd.h"
#include "libmv/image/image_converter.h"
#include "libmv/image/convolve.h"
#include "libmv/image/image_converter.h"
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/detect.h"
@ -72,9 +72,8 @@ void FilterFeaturesByDistance(const vector<Feature> &all_features,
//
// Do this on copy of the input features to prevent possible
// distortion in callee function behavior.
std::priority_queue<Feature,
std::vector<Feature>,
FeatureComparison> priority_features;
std::priority_queue<Feature, std::vector<Feature>, FeatureComparison>
priority_features;
for (int i = 0; i < all_features.size(); i++) {
priority_features.push(all_features.at(i));
@ -111,10 +110,12 @@ void DetectFAST(const FloatImage &grayscale_image,
const int height = grayscale_image.Width() - 2 * margin;
const int stride = grayscale_image.Width();
scoped_array<unsigned char> byte_image(FloatImageToUCharArray(grayscale_image));
scoped_array<unsigned char> byte_image(
FloatImageToUCharArray(grayscale_image));
const int byte_image_offset = margin * stride + margin;
// TODO(MatthiasF): Support targetting a feature count (binary search trackness)
// TODO(MatthiasF): Support targetting a feature count (binary search
// trackness)
int num_features;
xy* all = fast9_detect(byte_image.get() + byte_image_offset,
width,
@ -160,19 +161,24 @@ void DetectFAST(const FloatImage &grayscale_image,
}
#ifdef __SSE2__
static unsigned int SAD(const ubyte* imageA, const ubyte* imageB,
int strideA, int strideB) {
static unsigned int SAD(const ubyte* imageA,
const ubyte* imageB,
int strideA,
int strideB) {
__m128i a = _mm_setzero_si128();
for (int i = 0; i < 16; i++) {
a = _mm_adds_epu16(a,
a = _mm_adds_epu16(
a,
_mm_sad_epu8(_mm_loadu_si128((__m128i*)(imageA + i * strideA)),
_mm_loadu_si128((__m128i*)(imageB + i * strideB))));
}
return _mm_extract_epi16(a, 0) + _mm_extract_epi16(a, 4);
}
#else
static unsigned int SAD(const ubyte* imageA, const ubyte* imageB,
int strideA, int strideB) {
static unsigned int SAD(const ubyte* imageA,
const ubyte* imageB,
int strideA,
int strideB) {
unsigned int sad = 0;
for (int i = 0; i < 16; i++) {
for (int j = 0; j < 16; j++) {
@ -194,7 +200,8 @@ void DetectMORAVEC(const FloatImage &grayscale_image,
const int height = grayscale_image.Width() - 2 * margin;
const int stride = grayscale_image.Width();
scoped_array<unsigned char> byte_image(FloatImageToUCharArray(grayscale_image));
scoped_array<unsigned char> byte_image(
FloatImageToUCharArray(grayscale_image));
unsigned short histogram[256];
memset(histogram, 0, sizeof(histogram));
@ -204,37 +211,46 @@ void DetectMORAVEC(const FloatImage &grayscale_image,
for (int y = distance; y < height - distance; y++) {
for (int x = distance; x < width - distance; x++) {
const ubyte* s = &byte_image[y * stride + x];
int score = // low self-similarity with overlapping patterns
// low self-similarity with overlapping patterns
// OPTI: load pattern once
// clang-format off
int score =
SAD(s, s-r*stride-r, stride, stride)+SAD(s, s-r*stride, stride, stride)+SAD(s, s-r*stride+r, stride, stride)+
SAD(s, s -r, stride, stride)+ SAD(s, s +r, stride, stride)+
SAD(s, s+r*stride-r, stride, stride)+SAD(s, s+r*stride, stride, stride)+SAD(s, s+r*stride+r, stride, stride);
// clang-format on
score /= 256; // normalize
if (pattern) // find only features similar to pattern
score -= SAD(s, pattern, stride, 16);
if (score <= 16) continue; // filter very self-similar features
if (score <= 16)
continue; // filter very self-similar features
score -= 16; // translate to score/histogram values
if (score>255) score=255; // clip
if (score > 255)
score = 255; // clip
ubyte* c = &scores[y * width + x];
for (int i = -distance; i < 0; i++) {
for (int j = -distance; j < distance; j++) {
int s = c[i * width + j];
if (s == 0) continue;
if (s >= score) goto nonmax;
if (s == 0)
continue;
if (s >= score)
goto nonmax;
c[i * width + j] = 0;
histogram[s]--;
}
}
for (int i = 0, j = -distance; j < 0; j++) {
int s = c[i * width + j];
if (s == 0) continue;
if (s >= score) goto nonmax;
if (s == 0)
continue;
if (s >= score)
goto nonmax;
c[i * width + j] = 0;
histogram[s]--;
}
c[0] = score, histogram[score]++;
nonmax:
{ } // Do nothing.
nonmax : {} // Do nothing.
}
}
int min = 255, total = 0;
@ -254,10 +270,8 @@ void DetectMORAVEC(const FloatImage &grayscale_image,
// Score calculation above uses top left corner of the
// patch as the origin, here we need to convert this value
// to a pattrn center by adding 8 pixels.
detected_features->push_back(Feature((float) x + 8.0f,
(float) y + 8.0f,
(float) s,
16.0f));
detected_features->push_back(
Feature((float)x + 8.0f, (float)y + 8.0f, (float)s, 16.0f));
}
}
}
@ -281,9 +295,7 @@ void DetectHarris(const FloatImage &grayscale_image,
MultiplyElements(gradient_y, gradient_y, &gradient_yy);
MultiplyElements(gradient_x, gradient_y, &gradient_xy);
FloatImage gradient_xx_blurred,
gradient_yy_blurred,
gradient_xy_blurred;
FloatImage gradient_xx_blurred, gradient_yy_blurred, gradient_xy_blurred;
ConvolveGaussian(gradient_xx, sigma, &gradient_xx_blurred);
ConvolveGaussian(gradient_yy, sigma, &gradient_yy_blurred);
ConvolveGaussian(gradient_xy, sigma, &gradient_xy_blurred);
@ -304,10 +316,8 @@ void DetectHarris(const FloatImage &grayscale_image,
double traceA = A.trace();
double harris_function = detA - alpha * traceA * traceA;
if (harris_function > threshold) {
all_features.push_back(Feature((float) x,
(float) y,
(float) harris_function,
5.0f));
all_features.push_back(
Feature((float)x, (float)y, (float)harris_function, 5.0f));
}
}
}
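For reference, detA - alpha * traceA * traceA above is the usual Harris corner response of the blurred 2x2 structure tensor. A minimal standalone check (alpha = 0.06 is an illustrative value, not necessarily the one used by this detector):

#include <cstdio>

// Harris response for a 2x2 structure tensor
//   A = | xx  xy |
//       | xy  yy |
// matching the detA - alpha * traceA * traceA expression above.
static double HarrisResponse(double xx, double yy, double xy, double alpha) {
  const double det = xx * yy - xy * xy;
  const double trace = xx + yy;
  return det - alpha * trace * trace;
}

int main() {
  // Strong gradients in both directions (corner-like) score positive,
  // gradients in only one direction (edge-like) score negative.
  std::printf("corner: %f\n", HarrisResponse(10.0, 10.0, 0.0, 0.06));  // 76.
  std::printf("edge:   %f\n", HarrisResponse(10.0, 0.1, 0.0, 0.06));   // < 0.
  return 0;
}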
@ -324,7 +334,8 @@ DetectOptions::DetectOptions()
fast_min_trackness(kDefaultFastMinTrackness),
moravec_max_count(0),
moravec_pattern(NULL),
harris_threshold(kDefaultHarrisThreshold) {}
harris_threshold(kDefaultHarrisThreshold) {
}
void Detect(const FloatImage& image,
const DetectOptions& options,
@ -350,8 +361,7 @@ void Detect(const FloatImage &image,
}
}
std::ostream& operator <<(std::ostream &os,
const Feature &feature) {
std::ostream& operator<<(std::ostream& os, const Feature& feature) {
os << "x: " << feature.x << ", y: " << feature.y;
os << ", score: " << feature.score;
os << ", size: " << feature.size;

View File

@ -88,8 +88,8 @@ struct DetectOptions {
// Find only features similar to this pattern. Only used by MORAVEC detector.
//
// This is an image patch denoted in byte array with dimensions of 16px by 16px
// used to filter features by similarity to this patch.
// This is an image patch denoted in byte array with dimensions of 16px by
// 16px used to filter features by similarity to this patch.
unsigned char* moravec_pattern;
// Threshold value of the Harris function to add new featrue
@ -105,8 +105,7 @@ void Detect(const FloatImage &image,
const DetectOptions& options,
vector<Feature>* detected_features);
std::ostream& operator <<(std::ostream &os,
const Feature &feature);
std::ostream& operator<<(std::ostream& os, const Feature& feature);
} // namespace libmv
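A hypothetical calling sketch for the Detect() entry point declared above. The detector-type field and the HARRIS enumerator are assumptions about spelling (they are not shown in this diff), and the synthetic image is purely illustrative:

#include <iostream>

#include "libmv/base/vector.h"
#include "libmv/image/image.h"
#include "libmv/simple_pipeline/detect.h"

int main() {
  // A bright 64x64 image with a dark square in the middle, which yields
  // strong corners at the square's vertices.
  libmv::FloatImage image(64, 64);
  for (int y = 0; y < 64; y++) {
    for (int x = 0; x < 64; x++) {
      image(y, x) = (y >= 24 && y < 40 && x >= 24 && x < 40) ? 0.0f : 1.0f;
    }
  }

  libmv::DetectOptions options;
  options.type = libmv::DetectOptions::HARRIS;  // Assumed enumerator spelling.
  options.harris_threshold = 1e-5;

  libmv::vector<libmv::Feature> features;
  libmv::Detect(image, options, &features);

  for (int i = 0; i < features.size(); i++) {
    std::cout << features[i] << "\n";  // Uses the operator<< declared above.
  }
  return 0;
}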

View File

@ -20,8 +20,8 @@
#include "libmv/simple_pipeline/detect.h"
#include "testing/testing.h"
#include "libmv/logging/logging.h"
#include "testing/testing.h"
namespace libmv {
@ -116,8 +116,7 @@ void PreformSingleTriangleTest(const DetectOptions &options) {
int vertex_x = 10, vertex_y = 5;
for (int i = 0; i < 6; ++i) {
int current_x = vertex_x - i,
current_y = vertex_y + i;
int current_x = vertex_x - i, current_y = vertex_y + i;
for (int j = 0; j < i * 2 + 1; ++j, ++current_x) {
image(current_y, current_x) = 0.0;
}

View File

@ -45,9 +45,13 @@ struct InvertPolynomialIntrinsicsCostFunction {
focal_length_y_(focal_length_y),
principal_point_x_(principal_point_x),
principal_point_y_(principal_point_y),
k1_(k1), k2_(k2), k3_(k3),
p1_(p1), p2_(p2),
x_(image_x), y_(image_y) {}
k1_(k1),
k2_(k2),
k3_(k3),
p1_(p1),
p2_(p2),
x_(image_x),
y_(image_y) {}
Vec2 operator()(const Vec2& u) const {
double xx, yy;
@ -56,10 +60,15 @@ struct InvertPolynomialIntrinsicsCostFunction {
focal_length_y_,
principal_point_x_,
principal_point_y_,
k1_, k2_, k3_,
p1_, p2_,
u(0), u(1),
&xx, &yy);
k1_,
k2_,
k3_,
p1_,
p2_,
u(0),
u(1),
&xx,
&yy);
Vec2 fx;
fx << (xx - x_), (yy - y_);
@ -91,8 +100,10 @@ struct InvertDivisionIntrinsicsCostFunction {
focal_length_y_(focal_length_y),
principal_point_x_(principal_point_x),
principal_point_y_(principal_point_y),
k1_(k1), k2_(k2),
x_(image_x), y_(image_y) {}
k1_(k1),
k2_(k2),
x_(image_x),
y_(image_y) {}
Vec2 operator()(const Vec2& u) const {
double xx, yy;
@ -101,9 +112,12 @@ struct InvertDivisionIntrinsicsCostFunction {
focal_length_y_,
principal_point_x_,
principal_point_y_,
k1_, k2_,
u(0), u(1),
&xx, &yy);
k1_,
k2_,
u(0),
u(1),
&xx,
&yy);
Vec2 fx;
fx << (xx - x_), (yy - y_);
@ -138,9 +152,14 @@ struct InvertBrownIntrinsicsCostFunction {
focal_length_y_(focal_length_y),
principal_point_x_(principal_point_x),
principal_point_y_(principal_point_y),
k1_(k1), k2_(k2), k3_(k3), k4_(k4),
p1_(p1), p2_(p2),
x_(image_x), y_(image_y) {}
k1_(k1),
k2_(k2),
k3_(k3),
k4_(k4),
p1_(p1),
p2_(p2),
x_(image_x),
y_(image_y) {}
Vec2 operator()(const Vec2& u) const {
double xx, yy;
@ -149,10 +168,16 @@ struct InvertBrownIntrinsicsCostFunction {
focal_length_y_,
principal_point_x_,
principal_point_y_,
k1_, k2_, k3_, k4_,
p1_, p2_,
u(0), u(1),
&xx, &yy);
k1_,
k2_,
k3_,
k4_,
p1_,
p2_,
u(0),
u(1),
&xx,
&yy);
Vec2 fx;
fx << (xx - x_), (yy - y_);
@ -194,9 +219,13 @@ void InvertPolynomialDistortionModel(const double focal_length_x,
focal_length_y,
principal_point_x,
principal_point_y,
k1, k2, k3,
p1, p2,
image_x, image_y);
k1,
k2,
k3,
p1,
p2,
image_x,
image_y);
Solver::SolverParameters params;
Solver solver(intrinsics_cost);
@ -231,8 +260,10 @@ void InvertDivisionDistortionModel(const double focal_length_x,
focal_length_y,
principal_point_x,
principal_point_y,
k1, k2,
image_x, image_y);
k1,
k2,
image_x,
image_y);
Solver::SolverParameters params;
Solver solver(intrinsics_cost);
@ -270,9 +301,14 @@ void InvertBrownDistortionModel(const double focal_length_x,
focal_length_y,
principal_point_x,
principal_point_y,
k1, k2, k3, k4,
p1, p2,
image_x, image_y);
k1,
k2,
k3,
k4,
p1,
p2,
image_x,
image_y);
Solver::SolverParameters params;
Solver solver(intrinsics_cost);
@ -305,7 +341,8 @@ struct ApplyNukeIntrinsicsCostFunction {
principal_point_y_(principal_point_y),
image_width_(image_width),
image_height_(image_height),
k1_(k1), k2_(k2),
k1_(k1),
k2_(k2),
expected_normalized_x_(expected_normalized_x),
expected_normalized_y_(expected_normalized_y) {}
@ -316,10 +353,14 @@ struct ApplyNukeIntrinsicsCostFunction {
focal_length_y_,
principal_point_x_,
principal_point_y_,
image_width_, image_height_,
k1_, k2_,
image_coordinate(0), image_coordinate(1),
&actual_normalized_x, &actual_normalized_y);
image_width_,
image_height_,
k1_,
k2_,
image_coordinate(0),
image_coordinate(1),
&actual_normalized_x,
&actual_normalized_y);
Vec2 fx;
fx << (actual_normalized_x - expected_normalized_x_),
@ -363,8 +404,10 @@ void ApplyNukeDistortionModel(const double focal_length_x,
principal_point_y,
image_width,
image_height,
k1, k2,
normalized_x, normalized_y);
k1,
k2,
normalized_x,
normalized_y);
Solver::SolverParameters params;
Solver solver(intrinsics_cost);
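The inversion helpers above (and ApplyNukeDistortionModel) recover the sought point by numerically solving the forward model with a Levenberg-Marquardt solver. As a standalone illustration of the same idea, not the Libmv API, a simple fixed-point iteration for a purely radial polynomial model:

#include <cstdio>

// Invert x_d = x_u * (1 + k1*r^2 + k2*r^4 + k3*r^6) by fixed-point
// iteration, re-evaluating r from the current undistorted guess.
static void UndistortRadial(double k1,
                            double k2,
                            double k3,
                            double xd,
                            double yd,
                            double* xu,
                            double* yu) {
  double x = xd, y = yd;  // Initial guess: the distorted point itself.
  for (int i = 0; i < 20; i++) {
    const double r2 = x * x + y * y;
    const double scale = 1.0 + r2 * (k1 + r2 * (k2 + r2 * k3));
    x = xd / scale;
    y = yd / scale;
  }
  *xu = x;
  *yu = y;
}

int main() {
  const double k1 = -0.2, k2 = -0.1, k3 = -0.05;
  // Distort a known point, then recover it.
  const double xu = 0.3, yu = -0.2;
  const double r2 = xu * xu + yu * yu;
  const double scale = 1.0 + r2 * (k1 + r2 * (k2 + r2 * k3));
  double x_recovered, y_recovered;
  UndistortRadial(k1, k2, k3, xu * scale, yu * scale, &x_recovered, &y_recovered);
  std::printf("%f %f\n", x_recovered, y_recovered);  // Close to 0.3 -0.2.
  return 0;
}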

View File

@ -116,7 +116,6 @@ inline void ApplyDivisionDistortionModel(const T &focal_length_x,
const T& normalized_y,
T* image_x,
T* image_y) {
T x = normalized_x;
T y = normalized_y;
T r2 = x * x + y * y;
@ -255,6 +254,6 @@ inline void ApplyBrownDistortionModel(const T &focal_length_x,
*image_y = focal_length_y * yd + principal_point_y;
} // namespace libmv
}
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_

View File

@ -33,7 +33,8 @@ namespace libmv {
namespace {
void GetImagesInMarkers(const vector<Marker>& markers,
int *image1, int *image2) {
int* image1,
int* image2) {
if (markers.size() < 2) {
return;
}
@ -53,7 +54,8 @@ void GetImagesInMarkers(const vector<Marker> &markers,
bool EuclideanReconstructTwoFrames(const vector<Marker>& markers,
EuclideanReconstruction* reconstruction) {
if (markers.size() < 16) {
LG << "Not enough markers to initialize from two frames: " << markers.size();
LG << "Not enough markers to initialize from two frames: "
<< markers.size();
return false;
}
@ -76,10 +78,8 @@ bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
Mat3 R;
Vec3 t;
Mat3 K = Mat3::Identity();
if (!MotionFromEssentialAndCorrespondence(E,
K, x1.col(0),
K, x2.col(0),
&R, &t)) {
if (!MotionFromEssentialAndCorrespondence(
E, K, x1.col(0), K, x2.col(0), &R, &t)) {
LG << "Failed to compute R and t from E and K.";
return false;
}
@ -88,8 +88,8 @@ bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
reconstruction->InsertCamera(image1, Mat3::Identity(), Vec3::Zero());
reconstruction->InsertCamera(image2, R, t);
LG << "From two frame reconstruction got:\nR:\n" << R
<< "\nt:" << t.transpose();
LG << "From two frame reconstruction got:\nR:\n"
<< R << "\nt:" << t.transpose();
return true;
}

View File

@ -37,12 +37,13 @@ class ProjectiveReconstruction;
tracks visible in both frames. The pose estimation of the camera for
these frames will be inserted into \a *reconstruction.
\note The two frames need to have both enough parallax and enough common tracks
for accurate reconstruction. At least 8 tracks are suggested.
\note The origin of the coordinate system is defined to be the camera of
the first keyframe.
\note This assumes a calibrated reconstruction, e.g. the markers are
already corrected for camera intrinsics and radial distortion.
\note The two frames need to have both enough parallax and enough common
tracks for accurate reconstruction. At least 8 tracks are suggested.
\note The origin of the coordinate system is defined to be the camera of the
first keyframe.
\note This assumes a calibrated reconstruction, e.g. the
markers are already corrected for camera intrinsics and radial
distortion.
\note This assumes an outlier-free set of markers.
\sa EuclideanResect, EuclideanIntersect, EuclideanBundle
@ -58,10 +59,10 @@ bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
tracks visible in both frames. An estimate of the projection matrices for
the two frames will get added to the reconstruction.
\note The two frames need to have both enough parallax and enough common tracks
for accurate reconstruction. At least 8 tracks are suggested.
\note The origin of the coordinate system is defined to be the camera of
the first keyframe.
\note The two frames need to have both enough parallax and enough common
tracks for accurate reconstruction. At least 8 tracks are suggested.
\note The origin of the coordinate system is defined to be the camera of the
first keyframe.
\note This assumes the markers are already corrected for radial distortion.
\note This assumes an outlier-free set of markers.

View File

@ -22,11 +22,11 @@
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/nviewtriangulation.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/triangulation.h"
#include "libmv/multiview/nviewtriangulation.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/levenberg_marquardt.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
@ -109,10 +109,10 @@ bool EuclideanIntersect(const vector<Marker> &markers,
*reconstruction->CameraForImage(marker.image);
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<
EuclideanIntersectCostFunctor,
new ceres::AutoDiffCostFunction<EuclideanIntersectCostFunctor,
2, /* num_residuals */
3>(new EuclideanIntersectCostFunctor(marker, camera)),
3>(
new EuclideanIntersectCostFunctor(marker, camera)),
NULL,
&X(0));
num_residuals++;
@ -156,8 +156,8 @@ bool EuclideanIntersect(const vector<Marker> &markers,
*reconstruction->CameraForImage(markers[i].image);
Vec3 x = camera.R * X + camera.t;
if (x(2) < 0) {
LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
<< ": " << x.transpose();
LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image << ": "
<< x.transpose();
return false;
}
}
@ -240,8 +240,8 @@ bool ProjectiveIntersect(const vector<Marker> &markers,
*reconstruction->CameraForImage(markers[i].image);
Vec3 x = camera.P * X;
if (x(2) < 0) {
LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
<< ": " << x.transpose();
LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image << ": "
<< x.transpose();
}
}

View File

@ -22,8 +22,8 @@
#define LIBMV_SIMPLE_PIPELINE_INTERSECT_H
#include "libmv/base/vector.h"
#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
@ -38,7 +38,8 @@ namespace libmv {
\a markers should contain all \link Marker markers \endlink belonging to
tracks visible in all frames.
\a reconstruction should contain the cameras for all frames.
The new \link Point points \endlink will be inserted in \a reconstruction.
The new \link Point points \endlink will be inserted in \a
reconstruction.
\note This assumes a calibrated reconstruction, e.g. the markers are
already corrected for camera intrinsics and radial distortion.
@ -60,7 +61,8 @@ bool EuclideanIntersect(const vector<Marker> &markers,
\a markers should contain all \link Marker markers \endlink belonging to
tracks visible in all frames.
\a reconstruction should contain the cameras for all frames.
The new \link Point points \endlink will be inserted in \a reconstruction.
The new \link Point points \endlink will be inserted in \a
reconstruction.
\note This assumes that radial distortion is already corrected for, but
does not assume that e.g. focal length and principal point are

View File

@ -22,10 +22,10 @@
#include <iostream>
#include "testing/testing.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
#include "testing/testing.h"
namespace libmv {
@ -40,8 +40,10 @@ TEST(Intersect, EuclideanIntersect) {
// 0, 0, 1;
Mat3 R1 = RotationAroundZ(-0.1);
Mat3 R2 = RotationAroundX(-0.1);
Vec3 t1; t1 << 1, 1, 10;
Vec3 t2; t2 << -2, -1, 10;
Vec3 t1;
t1 << 1, 1, 10;
Vec3 t2;
t2 << -2, -1, 10;
Mat34 P1, P2;
P_From_KRt(K1, R1, t1, &P1);
P_From_KRt(K2, R2, t2, &P2);
@ -78,4 +80,4 @@ TEST(Intersect, EuclideanIntersect) {
EXPECT_NEAR(0, DistanceLInfinity(estimated, expected), 1e-8);
}
}
} // namespace
} // namespace libmv

View File

@ -20,13 +20,13 @@
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/numeric/numeric.h"
#include "ceres/ceres.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/homography.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/simple_pipeline/intersect.h"
#include "libmv/multiview/homography.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/bundle.h"
#include "libmv/simple_pipeline/intersect.h"
#include <Eigen/Eigenvalues>
@ -119,11 +119,8 @@ void FilterZeroWeightMarkersFromTracks(const Tracks &tracks,
for (int i = 0; i < all_markers.size(); ++i) {
Marker& marker = all_markers[i];
if (marker.weight != 0.0) {
filtered_tracks->Insert(marker.image,
marker.track,
marker.x,
marker.y,
marker.weight);
filtered_tracks->Insert(
marker.image, marker.track, marker.x, marker.y, marker.weight);
}
}
}
@ -172,9 +169,8 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
candidate_image <= max_image;
candidate_image++) {
// Conjunction of all markers from both keyframes
vector<Marker> all_markers =
filtered_tracks.MarkersInBothImages(current_keyframe,
candidate_image);
vector<Marker> all_markers = filtered_tracks.MarkersInBothImages(
current_keyframe, candidate_image);
// Match keypoints between frames current_keyframe and candidate_image
vector<Marker> tracked_markers =
@ -186,9 +182,8 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
CoordinatesForMarkersInImage(tracked_markers, current_keyframe, &x1);
CoordinatesForMarkersInImage(tracked_markers, candidate_image, &x2);
LG << "Found " << x1.cols()
<< " correspondences between " << current_keyframe
<< " and " << candidate_image;
LG << "Found " << x1.cols() << " correspondences between "
<< current_keyframe << " and " << candidate_image;
// Not enough points to construct fundamental matrix
if (x1.cols() < 8 || x2.cols() < 8)
@ -199,9 +194,8 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
int Tf = all_markers.size();
double Rc = static_cast<double>(Tc) / Tf;
LG << "Correspondence between " << current_keyframe
<< " and " << candidate_image
<< ": " << Rc;
LG << "Correspondence between " << current_keyframe << " and "
<< candidate_image << ": " << Rc;
if (Rc < Tmin || Rc > Tmax)
continue;
@ -210,19 +204,15 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
// Estimate homography using default options.
EstimateHomographyOptions estimate_homography_options;
EstimateHomography2DFromCorrespondences(x1,
x2,
estimate_homography_options,
&H);
EstimateHomography2DFromCorrespondences(
x1, x2, estimate_homography_options, &H);
// Convert homography to original pixel space.
H = N_inverse * H * N;
EstimateFundamentalOptions estimate_fundamental_options;
EstimateFundamentalFromCorrespondences(x1,
x2,
estimate_fundamental_options,
&F);
EstimateFundamentalFromCorrespondences(
x1, x2, estimate_fundamental_options, &F);
// Convert fundamental to original pixel space.
F = N_inverse * F * N;
@ -238,11 +228,11 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
for (int i = 0; i < x1.cols(); i++) {
Vec2 current_x1, current_x2;
intrinsics.NormalizedToImageSpace(x1(0, i), x1(1, i),
&current_x1(0), &current_x1(1));
intrinsics.NormalizedToImageSpace(
x1(0, i), x1(1, i), &current_x1(0), &current_x1(1));
intrinsics.NormalizedToImageSpace(x2(0, i), x2(1, i),
&current_x2(0), &current_x2(1));
intrinsics.NormalizedToImageSpace(
x2(0, i), x2(1, i), &current_x2(0), &current_x2(1));
H_e(i) = SymmetricGeometricDistance(H, current_x1, current_x2);
F_e(i) = SymmetricEpipolarDistance(F, current_x1, current_x2);
@ -255,10 +245,8 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
double GRIC_H = GRIC(H_e, 2, 8, 4);
double GRIC_F = GRIC(F_e, 3, 7, 4);
LG << "GRIC values for frames " << current_keyframe
<< " and " << candidate_image
<< ", H-GRIC: " << GRIC_H
<< ", F-GRIC: " << GRIC_F;
LG << "GRIC values for frames " << current_keyframe << " and "
<< candidate_image << ", H-GRIC: " << GRIC_H << ", F-GRIC: " << GRIC_F;
if (GRIC_H <= GRIC_F)
continue;
@ -295,23 +283,19 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
Vec3 t;
Mat3 K = Mat3::Identity();
if (!MotionFromEssentialAndCorrespondence(E,
K, x1.col(0),
K, x2.col(0),
&R, &t)) {
if (!MotionFromEssentialAndCorrespondence(
E, K, x1.col(0), K, x2.col(0), &R, &t)) {
LG << "Failed to compute R and t from E and K";
continue;
}
LG << "Camera transform between frames " << current_keyframe
<< " and " << candidate_image
<< ":\nR:\n" << R
<< "\nt:" << t.transpose();
LG << "Camera transform between frames " << current_keyframe << " and "
<< candidate_image << ":\nR:\n"
<< R << "\nt:" << t.transpose();
// First camera is identity, second one is relative to it
reconstruction.InsertCamera(current_keyframe,
Mat3::Identity(),
Vec3::Zero());
reconstruction.InsertCamera(
current_keyframe, Mat3::Identity(), Vec3::Zero());
reconstruction.InsertCamera(candidate_image, R, t);
// Reconstruct 3D points
@ -400,8 +384,7 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
double Sc = static_cast<double>(I + A) / Square(3 * I) * Sigma_P.trace();
LG << "Expected estimation error between "
<< current_keyframe << " and "
LG << "Expected estimation error between " << current_keyframe << " and "
<< candidate_image << ": " << Sc;
// Pairing with a lower Sc indicates a better choice

View File

@ -22,8 +22,8 @@
#define LIBMV_SIMPLE_PIPELINE_KEYFRAME_SELECTION_H_
#include "libmv/base/vector.h"
#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
@ -43,8 +43,7 @@ namespace libmv {
// \param intrinsics: is camera intrinsics.
// \param keyframes: will contain all images number which are considered
// good to be used for reconstruction.
void SelectKeyframesBasedOnGRICAndVariance(
const Tracks &tracks,
void SelectKeyframesBasedOnGRICAndVariance(const Tracks& tracks,
const CameraIntrinsics& intrinsics,
vector<int>& keyframes);

View File

@ -20,9 +20,9 @@
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "testing/testing.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "testing/testing.h"
namespace libmv {
@ -66,25 +66,56 @@ TEST(KeyframeSelection, FabrikEingangNeighborFrames) {
intrinsics.SetPrincipalPoint(960.000, 544.000);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
Marker markers[] = {
{1, 0, 737.599983, 646.397594, 1.0}, {2, 0, 737.906628, 648.113327, 1.0}, {1, 1, 863.045425, 646.081905, 1.0},
{2, 1, 863.339767, 647.650040, 1.0}, {1, 2, 736.959972, 574.080151, 1.0}, {2, 2, 737.217350, 575.604900, 1.0},
{1, 3, 864.097424, 573.374908, 1.0}, {2, 3, 864.383469, 574.900307, 1.0}, {1, 4, 789.429073, 631.677521, 1.0},
{2, 4, 789.893131, 633.124451, 1.0}, {1, 5, 791.051960, 573.442028, 1.0}, {2, 5, 791.336575, 575.088890, 1.0},
{1, 6, 738.973961, 485.130308, 1.0}, {2, 6, 739.435501, 486.734207, 1.0}, {1, 7, 862.403240, 514.866074, 1.0},
{2, 7, 862.660618, 516.413261, 1.0}, {1, 8, 802.240162, 485.759838, 1.0}, {2, 8, 802.602253, 487.432899, 1.0},
{1, 9, 754.340630, 500.624559, 1.0}, {2, 9, 754.559956, 502.079920, 1.0}, {1, 10, 849.398689, 484.480545, 1.0},
{2, 10, 849.599934, 486.079937, 1.0}, {1, 11, 788.803768, 515.924391, 1.0}, {2, 11, 789.119911, 517.439932, 1.0},
{1, 12, 838.733940, 558.212688, 1.0}, {2, 12, 839.039898, 559.679916, 1.0}, {1, 13, 760.014782, 575.194466, 1.0},
{2, 13, 760.319881, 576.639904, 1.0}, {1, 14, 765.321636, 616.015957, 1.0}, {2, 14, 765.759945, 617.599915, 1.0},
{1, 15, 800.963230, 660.032082, 1.0}, {2, 15, 801.279945, 661.759876, 1.0}, {1, 16, 846.321087, 602.313053, 1.0},
{2, 16, 846.719913, 603.839878, 1.0}, {1, 17, 864.288311, 616.790524, 1.0}, {2, 17, 864.639931, 618.239918, 1.0},
{1, 18, 800.006790, 602.573425, 1.0}, {2, 18, 800.319958, 604.159912, 1.0}, {1, 19, 739.026890, 617.944138, 1.0},
{2, 19, 739.199924, 619.519924, 1.0}, {1, 20, 801.987419, 544.134888, 1.0}, {2, 20, 802.239933, 545.599911, 1.0},
{1, 21, 753.619823, 542.961300, 1.0}, {2, 21, 753.919945, 544.639874, 1.0}, {1, 22, 787.921257, 499.910206, 1.0},
{2, 22, 788.159924, 501.439917, 1.0}, {1, 23, 839.095459, 529.287903, 1.0}, {2, 23, 839.359932, 530.879934, 1.0},
{1, 24, 811.760330, 630.732269, 1.0}, {2, 24, 812.159901, 632.319859, 1.0}
};
Marker markers[] = {{1, 0, 737.599983, 646.397594, 1.0},
{2, 0, 737.906628, 648.113327, 1.0},
{1, 1, 863.045425, 646.081905, 1.0},
{2, 1, 863.339767, 647.650040, 1.0},
{1, 2, 736.959972, 574.080151, 1.0},
{2, 2, 737.217350, 575.604900, 1.0},
{1, 3, 864.097424, 573.374908, 1.0},
{2, 3, 864.383469, 574.900307, 1.0},
{1, 4, 789.429073, 631.677521, 1.0},
{2, 4, 789.893131, 633.124451, 1.0},
{1, 5, 791.051960, 573.442028, 1.0},
{2, 5, 791.336575, 575.088890, 1.0},
{1, 6, 738.973961, 485.130308, 1.0},
{2, 6, 739.435501, 486.734207, 1.0},
{1, 7, 862.403240, 514.866074, 1.0},
{2, 7, 862.660618, 516.413261, 1.0},
{1, 8, 802.240162, 485.759838, 1.0},
{2, 8, 802.602253, 487.432899, 1.0},
{1, 9, 754.340630, 500.624559, 1.0},
{2, 9, 754.559956, 502.079920, 1.0},
{1, 10, 849.398689, 484.480545, 1.0},
{2, 10, 849.599934, 486.079937, 1.0},
{1, 11, 788.803768, 515.924391, 1.0},
{2, 11, 789.119911, 517.439932, 1.0},
{1, 12, 838.733940, 558.212688, 1.0},
{2, 12, 839.039898, 559.679916, 1.0},
{1, 13, 760.014782, 575.194466, 1.0},
{2, 13, 760.319881, 576.639904, 1.0},
{1, 14, 765.321636, 616.015957, 1.0},
{2, 14, 765.759945, 617.599915, 1.0},
{1, 15, 800.963230, 660.032082, 1.0},
{2, 15, 801.279945, 661.759876, 1.0},
{1, 16, 846.321087, 602.313053, 1.0},
{2, 16, 846.719913, 603.839878, 1.0},
{1, 17, 864.288311, 616.790524, 1.0},
{2, 17, 864.639931, 618.239918, 1.0},
{1, 18, 800.006790, 602.573425, 1.0},
{2, 18, 800.319958, 604.159912, 1.0},
{1, 19, 739.026890, 617.944138, 1.0},
{2, 19, 739.199924, 619.519924, 1.0},
{1, 20, 801.987419, 544.134888, 1.0},
{2, 20, 802.239933, 545.599911, 1.0},
{1, 21, 753.619823, 542.961300, 1.0},
{2, 21, 753.919945, 544.639874, 1.0},
{1, 22, 787.921257, 499.910206, 1.0},
{2, 22, 788.159924, 501.439917, 1.0},
{1, 23, 839.095459, 529.287903, 1.0},
{2, 23, 839.359932, 530.879934, 1.0},
{1, 24, 811.760330, 630.732269, 1.0},
{2, 24, 812.159901, 632.319859, 1.0}};
int num_markers = sizeof(markers) / sizeof(Marker);
Tracks tracks;
@ -108,18 +139,34 @@ TEST(KeyframeSelection, FabrikEingangFarFrames) {
intrinsics.SetPrincipalPoint(960.000, 544.000);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
Marker markers[] = {
{1, 0, 369.459200, 619.315258, 1.0}, {2, 0, 279.677496, 722.086842, 1.0}, {1, 1, 376.831970, 370.278397, 1.0},
{2, 1, 221.695247, 460.065418, 1.0}, {1, 2, 1209.139023, 567.705605, 1.0}, {2, 2, 1080.760117, 659.230083, 1.0},
{1, 3, 1643.495750, 903.620453, 1.0}, {2, 3, 1618.405037, 1015.374908, 1.0}, {1, 4, 1494.849815, 425.302460, 1.0},
{2, 4, 1457.467575, 514.727587, 1.0}, {1, 5, 1794.637299, 328.728609, 1.0}, {2, 5, 1742.161446, 408.988636, 1.0},
{1, 6, 1672.822723, 102.240358, 1.0}, {2, 6, 1539.287224, 153.536892, 1.0}, {1, 7, 1550.843925, 53.424943, 1.0},
{2, 7, 1385.579109, 96.450085, 1.0}, {1, 8, 852.953281, 465.399578, 1.0}, {2, 8, 779.404564, 560.091843, 1.0},
{1, 9, 906.853752, 299.827040, 1.0}, {2, 9, 786.923218, 385.570770, 1.0}, {1, 10, 406.322966, 87.556041, 1.0},
{2, 10, 140.339413, 150.877481, 1.0}, {1, 11, 254.811573, 851.296478, 1.0}, {2, 11, 94.478302, 969.350189, 1.0},
{1, 12, 729.087868, 806.092758, 1.0}, {2, 12, 606.212139, 919.876560, 1.0}, {1, 13, 1525.719452, 920.398083, 1.0},
{2, 13, 1495.579720, 1031.971218, 1.0}
};
Marker markers[] = {{1, 0, 369.459200, 619.315258, 1.0},
{2, 0, 279.677496, 722.086842, 1.0},
{1, 1, 376.831970, 370.278397, 1.0},
{2, 1, 221.695247, 460.065418, 1.0},
{1, 2, 1209.139023, 567.705605, 1.0},
{2, 2, 1080.760117, 659.230083, 1.0},
{1, 3, 1643.495750, 903.620453, 1.0},
{2, 3, 1618.405037, 1015.374908, 1.0},
{1, 4, 1494.849815, 425.302460, 1.0},
{2, 4, 1457.467575, 514.727587, 1.0},
{1, 5, 1794.637299, 328.728609, 1.0},
{2, 5, 1742.161446, 408.988636, 1.0},
{1, 6, 1672.822723, 102.240358, 1.0},
{2, 6, 1539.287224, 153.536892, 1.0},
{1, 7, 1550.843925, 53.424943, 1.0},
{2, 7, 1385.579109, 96.450085, 1.0},
{1, 8, 852.953281, 465.399578, 1.0},
{2, 8, 779.404564, 560.091843, 1.0},
{1, 9, 906.853752, 299.827040, 1.0},
{2, 9, 786.923218, 385.570770, 1.0},
{1, 10, 406.322966, 87.556041, 1.0},
{2, 10, 140.339413, 150.877481, 1.0},
{1, 11, 254.811573, 851.296478, 1.0},
{2, 11, 94.478302, 969.350189, 1.0},
{1, 12, 729.087868, 806.092758, 1.0},
{2, 12, 606.212139, 919.876560, 1.0},
{1, 13, 1525.719452, 920.398083, 1.0},
{2, 13, 1495.579720, 1031.971218, 1.0}};
int num_markers = sizeof(markers) / sizeof(Marker);
Tracks tracks;
@ -144,15 +191,33 @@ TEST(KeyframeSelection, CopterManualKeyFrames) {
intrinsics.SetRadialDistortion(-0.08590, 0.0, 0.0);
Marker markers[] = {
{1, 0, 645.792694, 403.115931, 1.0}, {2, 0, 630.641174, 307.996409, 1.0}, {1, 1, 783.469086, 403.904328, 1.0},
{2, 1, 766.001129, 308.998225, 1.0}, {1, 2, 650.000000, 160.000001, 1.0}, {1, 3, 785.225906, 158.619039, 1.0},
{2, 3, 767.526474, 70.449695, 1.0}, {1, 4, 290.640526, 382.335634, 1.0}, {2, 4, 273.001728, 86.993319, 1.0},
{1, 5, 291.162739, 410.602684, 1.0}, {2, 5, 273.287849, 111.937487, 1.0}, {1, 6, 136.919317, 349.895797, 1.0},
{1, 7, 490.844345, 47.572222, 1.0}, {1, 8, 454.406433, 488.935761, 1.0}, {1, 9, 378.655815, 618.522248, 1.0},
{2, 9, 357.061806, 372.265077, 1.0}, {1, 10, 496.011391, 372.668824, 1.0}, {2, 10, 477.979164, 222.986112, 1.0},
{1, 11, 680.060272, 256.103625, 1.0}, {2, 11, 670.587540, 204.830453, 1.0}, {1, 12, 1070.817108, 218.775322, 1.0},
{2, 12, 1046.129913, 128.969783, 1.0}, {1, 14, 242.516403, 596.048512, 1.0}, {2, 14, 224.182606, 248.272154, 1.0},
{1, 15, 613.936272, 287.519073, 1.0}, {2, 15, 600.467644, 196.085722, 1.0}, {1, 31, 844.637451, 256.354315, 1.0},
{1, 0, 645.792694, 403.115931, 1.0},
{2, 0, 630.641174, 307.996409, 1.0},
{1, 1, 783.469086, 403.904328, 1.0},
{2, 1, 766.001129, 308.998225, 1.0},
{1, 2, 650.000000, 160.000001, 1.0},
{1, 3, 785.225906, 158.619039, 1.0},
{2, 3, 767.526474, 70.449695, 1.0},
{1, 4, 290.640526, 382.335634, 1.0},
{2, 4, 273.001728, 86.993319, 1.0},
{1, 5, 291.162739, 410.602684, 1.0},
{2, 5, 273.287849, 111.937487, 1.0},
{1, 6, 136.919317, 349.895797, 1.0},
{1, 7, 490.844345, 47.572222, 1.0},
{1, 8, 454.406433, 488.935761, 1.0},
{1, 9, 378.655815, 618.522248, 1.0},
{2, 9, 357.061806, 372.265077, 1.0},
{1, 10, 496.011391, 372.668824, 1.0},
{2, 10, 477.979164, 222.986112, 1.0},
{1, 11, 680.060272, 256.103625, 1.0},
{2, 11, 670.587540, 204.830453, 1.0},
{1, 12, 1070.817108, 218.775322, 1.0},
{2, 12, 1046.129913, 128.969783, 1.0},
{1, 14, 242.516403, 596.048512, 1.0},
{2, 14, 224.182606, 248.272154, 1.0},
{1, 15, 613.936272, 287.519073, 1.0},
{2, 15, 600.467644, 196.085722, 1.0},
{1, 31, 844.637451, 256.354315, 1.0},
{2, 31, 823.200150, 165.714952, 1.0},
};
int num_markers = sizeof(markers) / sizeof(Marker);
@ -180,50 +245,138 @@ TEST(KeyframeSelection, ElevatorManualKeyframesFrames) {
intrinsics.SetRadialDistortion(-0.034, 0.0, 0.0);
Marker markers[] = {
{1, 2, 1139.861412, 1034.634984, 1.0}, {2, 2, 1143.512192, 1065.355718, 1.0}, {1, 3, 1760.821953, 644.658036, 1.0},
{2, 3, 1770.901108, 697.899928, 1.0}, {1, 4, 858.071823, 1068.520746, 1.0}, {1, 6, 1633.952408, 797.050145, 1.0},
{2, 6, 1642.508469, 849.157140, 1.0}, {1, 8, 1716.695824, 451.805491, 1.0}, {2, 8, 1726.513939, 502.095687, 1.0},
{1, 9, 269.577627, 724.986935, 1.0}, {2, 9, 269.424820, 764.154246, 1.0}, {1, 10, 1891.321907, 706.948843, 1.0},
{2, 10, 1903.338547, 766.068377, 1.0}, {1, 12, 1806.227074, 956.089604, 1.0}, {2, 12, 1816.619568, 1013.767376, 1.0},
{1, 14, 269.544153, 1002.333570, 1.0}, {2, 14, 269.367542, 1043.509254, 1.0}, {1, 15, 1402.772141, 281.392962, 1.0},
{2, 15, 1409.089165, 318.731629, 1.0}, {1, 16, 195.877233, 919.454341, 1.0}, {2, 16, 192.531109, 997.367899, 1.0},
{1, 17, 1789.584045, 120.036661, 1.0}, {2, 17, 1800.391846, 167.822964, 1.0}, {1, 18, 999.363213, 765.004807, 1.0},
{2, 18, 1002.345772, 790.560122, 1.0}, {1, 19, 647.342491, 1044.805727, 1.0}, {2, 19, 649.328041, 1058.682940, 1.0},
{1, 20, 1365.486832, 440.901829, 1.0}, {2, 20, 1371.413040, 477.888730, 1.0}, {1, 21, 1787.125282, 301.431606, 1.0},
{2, 21, 1798.527260, 355.224531, 1.0}, {1, 22, 1257.805824, 932.797258, 1.0}, {2, 22, 1263.017578, 969.376774, 1.0},
{1, 23, 961.969528, 843.148112, 1.0}, {2, 23, 964.869461, 868.587620, 1.0}, {1, 24, 158.076110, 1052.643592, 1.0},
{1, 25, 1072.884521, 1005.296981, 1.0}, {2, 25, 1076.091156, 1032.776856, 1.0}, {1, 26, 1107.656937, 526.577228, 1.0},
{2, 26, 1111.618423, 555.524454, 1.0}, {1, 27, 1416.410751, 529.857581, 1.0}, {2, 27, 1422.663574, 570.025957, 1.0},
{1, 28, 1498.673630, 1005.453086, 1.0}, {2, 28, 1505.381813, 1051.827149, 1.0}, {1, 29, 1428.647804, 652.473629, 1.0},
{2, 29, 1434.898224, 692.715390, 1.0}, {1, 30, 1332.318764, 503.673599, 1.0}, {2, 30, 1338.000069, 540.507967, 1.0},
{1, 32, 1358.642693, 709.837904, 1.0}, {2, 32, 1364.231529, 748.678265, 1.0}, {1, 33, 1850.911560, 460.475668, 1.0},
{2, 33, 1862.221413, 512.797347, 1.0}, {1, 34, 1226.117821, 607.053959, 1.0}, {2, 34, 1230.736084, 641.091449, 1.0},
{1, 35, 619.598236, 523.341744, 1.0}, {2, 35, 621.601124, 554.453287, 1.0}, {1, 36, 956.591492, 958.223183, 1.0},
{2, 36, 959.289265, 983.289263, 1.0}, {1, 37, 1249.922218, 419.095856, 1.0}, {2, 37, 1255.005913, 452.556177, 1.0},
{1, 39, 1300.528450, 386.251166, 1.0}, {2, 39, 1305.957413, 420.185595, 1.0}, {1, 40, 1128.689919, 972.558346, 1.0},
{2, 40, 1132.413712, 1003.984737, 1.0}, {1, 41, 503.304749, 1053.504388, 1.0}, {2, 41, 505.019703, 1069.175613, 1.0},
{1, 42, 1197.352982, 472.681564, 1.0}, {2, 42, 1201.910706, 503.459880, 1.0}, {1, 43, 1794.391022, 383.911400, 1.0},
{2, 43, 1805.324135, 436.116468, 1.0}, {1, 44, 789.641418, 1058.045647, 1.0}, {1, 45, 1376.575241, 928.714979, 1.0},
{2, 45, 1381.995850, 969.511957, 1.0}, {1, 46, 1598.023567, 93.975592, 1.0}, {2, 46, 1606.937141, 136.827035, 1.0},
{1, 47, 1455.550232, 762.128685, 1.0}, {2, 47, 1462.014313, 805.164878, 1.0}, {1, 48, 1357.123489, 354.460326, 1.0},
{2, 48, 1363.071899, 390.363121, 1.0}, {1, 49, 939.792652, 781.549895, 1.0}, {2, 49, 942.802620, 806.164012, 1.0},
{1, 50, 1380.251083, 805.948620, 1.0}, {2, 50, 1385.637932, 845.592098, 1.0}, {1, 51, 1021.769943, 1049.802361, 1.0},
{1, 52, 1065.634918, 608.099055, 1.0}, {2, 52, 1069.142189, 635.361736, 1.0}, {1, 53, 624.324188, 463.202863, 1.0},
{2, 53, 626.395454, 494.994088, 1.0}, {1, 54, 1451.459885, 881.557624, 1.0}, {2, 54, 1457.679634, 924.345531, 1.0},
{1, 55, 1201.885986, 1057.079022, 1.0}, {1, 56, 581.157532, 947.661438, 1.0}, {2, 56, 583.242359, 960.831449, 1.0},
{1, 58, 513.593102, 954.175858, 1.0}, {2, 58, 515.470047, 971.309574, 1.0}, {1, 59, 928.069038, 901.774421, 1.0},
{2, 59, 930.847950, 925.613744, 1.0}, {1, 60, 1065.860023, 740.395389, 1.0}, {2, 60, 1069.484253, 768.971086, 1.0},
{1, 61, 990.479393, 906.264632, 1.0}, {2, 61, 993.217506, 933.088803, 1.0}, {1, 62, 1776.196747, 776.278453, 1.0},
{2, 62, 1786.292496, 831.136880, 1.0}, {1, 63, 834.454365, 1012.449725, 1.0}, {2, 63, 836.868324, 1033.451807, 1.0},
{1, 64, 1355.190697, 869.184809, 1.0}, {2, 64, 1360.736618, 909.773347, 1.0}, {1, 65, 702.072487, 897.519686, 1.0},
{2, 65, 704.203377, 911.931131, 1.0}, {1, 66, 1214.022903, 856.199934, 1.0}, {2, 66, 1218.109016, 890.753052, 1.0},
{1, 67, 327.676048, 236.814036, 1.0}, {2, 67, 328.335285, 277.251878, 1.0}, {1, 68, 289.064083, 454.793912, 1.0},
{2, 68, 288.651924, 498.882444, 1.0}, {1, 69, 1626.240692, 278.374350, 1.0}, {2, 69, 1634.131508, 315.853672, 1.0},
{1, 70, 1245.375710, 734.862142, 1.0}, {2, 70, 1250.047417, 769.670885, 1.0}, {1, 71, 497.015305, 510.718904, 1.0},
{2, 71, 498.682308, 541.070201, 1.0}, {1, 72, 1280.542030, 153.939185, 1.0}, {2, 72, 1286.993637, 198.436196, 1.0},
{1, 73, 1534.748840, 138.601043, 1.0}, {2, 73, 1542.961349, 180.170819, 1.0}, {1, 74, 1477.412682, 200.608061, 1.0},
{2, 74, 1484.683914, 240.413260, 1.0}, {1, 76, 450.637321, 407.279642, 1.0}, {2, 76, 451.695642, 441.666291, 1.0},
{1, 78, 246.981239, 220.786298, 1.0}, {2, 78, 244.524879, 290.016564, 1.0}, {1, 79, 36.696489, 420.023407, 1.0},
{1, 2, 1139.861412, 1034.634984, 1.0},
{2, 2, 1143.512192, 1065.355718, 1.0},
{1, 3, 1760.821953, 644.658036, 1.0},
{2, 3, 1770.901108, 697.899928, 1.0},
{1, 4, 858.071823, 1068.520746, 1.0},
{1, 6, 1633.952408, 797.050145, 1.0},
{2, 6, 1642.508469, 849.157140, 1.0},
{1, 8, 1716.695824, 451.805491, 1.0},
{2, 8, 1726.513939, 502.095687, 1.0},
{1, 9, 269.577627, 724.986935, 1.0},
{2, 9, 269.424820, 764.154246, 1.0},
{1, 10, 1891.321907, 706.948843, 1.0},
{2, 10, 1903.338547, 766.068377, 1.0},
{1, 12, 1806.227074, 956.089604, 1.0},
{2, 12, 1816.619568, 1013.767376, 1.0},
{1, 14, 269.544153, 1002.333570, 1.0},
{2, 14, 269.367542, 1043.509254, 1.0},
{1, 15, 1402.772141, 281.392962, 1.0},
{2, 15, 1409.089165, 318.731629, 1.0},
{1, 16, 195.877233, 919.454341, 1.0},
{2, 16, 192.531109, 997.367899, 1.0},
{1, 17, 1789.584045, 120.036661, 1.0},
{2, 17, 1800.391846, 167.822964, 1.0},
{1, 18, 999.363213, 765.004807, 1.0},
{2, 18, 1002.345772, 790.560122, 1.0},
{1, 19, 647.342491, 1044.805727, 1.0},
{2, 19, 649.328041, 1058.682940, 1.0},
{1, 20, 1365.486832, 440.901829, 1.0},
{2, 20, 1371.413040, 477.888730, 1.0},
{1, 21, 1787.125282, 301.431606, 1.0},
{2, 21, 1798.527260, 355.224531, 1.0},
{1, 22, 1257.805824, 932.797258, 1.0},
{2, 22, 1263.017578, 969.376774, 1.0},
{1, 23, 961.969528, 843.148112, 1.0},
{2, 23, 964.869461, 868.587620, 1.0},
{1, 24, 158.076110, 1052.643592, 1.0},
{1, 25, 1072.884521, 1005.296981, 1.0},
{2, 25, 1076.091156, 1032.776856, 1.0},
{1, 26, 1107.656937, 526.577228, 1.0},
{2, 26, 1111.618423, 555.524454, 1.0},
{1, 27, 1416.410751, 529.857581, 1.0},
{2, 27, 1422.663574, 570.025957, 1.0},
{1, 28, 1498.673630, 1005.453086, 1.0},
{2, 28, 1505.381813, 1051.827149, 1.0},
{1, 29, 1428.647804, 652.473629, 1.0},
{2, 29, 1434.898224, 692.715390, 1.0},
{1, 30, 1332.318764, 503.673599, 1.0},
{2, 30, 1338.000069, 540.507967, 1.0},
{1, 32, 1358.642693, 709.837904, 1.0},
{2, 32, 1364.231529, 748.678265, 1.0},
{1, 33, 1850.911560, 460.475668, 1.0},
{2, 33, 1862.221413, 512.797347, 1.0},
{1, 34, 1226.117821, 607.053959, 1.0},
{2, 34, 1230.736084, 641.091449, 1.0},
{1, 35, 619.598236, 523.341744, 1.0},
{2, 35, 621.601124, 554.453287, 1.0},
{1, 36, 956.591492, 958.223183, 1.0},
{2, 36, 959.289265, 983.289263, 1.0},
{1, 37, 1249.922218, 419.095856, 1.0},
{2, 37, 1255.005913, 452.556177, 1.0},
{1, 39, 1300.528450, 386.251166, 1.0},
{2, 39, 1305.957413, 420.185595, 1.0},
{1, 40, 1128.689919, 972.558346, 1.0},
{2, 40, 1132.413712, 1003.984737, 1.0},
{1, 41, 503.304749, 1053.504388, 1.0},
{2, 41, 505.019703, 1069.175613, 1.0},
{1, 42, 1197.352982, 472.681564, 1.0},
{2, 42, 1201.910706, 503.459880, 1.0},
{1, 43, 1794.391022, 383.911400, 1.0},
{2, 43, 1805.324135, 436.116468, 1.0},
{1, 44, 789.641418, 1058.045647, 1.0},
{1, 45, 1376.575241, 928.714979, 1.0},
{2, 45, 1381.995850, 969.511957, 1.0},
{1, 46, 1598.023567, 93.975592, 1.0},
{2, 46, 1606.937141, 136.827035, 1.0},
{1, 47, 1455.550232, 762.128685, 1.0},
{2, 47, 1462.014313, 805.164878, 1.0},
{1, 48, 1357.123489, 354.460326, 1.0},
{2, 48, 1363.071899, 390.363121, 1.0},
{1, 49, 939.792652, 781.549895, 1.0},
{2, 49, 942.802620, 806.164012, 1.0},
{1, 50, 1380.251083, 805.948620, 1.0},
{2, 50, 1385.637932, 845.592098, 1.0},
{1, 51, 1021.769943, 1049.802361, 1.0},
{1, 52, 1065.634918, 608.099055, 1.0},
{2, 52, 1069.142189, 635.361736, 1.0},
{1, 53, 624.324188, 463.202863, 1.0},
{2, 53, 626.395454, 494.994088, 1.0},
{1, 54, 1451.459885, 881.557624, 1.0},
{2, 54, 1457.679634, 924.345531, 1.0},
{1, 55, 1201.885986, 1057.079022, 1.0},
{1, 56, 581.157532, 947.661438, 1.0},
{2, 56, 583.242359, 960.831449, 1.0},
{1, 58, 513.593102, 954.175858, 1.0},
{2, 58, 515.470047, 971.309574, 1.0},
{1, 59, 928.069038, 901.774421, 1.0},
{2, 59, 930.847950, 925.613744, 1.0},
{1, 60, 1065.860023, 740.395389, 1.0},
{2, 60, 1069.484253, 768.971086, 1.0},
{1, 61, 990.479393, 906.264632, 1.0},
{2, 61, 993.217506, 933.088803, 1.0},
{1, 62, 1776.196747, 776.278453, 1.0},
{2, 62, 1786.292496, 831.136880, 1.0},
{1, 63, 834.454365, 1012.449725, 1.0},
{2, 63, 836.868324, 1033.451807, 1.0},
{1, 64, 1355.190697, 869.184809, 1.0},
{2, 64, 1360.736618, 909.773347, 1.0},
{1, 65, 702.072487, 897.519686, 1.0},
{2, 65, 704.203377, 911.931131, 1.0},
{1, 66, 1214.022903, 856.199934, 1.0},
{2, 66, 1218.109016, 890.753052, 1.0},
{1, 67, 327.676048, 236.814036, 1.0},
{2, 67, 328.335285, 277.251878, 1.0},
{1, 68, 289.064083, 454.793912, 1.0},
{2, 68, 288.651924, 498.882444, 1.0},
{1, 69, 1626.240692, 278.374350, 1.0},
{2, 69, 1634.131508, 315.853672, 1.0},
{1, 70, 1245.375710, 734.862142, 1.0},
{2, 70, 1250.047417, 769.670885, 1.0},
{1, 71, 497.015305, 510.718904, 1.0},
{2, 71, 498.682308, 541.070201, 1.0},
{1, 72, 1280.542030, 153.939185, 1.0},
{2, 72, 1286.993637, 198.436196, 1.0},
{1, 73, 1534.748840, 138.601043, 1.0},
{2, 73, 1542.961349, 180.170819, 1.0},
{1, 74, 1477.412682, 200.608061, 1.0},
{2, 74, 1484.683914, 240.413260, 1.0},
{1, 76, 450.637321, 407.279642, 1.0},
{2, 76, 451.695642, 441.666291, 1.0},
{1, 78, 246.981239, 220.786298, 1.0},
{2, 78, 244.524879, 290.016564, 1.0},
{1, 79, 36.696489, 420.023407, 1.0},
{2, 79, 21.364746, 591.245492, 1.0},
};
int num_markers = sizeof(markers) / sizeof(Marker);
@ -249,41 +402,110 @@ TEST(KeyframeSelection, ElevatorReconstructionVarianceTest) {
intrinsics.SetRadialDistortion(-0.034, 0.0, 0.0);
Marker markers[] = {
{1, 0, 182.999997, 1047.000010, 1.0}, {2, 0, 181.475730, 1052.091079, 1.0}, {3, 0, 181.741562, 1057.893341, 1.0},
{4, 0, 183.190498, 1068.310440, 1.0}, {1, 1, 271.000013, 666.000009, 1.0}, {2, 1, 270.596180, 668.665760, 1.0},
{3, 1, 270.523510, 671.559069, 1.0}, {4, 1, 271.856518, 676.818151, 1.0}, {5, 1, 268.989000, 727.051570, 1.0},
{1, 2, 264.999990, 1018.000031, 1.0}, {2, 2, 264.020061, 1021.157591, 1.0}, {3, 2, 264.606056, 1024.823506, 1.0},
{4, 2, 266.200933, 1031.168690, 1.0}, {1, 3, 270.000000, 938.000014, 1.0}, {2, 3, 269.022617, 941.153390, 1.0},
{3, 3, 269.605579, 944.454954, 1.0}, {4, 3, 271.281366, 949.452167, 1.0}, {5, 3, 268.963480, 1004.417453, 1.0},
{1, 4, 200.999994, 799.000003, 1.0}, {2, 4, 199.841366, 803.891838, 1.0}, {3, 4, 200.262208, 809.323246, 1.0},
{4, 4, 201.456513, 819.271195, 1.0}, {5, 4, 195.026493, 924.363234, 1.0}, {1, 5, 1775.000038, 49.999998, 1.0},
{2, 5, 1775.255127, 53.718264, 1.0}, {3, 5, 1776.449890, 55.951670, 1.0}, {4, 5, 1778.815727, 61.923309, 1.0},
{5, 5, 1790.274124, 123.074923, 1.0}, {1, 6, 164.000001, 927.999988, 1.0}, {2, 6, 162.665462, 933.169527, 1.0},
{3, 6, 163.067923, 938.577182, 1.0}, {4, 6, 164.370360, 948.840945, 1.0}, {5, 6, 157.199407, 1057.762341, 1.0},
{1, 7, 618.000011, 477.999998, 1.0}, {2, 7, 617.583504, 480.124243, 1.0}, {3, 7, 618.356495, 482.441897, 1.0},
{4, 7, 619.792500, 486.428132, 1.0}, {5, 7, 619.546051, 525.222627, 1.0}, {1, 8, 499.999981, 1036.999984, 1.0},
{2, 8, 499.080162, 1038.720160, 1.0}, {3, 8, 499.949398, 1039.014344, 1.0}, {4, 8, 501.828003, 1041.286647, 1.0},
{5, 8, 502.777576, 1055.196369, 1.0}, {1, 9, 1587.000046, 31.999999, 1.0}, {2, 9, 1586.988373, 34.635853, 1.0},
{3, 9, 1588.155899, 37.444186, 1.0}, {4, 9, 1589.973106, 42.492081, 1.0}, {5, 9, 1598.683205, 96.526332, 1.0},
{1, 10, 622.999992, 416.999999, 1.0}, {2, 10, 622.449017, 419.233485, 1.0}, {3, 10, 623.283234, 421.500703, 1.0},
{4, 10, 624.620132, 425.537406, 1.0}, {5, 10, 624.290829, 465.078338, 1.0}, {1, 11, 577.999992, 931.999998, 1.0},
{2, 11, 577.042294, 932.872703, 1.0}, {3, 11, 577.832451, 934.045451, 1.0}, {4, 11, 579.729137, 935.735435, 1.0},
{5, 11, 580.691242, 948.396256, 1.0}, {1, 12, 510.999985, 931.999998, 1.0}, {2, 12, 510.111237, 933.152146, 1.0},
{3, 12, 510.797081, 934.454219, 1.0}, {4, 12, 512.647362, 936.595910, 1.0}, {5, 12, 513.247204, 955.144157, 1.0},
{1, 13, 330.459995, 177.059993, 1.0}, {2, 13, 329.876347, 179.615586, 1.0}, {3, 13, 330.681696, 182.757810, 1.0},
{4, 13, 331.345053, 187.903853, 1.0}, {5, 13, 327.824135, 239.611639, 1.0}, {1, 14, 291.813097, 388.516195, 1.0},
{2, 14, 290.984058, 391.382725, 1.0}, {3, 14, 291.526737, 394.778595, 1.0}, {4, 14, 292.763815, 400.310973, 1.0},
{5, 14, 288.714552, 457.548015, 1.0}, {1, 15, 496.491680, 466.534005, 1.0}, {2, 15, 495.909519, 468.518561, 1.0},
{3, 15, 496.588383, 470.853596, 1.0}, {4, 15, 497.976780, 474.731458, 1.0}, {5, 15, 496.998882, 512.568694, 1.0},
{1, 16, 1273.000031, 89.000000, 1.0}, {2, 16, 1272.951965, 92.003637, 1.0}, {3, 16, 1273.934784, 94.972191, 1.0},
{4, 16, 1275.493584, 100.139952, 1.0}, {5, 16, 1281.003571, 156.880163, 1.0}, {1, 17, 1524.713173, 78.852922, 1.0},
{2, 17, 1524.782066, 81.427142, 1.0}, {3, 17, 1525.759048, 84.057939, 1.0}, {4, 17, 1527.579689, 88.966550, 1.0},
{5, 17, 1535.262451, 141.186054, 1.0}, {1, 18, 1509.425011, 94.371824, 1.0}, {1, 19, 451.000013, 357.000003, 1.0},
{2, 19, 450.354881, 359.312410, 1.0}, {3, 19, 451.107473, 361.837088, 1.0}, {4, 19, 452.186537, 366.318061, 1.0},
{5, 19, 450.507660, 409.257599, 1.0}, {1, 20, 254.004936, 114.784185, 1.0}, {2, 20, 253.291512, 119.288486, 1.0},
{3, 20, 253.745584, 124.114957, 1.0}, {4, 20, 254.453287, 132.795120, 1.0}, {5, 20, 246.772242, 225.165337, 1.0},
{1, 21, 65.262880, 147.889409, 1.0}, {2, 21, 63.634465, 157.656807, 1.0}, {3, 21, 63.306799, 169.067053, 1.0},
{4, 21, 62.462311, 189.724241, 1.0}, {5, 21, 35.396615, 430.308380, 1.0},
{1, 0, 182.999997, 1047.000010, 1.0},
{2, 0, 181.475730, 1052.091079, 1.0},
{3, 0, 181.741562, 1057.893341, 1.0},
{4, 0, 183.190498, 1068.310440, 1.0},
{1, 1, 271.000013, 666.000009, 1.0},
{2, 1, 270.596180, 668.665760, 1.0},
{3, 1, 270.523510, 671.559069, 1.0},
{4, 1, 271.856518, 676.818151, 1.0},
{5, 1, 268.989000, 727.051570, 1.0},
{1, 2, 264.999990, 1018.000031, 1.0},
{2, 2, 264.020061, 1021.157591, 1.0},
{3, 2, 264.606056, 1024.823506, 1.0},
{4, 2, 266.200933, 1031.168690, 1.0},
{1, 3, 270.000000, 938.000014, 1.0},
{2, 3, 269.022617, 941.153390, 1.0},
{3, 3, 269.605579, 944.454954, 1.0},
{4, 3, 271.281366, 949.452167, 1.0},
{5, 3, 268.963480, 1004.417453, 1.0},
{1, 4, 200.999994, 799.000003, 1.0},
{2, 4, 199.841366, 803.891838, 1.0},
{3, 4, 200.262208, 809.323246, 1.0},
{4, 4, 201.456513, 819.271195, 1.0},
{5, 4, 195.026493, 924.363234, 1.0},
{1, 5, 1775.000038, 49.999998, 1.0},
{2, 5, 1775.255127, 53.718264, 1.0},
{3, 5, 1776.449890, 55.951670, 1.0},
{4, 5, 1778.815727, 61.923309, 1.0},
{5, 5, 1790.274124, 123.074923, 1.0},
{1, 6, 164.000001, 927.999988, 1.0},
{2, 6, 162.665462, 933.169527, 1.0},
{3, 6, 163.067923, 938.577182, 1.0},
{4, 6, 164.370360, 948.840945, 1.0},
{5, 6, 157.199407, 1057.762341, 1.0},
{1, 7, 618.000011, 477.999998, 1.0},
{2, 7, 617.583504, 480.124243, 1.0},
{3, 7, 618.356495, 482.441897, 1.0},
{4, 7, 619.792500, 486.428132, 1.0},
{5, 7, 619.546051, 525.222627, 1.0},
{1, 8, 499.999981, 1036.999984, 1.0},
{2, 8, 499.080162, 1038.720160, 1.0},
{3, 8, 499.949398, 1039.014344, 1.0},
{4, 8, 501.828003, 1041.286647, 1.0},
{5, 8, 502.777576, 1055.196369, 1.0},
{1, 9, 1587.000046, 31.999999, 1.0},
{2, 9, 1586.988373, 34.635853, 1.0},
{3, 9, 1588.155899, 37.444186, 1.0},
{4, 9, 1589.973106, 42.492081, 1.0},
{5, 9, 1598.683205, 96.526332, 1.0},
{1, 10, 622.999992, 416.999999, 1.0},
{2, 10, 622.449017, 419.233485, 1.0},
{3, 10, 623.283234, 421.500703, 1.0},
{4, 10, 624.620132, 425.537406, 1.0},
{5, 10, 624.290829, 465.078338, 1.0},
{1, 11, 577.999992, 931.999998, 1.0},
{2, 11, 577.042294, 932.872703, 1.0},
{3, 11, 577.832451, 934.045451, 1.0},
{4, 11, 579.729137, 935.735435, 1.0},
{5, 11, 580.691242, 948.396256, 1.0},
{1, 12, 510.999985, 931.999998, 1.0},
{2, 12, 510.111237, 933.152146, 1.0},
{3, 12, 510.797081, 934.454219, 1.0},
{4, 12, 512.647362, 936.595910, 1.0},
{5, 12, 513.247204, 955.144157, 1.0},
{1, 13, 330.459995, 177.059993, 1.0},
{2, 13, 329.876347, 179.615586, 1.0},
{3, 13, 330.681696, 182.757810, 1.0},
{4, 13, 331.345053, 187.903853, 1.0},
{5, 13, 327.824135, 239.611639, 1.0},
{1, 14, 291.813097, 388.516195, 1.0},
{2, 14, 290.984058, 391.382725, 1.0},
{3, 14, 291.526737, 394.778595, 1.0},
{4, 14, 292.763815, 400.310973, 1.0},
{5, 14, 288.714552, 457.548015, 1.0},
{1, 15, 496.491680, 466.534005, 1.0},
{2, 15, 495.909519, 468.518561, 1.0},
{3, 15, 496.588383, 470.853596, 1.0},
{4, 15, 497.976780, 474.731458, 1.0},
{5, 15, 496.998882, 512.568694, 1.0},
{1, 16, 1273.000031, 89.000000, 1.0},
{2, 16, 1272.951965, 92.003637, 1.0},
{3, 16, 1273.934784, 94.972191, 1.0},
{4, 16, 1275.493584, 100.139952, 1.0},
{5, 16, 1281.003571, 156.880163, 1.0},
{1, 17, 1524.713173, 78.852922, 1.0},
{2, 17, 1524.782066, 81.427142, 1.0},
{3, 17, 1525.759048, 84.057939, 1.0},
{4, 17, 1527.579689, 88.966550, 1.0},
{5, 17, 1535.262451, 141.186054, 1.0},
{1, 18, 1509.425011, 94.371824, 1.0},
{1, 19, 451.000013, 357.000003, 1.0},
{2, 19, 450.354881, 359.312410, 1.0},
{3, 19, 451.107473, 361.837088, 1.0},
{4, 19, 452.186537, 366.318061, 1.0},
{5, 19, 450.507660, 409.257599, 1.0},
{1, 20, 254.004936, 114.784185, 1.0},
{2, 20, 253.291512, 119.288486, 1.0},
{3, 20, 253.745584, 124.114957, 1.0},
{4, 20, 254.453287, 132.795120, 1.0},
{5, 20, 246.772242, 225.165337, 1.0},
{1, 21, 65.262880, 147.889409, 1.0},
{2, 21, 63.634465, 157.656807, 1.0},
{3, 21, 63.306799, 169.067053, 1.0},
{4, 21, 62.462311, 189.724241, 1.0},
{5, 21, 35.396615, 430.308380, 1.0},
};
int num_markers = sizeof(markers) / sizeof(Marker);
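
Side note on the line above: sizeof(markers) / sizeof(Marker) is the usual C/C++ idiom for deriving the element count of a statically sized array at compile time. A minimal, self-contained sketch of the same idiom follows; the toy Marker struct and its field order ({frame, track, x, y, weight}) are assumptions made for illustration and are not the actual libmv Marker definition.

#include <cstdio>

// Toy marker type for illustration only; field order is assumed, not taken
// from the real libmv headers.
struct Marker {
  int frame;
  int track;
  double x, y;
  double weight;
};

int main() {
  // A tiny fixture in the same shape as the array above.
  static const Marker markers[] = {
      {1, 0, 182.999997, 1047.000010, 1.0},
      {2, 0, 181.475730, 1052.091079, 1.0},
  };
  // sizeof of the whole array divided by sizeof of one element gives the
  // number of elements; both sizes are known at compile time.
  const int num_markers = sizeof(markers) / sizeof(Marker);
  std::printf("num_markers = %d\n", num_markers);  // prints 2
  return 0;
}
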


@ -47,7 +47,9 @@ void ModalSolverLogProgress(ProgressUpdateCallback *update_callback,
if (update_callback) {
char message[256];
snprintf(message, sizeof(message), "Solving progress %d%%",
snprintf(message,
sizeof(message),
"Solving progress %d%%",
(int)(progress * 100));
update_callback->invoke(progress, message);
@ -59,8 +61,10 @@ struct ModalReprojectionError {
double observed_y,
const double weight,
const Vec3& bundle)
: observed_x_(observed_x), observed_y_(observed_y),
weight_(weight), bundle_(bundle) { }
: observed_x_(observed_x),
observed_y_(observed_y),
weight_(weight),
bundle_(bundle) {}
// TODO(keir): This should support bundling focal length as well.
template <typename T>
@ -168,8 +172,7 @@ void ModalSolver(const Tracks &tracks,
ceres::AngleAxisToQuaternion(&angle_axis(0), &quaternion(0));
LG << "Analytically computed quaternion "
<< quaternion.transpose();
LG << "Analytically computed quaternion " << quaternion.transpose();
}
// STEP 2: Refine rotation with Ceres.
@ -185,13 +188,11 @@ void ModalSolver(const Tracks &tracks,
EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
if (point && marker.weight != 0.0) {
problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
ModalReprojectionError,
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<ModalReprojectionError,
2, /* num_residuals */
4>(new ModalReprojectionError(marker.x,
marker.y,
marker.weight,
point->X)),
4>(new ModalReprojectionError(
marker.x, marker.y, marker.weight, point->X)),
NULL,
&quaternion(0));
num_residuals++;

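For readers unfamiliar with the Ceres call being reflowed in the hunk above: ceres::AutoDiffCostFunction<Functor, kNumResiduals, kBlockSize> wraps an auto-differentiated functor into a residual block over one parameter block of kBlockSize values (in the diff, 2 reprojection residuals over a 4-component quaternion). Below is a minimal sketch of that general Ceres pattern under invented data; LineResidual and its inputs are illustrative only and are not part of libmv or its ModalReprojectionError.

#include <ceres/ceres.h>

// Illustrative functor: fits the parameter block {a, b} to y = a * x + b.
struct LineResidual {
  LineResidual(double x, double y) : x_(x), y_(y) {}

  template <typename T>
  bool operator()(const T* const ab, T* residual) const {
    // One residual computed from a two-element parameter block.
    residual[0] = T(y_) - (ab[0] * T(x_) + ab[1]);
    return true;
  }

  double x_, y_;
};

int main() {
  double ab[2] = {0.0, 0.0};  // Parameters to refine, like &quaternion(0) above.
  ceres::Problem problem;

  const double xs[] = {0.0, 1.0, 2.0, 3.0};
  const double ys[] = {1.0, 3.0, 5.0, 7.0};  // Samples of y = 2x + 1.
  for (int i = 0; i < 4; ++i) {
    // Template arguments: functor type, number of residuals, parameter block
    // size -- mirroring <ModalReprojectionError, 2, 4> in the diff above.
    problem.AddResidualBlock(
        new ceres::AutoDiffCostFunction<LineResidual, 1, 2>(
            new LineResidual(xs[i], ys[i])),
        nullptr,  // No loss function, matching the NULL argument above.
        ab);
  }

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  return 0;
}
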
Some files were not shown because too many files have changed in this diff.