Camera tracking: support for tripod motion solving

Expose an option in the interface to use the modal solver, which currently
supports only tripod motion.

This solver requires at least two tracks to reconstruct motion (a single
track leaves rotation around its own viewing ray unconstrained). Using more
tracks does not generally improve the solution; it only adds instability and
slows things down considerably.

Refinement of camera intrinsics was disabled because it not only refines the
camera intrinsics but also adjusts the camera position, which is not wanted
here.

To use this solver, enable the "Tripod Motion" checkbox in the solver panel.

Merged from tomato: svn merge ^/branches/soc-2011-tomato -r45622:45624 -r46036:46037

P.S. Still quite experimental; it needs more testing and probably some tweaks
to prevent camera jumps when tracks appear or disappear from the screen.
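
For reference, below is a minimal sketch of driving the newly exposed entry point directly through the C API. The libmv_solveModal() signature and libmv_reprojectionError() come from the libmv-capi changes in this diff; the header name, the track-building/cleanup helpers (libmv_tracksNew(), libmv_tracksInsert(), libmv_tracksDestroy(), libmv_destroyReconstruction()) and the exact progress-callback signature are assumptions based on the existing C API, not part of this commit.

/* Hedged sketch, not part of this commit: helper names and the callback
 * signature are assumed from the existing libmv C API. */
#include <cstdio>
#include "libmv-capi.h"

static void progress_cb(void *customdata, double progress, char *message) {
  (void) customdata;
  printf("%s (%d%%)\n", message, (int) (progress * 100));
}

static void solve_tripod_example(void) {
  struct libmv_Tracks *tracks = libmv_tracksNew();

  /* Two tracks visible on two frames, in pixel space (toy values). */
  libmv_tracksInsert(tracks, 0, 0, 320.0, 240.0);
  libmv_tracksInsert(tracks, 0, 1, 800.0, 300.0);
  libmv_tracksInsert(tracks, 1, 0, 330.0, 238.0);
  libmv_tracksInsert(tracks, 1, 1, 812.0, 296.0);

  /* Intrinsics in pixels: focal length, principal point, k1..k3. */
  struct libmv_Reconstruction *reconstruction =
          libmv_solveModal(tracks, 1000.0, 640.0, 360.0, 0.0, 0.0, 0.0,
                           progress_cb, NULL);

  printf("Average reprojection error: %f\n",
         libmv_reprojectionError(reconstruction));

  libmv_destroyReconstruction(reconstruction);
  libmv_tracksDestroy(tracks);
}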
Commit 51a4188105 by Sergey Sharybin, 2012-04-28 14:54:45 +00:00
11 changed files with 300 additions and 37 deletions


@@ -69,6 +69,7 @@ set(SRC
libmv/simple_pipeline/detect.cc
libmv/simple_pipeline/initialize_reconstruction.cc
libmv/simple_pipeline/intersect.cc
libmv/simple_pipeline/modal_solver.cc
libmv/simple_pipeline/pipeline.cc
libmv/simple_pipeline/reconstruction.cc
libmv/simple_pipeline/resect.cc
@@ -126,6 +127,7 @@ set(SRC
libmv/simple_pipeline/detect.h
libmv/simple_pipeline/initialize_reconstruction.h
libmv/simple_pipeline/intersect.h
libmv/simple_pipeline/modal_solver.h
libmv/simple_pipeline/pipeline.h
libmv/simple_pipeline/reconstruction.h
libmv/simple_pipeline/resect.h


@@ -1,3 +1,16 @@
commit a44312a7beb2963b8e3bf8015c516d2eff40cc3d
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Apr 12 13:56:02 2012 +0600
Added solver for modal camera motion; currently it supports only tripod solving.
This solver is intended to deal with camera motions such as tripod and panning shots,
where it is impossible to reconstruct the exact 3D position of markers.
It projects markers onto a sphere and uses rigid registration of rotation to
find the rotation that brings bundles from the previous and current frame
as close together as possible.
commit fa3842e472e3b9c789e47bf6d8f592aa40a84f16
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Apr 12 12:32:48 2012 +0600
@@ -520,9 +533,3 @@ Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
Date: Fri Aug 19 16:04:37 2011 +0200
MSVC compatibility: heap allocate pattern, explicit float cast.
commit 702658d2f8616964a6eeb3743fd85e97ac7ff09d
Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
Date: Fri Aug 19 14:59:24 2011 +0200
Expose regularization parameters (areaPenalty and conditionPenalty) in API.


@@ -42,6 +42,8 @@ libmv/simple_pipeline/initialize_reconstruction.cc
libmv/simple_pipeline/initialize_reconstruction.h
libmv/simple_pipeline/intersect.cc
libmv/simple_pipeline/intersect.h
libmv/simple_pipeline/modal_solver.cc
libmv/simple_pipeline/modal_solver.h
libmv/simple_pipeline/pipeline.cc
libmv/simple_pipeline/pipeline.h
libmv/simple_pipeline/reconstruction.cc


@@ -54,6 +54,7 @@
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/rigid_registration.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include <stdlib.h>
#include <assert.h>
@@ -384,6 +385,31 @@ int libmv_refineParametersAreValid(int parameters) {
LIBMV_REFINE_RADIAL_DISTORTION_K1));
}
void libmv_solveRefineIntrinsics(libmv::Tracks *tracks, libmv::CameraIntrinsics *intrinsics,
libmv::EuclideanReconstruction *reconstruction, int refine_intrinsics,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
{
/* only a few combinations are supported but trust the caller */
int libmv_refine_flags = 0;
if (refine_intrinsics & LIBMV_REFINE_FOCAL_LENGTH) {
libmv_refine_flags |= libmv::BUNDLE_FOCAL_LENGTH;
}
if (refine_intrinsics & LIBMV_REFINE_PRINCIPAL_POINT) {
libmv_refine_flags |= libmv::BUNDLE_PRINCIPAL_POINT;
}
if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K1) {
libmv_refine_flags |= libmv::BUNDLE_RADIAL_K1;
}
if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K2) {
libmv_refine_flags |= libmv::BUNDLE_RADIAL_K2;
}
progress_update_callback(callback_customdata, 1.0, "Refining solution");
libmv::EuclideanBundleCommonIntrinsics(*(libmv::Tracks *)tracks, libmv_refine_flags,
reconstruction, intrinsics);
}
libmv_Reconstruction *libmv_solveReconstruction(libmv_Tracks *tracks, int keyframe1, int keyframe2,
int refine_intrinsics, double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
@@ -423,24 +449,8 @@ libmv_Reconstruction *libmv_solveReconstruction(libmv_Tracks *tracks, int keyfra
libmv::EuclideanCompleteReconstruction(normalized_tracks, reconstruction, &update_callback);
if (refine_intrinsics) {
/* only a few combinations are supported but trust the caller */
int libmv_refine_flags = 0;
if (refine_intrinsics & LIBMV_REFINE_FOCAL_LENGTH) {
libmv_refine_flags |= libmv::BUNDLE_FOCAL_LENGTH;
}
if (refine_intrinsics & LIBMV_REFINE_PRINCIPAL_POINT) {
libmv_refine_flags |= libmv::BUNDLE_PRINCIPAL_POINT;
}
if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K1) {
libmv_refine_flags |= libmv::BUNDLE_RADIAL_K1;
}
if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K2) {
libmv_refine_flags |= libmv::BUNDLE_RADIAL_K2;
}
progress_update_callback(callback_customdata, 1.0, "Refining solution");
libmv::EuclideanBundleCommonIntrinsics(*(libmv::Tracks *)tracks, libmv_refine_flags,
reconstruction, intrinsics);
libmv_solveRefineIntrinsics((libmv::Tracks *)tracks, intrinsics, reconstruction,
refine_intrinsics, progress_update_callback, callback_customdata);
}
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
@@ -450,6 +460,41 @@ libmv_Reconstruction *libmv_solveReconstruction(libmv_Tracks *tracks, int keyfra
return (libmv_Reconstruction *)libmv_reconstruction;
}
struct libmv_Reconstruction *libmv_solveModal(struct libmv_Tracks *tracks, double focal_length,
double principal_x, double principal_y, double k1, double k2, double k3,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
{
/* Invert the camera intrinsics. */
libmv::vector<libmv::Marker> markers = ((libmv::Tracks*)tracks)->AllMarkers();
libmv_Reconstruction *libmv_reconstruction = new libmv_Reconstruction();
libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
intrinsics->SetFocalLength(focal_length, focal_length);
intrinsics->SetPrincipalPoint(principal_x, principal_y);
intrinsics->SetRadialDistortion(k1, k2, k3);
for (int i = 0; i < markers.size(); ++i) {
intrinsics->InvertIntrinsics(markers[i].x,
markers[i].y,
&(markers[i].x),
&(markers[i].y));
}
libmv::Tracks normalized_tracks(markers);
libmv::ModalSolver(normalized_tracks, reconstruction, &update_callback);
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = *(libmv::Tracks *)tracks;
libmv_reconstruction->error = libmv::EuclideanReprojectionError(*(libmv::Tracks *)tracks, *reconstruction, *intrinsics);
return (libmv_Reconstruction *)libmv_reconstruction;
}
int libmv_reporojectionPointForTrack(libmv_Reconstruction *libmv_reconstruction, int track, double pos[3])
{
libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
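
A note on the normalization step in libmv_solveModal() above: marker positions are converted from pixel coordinates into normalized camera coordinates by InvertIntrinsics() before ModalSolver() runs, just as the existing Euclidean pipeline does. Ignoring lens distortion (which the real InvertIntrinsics() also undoes), the mapping is essentially the following; the helper name here is only illustrative.

// Illustrative only: the pinhole part of the pixel -> normalized mapping
// performed by InvertIntrinsics(); radial distortion (k1..k3) is omitted.
static void PixelToNormalized(double focal_length,
                              double principal_x, double principal_y,
                              double x_px, double y_px,
                              double *x_norm, double *y_norm) {
  *x_norm = (x_px - principal_x) / focal_length;
  *y_norm = (y_px - principal_y) / focal_length;
}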


@@ -68,6 +68,9 @@ int libmv_refineParametersAreValid(int parameters);
struct libmv_Reconstruction *libmv_solveReconstruction(struct libmv_Tracks *tracks, int keyframe1, int keyframe2,
int refine_intrinsics, double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata);
struct libmv_Reconstruction *libmv_solveModal(struct libmv_Tracks *tracks, double focal_length,
double principal_x, double principal_y, double k1, double k2, double k3,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata);
int libmv_reporojectionPointForTrack(struct libmv_Reconstruction *libmv_reconstruction, int track, double pos[3]);
double libmv_reporojectionErrorForTrack(struct libmv_Reconstruction *libmv_reconstruction, int track);
double libmv_reporojectionErrorForImage(struct libmv_Reconstruction *libmv_reconstruction, int image);


@@ -0,0 +1,126 @@
// Copyright (c) 2012 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <cstdio>
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include "libmv/simple_pipeline/rigid_registration.h"
#ifdef _MSC_VER
# define snprintf _snprintf
#endif
namespace libmv {
static void ProjectMarkerOnSphere(Marker &marker, Vec3 &X) {
X(0) = marker.x;
X(1) = marker.y;
X(2) = 1.0;
X *= 5.0 / X.norm();
}
static void ModalSolverLogProress(ProgressUpdateCallback *update_callback,
double progress)
{
if (update_callback) {
char message[256];
snprintf(message, sizeof(message), "Solving progress %d%%", (int)(progress * 100));
update_callback->invoke(progress, message);
}
}
void ModalSolver(Tracks &tracks,
EuclideanReconstruction *reconstruction,
ProgressUpdateCallback *update_callback) {
int max_image = tracks.MaxImage();
int max_track = tracks.MaxTrack();
LG << "Max image: " << max_image;
LG << "Max track: " << max_track;
Mat3 R = Mat3::Identity();
for (int image = 0; image <= max_image; ++image) {
vector<Marker> all_markers = tracks.MarkersInImage(image);
ModalSolverLogProress(update_callback, (float) image / max_image);
// Skip empty frames without doing anything
if (all_markers.size() == 0) {
LG << "Skipping frame: " << image;
continue;
}
vector<Vec3> points, reference_points;
// Construct pairs of markers from the current and previous image,
// to reproject them and find the rigid transformation between
// the previous and current image
for (int track = 0; track <= max_track; ++track) {
EuclideanPoint *point = reconstruction->PointForTrack(track);
if (point) {
Marker marker = tracks.MarkerInImageForTrack(image, track);
if (marker.image == image) {
Vec3 X;
LG << "Use track " << track << " for rigid registration between image " <<
image - 1 << " and " << image;
ProjectMarkerOnSphere(marker, X);
points.push_back(point->X);
reference_points.push_back(X);
}
}
}
if (points.size()) {
// Find rigid delta transformation to current image
RigidRegistration(reference_points, points, R);
}
reconstruction->InsertCamera(image, R, Vec3::Zero());
// Check whether there are new tracks whose positions might be reconstructed
for (int track = 0; track <= max_track; ++track) {
if (!reconstruction->PointForTrack(track)) {
Marker marker = tracks.MarkerInImageForTrack(image, track);
if (marker.image == image) {
// New track appeared on this image, project its position onto the sphere
LG << "Projecting track " << track << " at image " << image;
Vec3 X;
ProjectMarkerOnSphere(marker, X);
reconstruction->InsertPoint(track, R.inverse() * X);
}
}
}
}
}
} // namespace libmv
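
The per-frame update above reduces to a rotation-only point-set registration: given the previously reconstructed bundle positions and the current markers projected onto the sphere, find the rotation R that brings them as close as possible in the least-squares sense. RigidRegistration() itself is not part of this diff; the sketch below shows a standard closed-form way to solve that problem (Kabsch / orthogonal Procrustes, using Eigen), which is not necessarily the method libmv's implementation uses.

// Standard Kabsch / orthogonal Procrustes solution, shown for illustration;
// this is NOT the RigidRegistration() implementation used by the solver.
#include <vector>
#include <Eigen/Dense>

// Finds R minimizing sum_i || R * points[i] - reference_points[i] ||^2,
// i.e. the rotation mapping world-space bundles onto the current frame's
// sphere-projected marker directions (no translation for a tripod solve).
Eigen::Matrix3d RegisterRotation(
    const std::vector<Eigen::Vector3d> &reference_points,
    const std::vector<Eigen::Vector3d> &points) {
  Eigen::Matrix3d H = Eigen::Matrix3d::Zero();
  for (size_t i = 0; i < points.size(); ++i) {
    // Cross-covariance; no centering since rotation is about the origin.
    H += points[i] * reference_points[i].transpose();
  }

  Eigen::JacobiSVD<Eigen::Matrix3d> svd(H, Eigen::ComputeFullU |
                                           Eigen::ComputeFullV);
  Eigen::Matrix3d R = svd.matrixV() * svd.matrixU().transpose();

  // Guard against a reflection instead of a proper rotation.
  if (R.determinant() < 0.0) {
    Eigen::Matrix3d V = svd.matrixV();
    V.col(2) *= -1.0;
    R = V * svd.matrixU().transpose();
  }
  return R;
}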


@@ -0,0 +1,48 @@
// Copyright (c) 2012 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_SIMPLE_PIPELINE_MODAL_SOLVER_H_
#define LIBMV_SIMPLE_PIPELINE_MODAL_SOLVER_H_
#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/callbacks.h"
namespace libmv {
/*!
This solver handles camera motion such as tripod rotation, reconstructing
only the camera motion itself. Bundles are not reconstructed properly; they
are simply projected onto a sphere.
Markers from the tracks object are used for reconstruction, and the algorithm
assumes their positions are already undistorted and given in normalized
space.
Reconstructed cameras and projected bundles are added to the reconstruction
object.
*/
void ModalSolver(Tracks &tracks,
EuclideanReconstruction *reconstruction,
ProgressUpdateCallback *update_callback = NULL);
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_MODAL_SOLVER_H_
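
A minimal usage sketch of this internal API, following the documentation above. Tracks::Insert(), EuclideanReconstruction::CameraForImage() and the EuclideanCamera fields are assumed from the existing simple_pipeline headers; the marker coordinates are invented and are taken to be already undistorted and normalized, as required.

// Hedged usage sketch: track/frame values are invented, and the accessor
// names are assumed from the existing simple_pipeline API.
#include <iostream>

#include "libmv/simple_pipeline/modal_solver.h"

void SolveTripodShotExample() {
  // Two tracks on two frames, already in normalized (undistorted) space.
  libmv::Tracks tracks;
  tracks.Insert(/* image */ 0, /* track */ 0, -0.10,  0.20);
  tracks.Insert(/* image */ 0, /* track */ 1,  0.30, -0.10);
  tracks.Insert(/* image */ 1, /* track */ 0, -0.12,  0.21);
  tracks.Insert(/* image */ 1, /* track */ 1,  0.28, -0.09);

  libmv::EuclideanReconstruction reconstruction;
  libmv::ModalSolver(tracks, &reconstruction);

  // Every solved frame gets a rotation-only camera (translation stays zero)
  // and each track gets a bundle projected onto the sphere.
  libmv::EuclideanCamera *camera = reconstruction.CameraForImage(1);
  if (camera) {
    std::cout << "Frame 1 rotation:\n" << camera->R << std::endl;
  }
}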


@@ -227,12 +227,16 @@ class CLIP_PT_tools_solve(Panel):
else "Object Motion")
col.operator("clip.clear_solution")
col = layout.column()
col.prop(settings, "use_tripod_solver")
col = layout.column(align=True)
col.active = not settings.use_tripod_solver
col.prop(settings, "keyframe_a")
col.prop(settings, "keyframe_b")
col = layout.column(align=True)
col.active = tracking_object.is_camera
col.active = tracking_object.is_camera and not settings.use_tripod_solver
col.label(text="Refine:")
col.prop(settings, "refine_intrinsics", text="")


@@ -1523,6 +1523,7 @@ typedef struct MovieReconstructContext {
#endif
char object_name[MAX_NAME];
int is_camera;
short motion_flag;
float focal_length;
float principal_point[2];
@@ -1752,7 +1753,11 @@ int BKE_tracking_can_reconstruct(MovieTracking *tracking, MovieTrackingObject *o
#if WITH_LIBMV
ListBase *tracksbase = BKE_tracking_object_tracks(tracking, object);
if (count_tracks_on_both_keyframes(tracking, tracksbase)<8) {
if (tracking->settings.motion_flag & TRACKING_MOTION_MODAL) {
/* TODO: check for number of tracks? */
return TRUE;
}
else if (count_tracks_on_both_keyframes(tracking, tracksbase) < 8) {
BLI_strncpy(error_msg, "At least 8 common tracks on both of keyframes are needed for reconstruction", error_size);
return FALSE;
@@ -1781,7 +1786,8 @@ MovieReconstructContext* BKE_tracking_reconstruction_context_new(MovieTracking *
MovieTrackingTrack *track;
BLI_strncpy(context->object_name, object->name, sizeof(context->object_name));
context->is_camera = object->flag&TRACKING_OBJECT_CAMERA;
context->is_camera = object->flag & TRACKING_OBJECT_CAMERA;
context->motion_flag = tracking->settings.motion_flag;
context->tracks_map = tracks_map_new(context->object_name, context->is_camera, num_tracks, 0);
@@ -1894,13 +1900,22 @@ void BKE_tracking_solve_reconstruction(MovieReconstructContext *context, short *
progressdata.stats_message = stats_message;
progressdata.message_size = message_size;
context->reconstruction = libmv_solveReconstruction(context->tracks,
context->keyframe1, context->keyframe2,
context->refine_flags,
context->focal_length,
context->principal_point[0], context->principal_point[1],
context->k1, context->k2, context->k3,
solve_reconstruction_update_cb, &progressdata);
if (context->motion_flag & TRACKING_MOTION_MODAL) {
context->reconstruction = libmv_solveModal(context->tracks,
context->focal_length,
context->principal_point[0], context->principal_point[1],
context->k1, context->k2, context->k3,
solve_reconstruction_update_cb, &progressdata);
}
else {
context->reconstruction = libmv_solveReconstruction(context->tracks,
context->keyframe1, context->keyframe2,
context->refine_flags,
context->focal_length,
context->principal_point[0], context->principal_point[1],
context->k1, context->k2, context->k3,
solve_reconstruction_update_cb, &progressdata);
}
error = libmv_reprojectionError(context->reconstruction);


@@ -123,7 +123,7 @@ typedef struct MovieTrackingSettings {
short default_pattern_match; /* re-adjust every N frames */
short default_flag; /* default flags like color channels used by default */
short pad;
short motion_flag; /* flags describing the motion type */
/* ** common tracker settings ** */
short speed; /* speed of tracking */
@@ -131,8 +131,8 @@ typedef struct MovieTrackingSettings {
/* ** reconstruction settings ** */
int keyframe1, keyframe2; /* two keyframes for reconstruction initialization */
/* ** which camera intrinsics to refine. uses on the REFINE_* flags */
short refine_camera_intrinsics, pad23;
/* which camera intrinsics to refine. uses on the REFINE_* flags */
short refine_camera_intrinsics, pad2;
/* ** tool settings ** */
@@ -243,6 +243,11 @@ enum {
/* MovieTrackingSettings->flag */
#define TRACKING_SETTINGS_SHOW_DEFAULT_EXPANDED (1<<0)
/* MovieTrackingSettings->motion_flag */
#define TRACKING_MOTION_TRIPOD (1<<0)
#define TRACKING_MOTION_MODAL (TRACKING_MOTION_TRIPOD)
/* MovieTrackingSettings->speed */
#define TRACKING_SPEED_FASTEST 0
#define TRACKING_SPEED_REALTIME 1


@@ -607,6 +607,12 @@ static void rna_def_trackingSettings(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "Show Expanded", "Show the expanded in the user interface");
RNA_def_property_ui_icon(prop, ICON_TRIA_RIGHT, 1);
/* solver settings */
prop = RNA_def_property(srna, "use_tripod_solver", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
RNA_def_property_boolean_sdna(prop, NULL, "motion_flag", TRACKING_MOTION_TRIPOD);
RNA_def_property_ui_text(prop, "Tripod Motion", "Tracking footage is shooted by tripod camera and should use special sovler for this");
/* limit frames */
prop = RNA_def_property(srna, "default_frames_limit", PROP_INT, PROP_NONE);
RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);