Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
388 changes: 308 additions & 80 deletions gtsam/linear/LossFunctions.cpp

Large diffs are not rendered by default.

228 changes: 193 additions & 35 deletions gtsam/linear/LossFunctions.h

Large diffs are not rendered by default.

257 changes: 250 additions & 7 deletions gtsam/linear/tests/testNoiseModel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -640,6 +640,23 @@ TEST(NoiseModel, robustFunctionHuber)
DOUBLES_EQUAL(0.5000, huber->loss(error4), 1e-8);
}

TEST(NoiseModel, robustFunctionHuberGraduated) {
  // Graduated (GNC) variant of the Huber estimator: for very large \mu the
  // surrogate must reduce to the convex quadratic 0.5*e^2 with unit weights,
  // and for \mu = 1 it must coincide with the standard Huber loss/weight.
  const double k = 5.0, e1 = 1.0, e2 = 10.0;
  const mEstimator::Huber::shared_ptr huber = mEstimator::Huber::Create(k);
  // Unit weight in the convex regime (large \mu)
  DOUBLES_EQUAL(1.0, huber->graduatedWeight(e1, 1e8), 1e-6);
  DOUBLES_EQUAL(1.0, huber->graduatedWeight(e2, 1e8), 1e-6);
  // Standard Huber weight for \mu = 1
  DOUBLES_EQUAL(huber->weight(e1), huber->graduatedWeight(e1, 1.0), 1e-6);
  DOUBLES_EQUAL(huber->weight(e2), huber->graduatedWeight(e2, 1.0), 1e-6);
  // Quadratic loss in the convex regime (large \mu)
  DOUBLES_EQUAL(0.5 * e1 * e1, huber->graduatedLoss(e1, 1e8), 1e-6);
  DOUBLES_EQUAL(0.5 * e2 * e2, huber->graduatedLoss(e2, 1e8), 1e-6);
  // Standard Huber loss for \mu = 1
  DOUBLES_EQUAL(huber->loss(e1), huber->graduatedLoss(e1, 1.0), 1e-6);
  DOUBLES_EQUAL(huber->loss(e2), huber->graduatedLoss(e2, 1.0), 1e-6);
}

TEST(NoiseModel, robustFunctionCauchy)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
Expand All @@ -656,6 +673,23 @@ TEST(NoiseModel, robustFunctionCauchy)
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8);
}

TEST(NoiseModel, robustFunctionCauchyGraduated) {
  // Graduated Cauchy: large \mu yields the convex quadratic surrogate with
  // unit weights; \mu = 1 recovers the standard Cauchy estimator.
  const double kThreshold = 5.0;
  const auto cauchy = mEstimator::Cauchy::Create(kThreshold);
  for (const double e : {1.0, 10.0}) {
    // Convex regime: unit weight and quadratic loss for large \mu.
    DOUBLES_EQUAL(1.0, cauchy->graduatedWeight(e, 1e8), 1e-6);
    DOUBLES_EQUAL(0.5 * e * e, cauchy->graduatedLoss(e, 1e8), 1e-6);
    // At \mu = 1 the graduated forms match the standard estimator.
    DOUBLES_EQUAL(cauchy->weight(e), cauchy->graduatedWeight(e, 1.0), 1e-6);
    DOUBLES_EQUAL(cauchy->loss(e), cauchy->graduatedLoss(e, 1.0), 1e-6);
  }
}

TEST(NoiseModel, robustFunctionAsymmetricCauchy)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
Expand Down Expand Up @@ -687,6 +721,67 @@ TEST(NoiseModel, robustFunctionGemanMcClure)
DOUBLES_EQUAL(0.2500, gmc->loss(error4), 1e-8);
}

TEST(NoiseModel, robustFunctionGemanMcClureGraduatedScaled) {
  // Graduated (scaled) Geman-McClure: large \mu yields the convex quadratic
  // surrogate with unit weights; \mu = 1 recovers the standard estimator.
  const double kThreshold = 1.0;
  const auto gmc = mEstimator::GemanMcClure::Create(kThreshold);
  for (const double e : {1.0, 10.0}) {
    // Convex regime: unit weight and quadratic loss for large \mu.
    DOUBLES_EQUAL(1.0, gmc->graduatedWeight(e, 1e12), 1e-6);
    DOUBLES_EQUAL(0.5 * e * e, gmc->graduatedLoss(e, 1e12), 1e-6);
    // At \mu = 1 the graduated forms match the standard estimator.
    DOUBLES_EQUAL(gmc->weight(e), gmc->graduatedWeight(e, 1.0), 1e-6);
    DOUBLES_EQUAL(gmc->loss(e), gmc->graduatedLoss(e, 1.0), 1e-6);
  }
}

/* ************************************************************************* */
TEST(NoiseModel, robustFunctionGemanMcClureGraduatedScaleInvariant) {
  // Scale-invariant graduated Geman-McClure: \mu in [0, 1] interpolates
  // between a scaled quadratic (\mu = 0) and the standard Geman-McClure
  // loss (\mu = 1). Uses DOUBLES_EQUAL throughout for consistency with the
  // sibling tests (previously mixed CHECK(assert_equal) and DOUBLES_EQUAL).
  mEstimator::GemanMcClure::shared_ptr gmc = mEstimator::GemanMcClure::Create(
      1.0, mEstimator::GemanMcClure::GradScheme::SCALE_INVARIANT);
  // At zero residual the loss is zero regardless of \mu.
  DOUBLES_EQUAL(0.0, gmc->graduatedLoss(0.0, 0.0), 1e-9);
  DOUBLES_EQUAL(0.0, gmc->graduatedLoss(0.0, 0.5), 1e-9);
  DOUBLES_EQUAL(0.0, gmc->graduatedLoss(0.0, 1.0), 1e-9);

  // For \mu = 0 the loss is quadratic in the residual.
  DOUBLES_EQUAL(0.0025, gmc->graduatedLoss(0.1, 0.0), 1e-9);
  DOUBLES_EQUAL(0.4225, gmc->graduatedLoss(1.3, 0.0), 1e-9);
  DOUBLES_EQUAL(38.750625, gmc->graduatedLoss(12.45, 0.0), 1e-9);

  // For 0 < \mu < 1 the loss depends on the interpolated shape.
  DOUBLES_EQUAL(0.00454545454, gmc->graduatedLoss(0.1, 0.5), 1e-9);
  DOUBLES_EQUAL(0.36739130434, gmc->graduatedLoss(1.3, 0.5), 1e-9);
  DOUBLES_EQUAL(5.76217472119, gmc->graduatedLoss(12.45, 0.5), 1e-9);

  // For \mu = 1 the loss is the standard Geman-McClure loss.
  DOUBLES_EQUAL(0.00495049504, gmc->graduatedLoss(0.1, 1.0), 1e-9);
  DOUBLES_EQUAL(0.31412639405, gmc->graduatedLoss(1.3, 1.0), 1e-9);
  DOUBLES_EQUAL(0.49679492315, gmc->graduatedLoss(12.45, 1.0), 1e-9);

  // At \mu = 0 all weights are identical at 0.5.
  DOUBLES_EQUAL(0.5, gmc->graduatedWeight(0.1, 0.0), 1e-9);
  DOUBLES_EQUAL(0.5, gmc->graduatedWeight(1.3, 0.0), 1e-9);
  DOUBLES_EQUAL(0.5, gmc->graduatedWeight(12.45, 0.0), 1e-9);

  // At \mu = 1 weights are higher for small residuals.
  DOUBLES_EQUAL(0.9802960494, gmc->graduatedWeight(0.1, 1.0), 1e-9);
  DOUBLES_EQUAL(0.13819598955, gmc->graduatedWeight(1.3, 1.0), 1e-9);
  DOUBLES_EQUAL(0.00004109007, gmc->graduatedWeight(12.45, 1.0), 1e-9);

  // At \mu = 1 very large residuals receive ~0 weight.
  DOUBLES_EQUAL(0.0, gmc->graduatedWeight(2000.0, 1.0), 1e-9);

  // Across \mu, a zero residual always has weight 1.
  DOUBLES_EQUAL(1.0, gmc->graduatedWeight(0.0, 0.1), 1e-9);
  DOUBLES_EQUAL(1.0, gmc->graduatedWeight(0.0, 0.5), 1e-9);
  DOUBLES_EQUAL(1.0, gmc->graduatedWeight(0.0, 0.8), 1e-9);
}

TEST(NoiseModel, robustFunctionTLS)
{
const double k = 4.0, error1 = 0.5, error2 = 10.0, error3 = -10.0, error4 = -0.5;
Expand All @@ -702,6 +797,56 @@ TEST(NoiseModel, robustFunctionTLS)
DOUBLES_EQUAL(0.1250, tls->loss(error4), 1e-8);
}

TEST(NoiseModel, robustFunctionTruncatedLeastSquaresGraduatedStandard) {
  // Graduated TLS, STANDARD scheme: large \mu yields the convex quadratic
  // surrogate with unit weights; \mu = 1 recovers the standard TLS estimator.
  const double kThreshold = 5.0;
  const auto tls = mEstimator::TruncatedLeastSquares::Create(kThreshold);
  for (const double e : {1.0, 10.0}) {
    // Convex regime: unit weight and quadratic loss for large \mu.
    DOUBLES_EQUAL(1.0, tls->graduatedWeight(e, 1e12), 1e-6);
    DOUBLES_EQUAL(0.5 * e * e, tls->graduatedLoss(e, 1e12), 1e-6);
    // At \mu = 1 the graduated forms match the standard estimator.
    DOUBLES_EQUAL(tls->weight(e), tls->graduatedWeight(e, 1.0), 1e-6);
    DOUBLES_EQUAL(tls->loss(e), tls->graduatedLoss(e, 1.0), 1e-6);
  }
}

TEST(NoiseModel, robustFunctionTruncatedLeastSquaresGraduatedLinear) {
  // Graduated TLS, GNC_LINEAR scheme: the convex regime is reached as
  // \mu -> 0 and the standard TLS estimator is recovered for large \mu.
  // Checks are grouped per residual rather than per property.
  const double kThreshold = 5.0;
  const double eSmall = 1.0, eLarge = 10.0;
  const auto tls = mEstimator::TruncatedLeastSquares::Create(
      kThreshold, mEstimator::TruncatedLeastSquares::GradScheme::GNC_LINEAR);
  // Small residual: convex regime near \mu = 0, standard for large \mu.
  DOUBLES_EQUAL(0.005, tls->graduatedWeight(eSmall, 1e-6), 1e-6);
  DOUBLES_EQUAL(0.0005, tls->graduatedLoss(eSmall, 1e-8), 1e-4);
  DOUBLES_EQUAL(tls->weight(eSmall), tls->graduatedWeight(eSmall, 1e8), 1e-6);
  DOUBLES_EQUAL(tls->loss(eSmall), tls->graduatedLoss(eSmall, 1e8), 1e-6);
  // Large residual: convex regime near \mu = 0, standard for large \mu.
  DOUBLES_EQUAL(0.0005, tls->graduatedWeight(eLarge, 1e-6), 1e-6);
  DOUBLES_EQUAL(0.005, tls->graduatedLoss(eLarge, 1e-8), 1e-4);
  DOUBLES_EQUAL(tls->weight(eLarge), tls->graduatedWeight(eLarge, 1e8), 1e-6);
  DOUBLES_EQUAL(tls->loss(eLarge), tls->graduatedLoss(eLarge, 1e8), 1e-6);
}

TEST(NoiseModel, robustFunctionTruncatedLeastSquaresGraduatedSuperLinear) {
  // Graduated TLS, GNC_SUPERLINEAR scheme: only the weight surrogate is
  // checked here. The convex regime is reached as \mu -> 0 and the standard
  // TLS weight is recovered for large \mu.
  const double kThreshold = 5.0;
  const double eSmall = 1.0, eLarge = 10.0;
  const auto tls = mEstimator::TruncatedLeastSquares::Create(
      kThreshold,
      mEstimator::TruncatedLeastSquares::GradScheme::GNC_SUPERLINEAR);
  // Small residual: full weight near \mu = 0, standard for large \mu.
  DOUBLES_EQUAL(1, tls->graduatedWeight(eSmall, 1e-6), 1e-6);
  DOUBLES_EQUAL(tls->weight(eSmall), tls->graduatedWeight(eSmall, 1e8), 1e-6);
  // Large residual: half weight near \mu = 0, standard for large \mu.
  DOUBLES_EQUAL(0.5, tls->graduatedWeight(eLarge, 1e-6), 1e-6);
  DOUBLES_EQUAL(tls->weight(eLarge), tls->graduatedWeight(eLarge, 1e8), 1e-6);
}

TEST(NoiseModel, robustFunctionWelsch)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
Expand All @@ -718,6 +863,23 @@ TEST(NoiseModel, robustFunctionWelsch)
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error4), 1e-8);
}

TEST(NoiseModel, robustFunctionWelschGraduated) {
  // Graduated Welsch: for very large \mu the surrogate must reduce to the
  // convex quadratic 0.5*e^2 with unit weights, and for \mu = 1 it must
  // coincide with the standard Welsch loss/weight.
  // NOTE: test renamed from "WelshGraduated" to match the Welsch class and
  // the sibling robustFunctionWelsch test.
  const double k = 5.0, e1 = 1.0, e2 = 10.0;
  const mEstimator::Welsch::shared_ptr welsch = mEstimator::Welsch::Create(k);
  // Unit weight in the convex regime (large \mu)
  DOUBLES_EQUAL(1.0, welsch->graduatedWeight(e1, 1e12), 1e-6);
  DOUBLES_EQUAL(1.0, welsch->graduatedWeight(e2, 1e12), 1e-6);
  // Standard Welsch weight for \mu = 1
  DOUBLES_EQUAL(welsch->weight(e1), welsch->graduatedWeight(e1, 1.0), 1e-6);
  DOUBLES_EQUAL(welsch->weight(e2), welsch->graduatedWeight(e2, 1.0), 1e-6);
  // Quadratic loss in the convex regime (large \mu)
  DOUBLES_EQUAL(0.5 * e1 * e1, welsch->graduatedLoss(e1, 1e12), 1e-6);
  DOUBLES_EQUAL(0.5 * e2 * e2, welsch->graduatedLoss(e2, 1e12), 1e-6);
  // Standard Welsch loss for \mu = 1
  DOUBLES_EQUAL(welsch->loss(e1), welsch->graduatedLoss(e1, 1.0), 1e-6);
  DOUBLES_EQUAL(welsch->loss(e2), welsch->graduatedLoss(e2, 1.0), 1e-6);
}

TEST(NoiseModel, robustFunctionTukey)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
Expand All @@ -734,6 +896,25 @@ TEST(NoiseModel, robustFunctionTukey)
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error4), 1e-8);
}


TEST(NoiseModel, robustFunctionTukeyGraduated) {
  // Graduated Tukey: large \mu yields the convex quadratic surrogate with
  // unit weights; \mu = 1 recovers the standard Tukey estimator.
  const double kThreshold = 5.0;
  const auto tukey = mEstimator::Tukey::Create(kThreshold);
  for (const double e : {1.0, 10.0}) {
    // Convex regime: unit weight for large \mu.
    DOUBLES_EQUAL(1.0, tukey->graduatedWeight(e, 1e6), 1e-6);
    // Convex regime: quadratic loss. A smaller \mu (1e5) and a looser
    // tolerance are used because Tukey is not numerically stable for very
    // large values of \mu.
    DOUBLES_EQUAL(0.5 * e * e, tukey->graduatedLoss(e, 1e5), 1e-5);
    // At \mu = 1 the graduated forms match the standard estimator.
    DOUBLES_EQUAL(tukey->weight(e), tukey->graduatedWeight(e, 1.0), 1e-6);
    DOUBLES_EQUAL(tukey->loss(e), tukey->graduatedLoss(e, 1.0), 1e-6);
  }
}

TEST(NoiseModel, robustFunctionAsymmetricTukey)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
Expand Down Expand Up @@ -762,6 +943,24 @@ TEST(NoiseModel, robustFunctionDCS)
DOUBLES_EQUAL(0.9900990099, dcs->loss(error2), 1e-8);
}

TEST(NoiseModel, robustFunctionDCSGraduated) {
  // Graduated DCS: large \mu yields the convex quadratic surrogate with unit
  // weights; \mu = 1 recovers the standard DCS estimator. Note the convex
  // limit asserted here is e^2 (no 1/2 factor), matching the DCS loss
  // convention used by the sibling robustFunctionDCS test.
  const double kThreshold = 5.0;
  const auto dcs = mEstimator::DCS::Create(kThreshold);
  for (const double e : {1.0, 10.0}) {
    // Convex regime: unit weight and quadratic loss for large \mu.
    DOUBLES_EQUAL(1.0, dcs->graduatedWeight(e, 1e12), 1e-6);
    DOUBLES_EQUAL(e * e, dcs->graduatedLoss(e, 1e12), 1e-6);
    // At \mu = 1 the graduated forms match the standard estimator.
    DOUBLES_EQUAL(dcs->weight(e), dcs->graduatedWeight(e, 1.0), 1e-6);
    DOUBLES_EQUAL(dcs->loss(e), dcs->graduatedLoss(e, 1.0), 1e-6);
  }
}

TEST(NoiseModel, robustFunctionL2WithDeadZone)
{
const double k = 1.0, e0 = -10.0, e1 = -1.01, e2 = -0.99, e3 = 0.99, e4 = 1.01, e5 = 10.0;
Expand Down Expand Up @@ -809,9 +1008,11 @@ TEST(NoiseModel, robustNoiseGemanMcClure)
const double a00 = 1.0, a01 = 10.0, a10 = 100.0, a11 = 1000.0;
Matrix A = (Matrix(2, 2) << a00, a01, a10, a11).finished();
Vector b = Vector2(error1, error2);
const Robust::shared_ptr robust = Robust::Create(
mEstimator::GemanMcClure::Create(k, mEstimator::GemanMcClure::Scalar),
Unit::Create(2));
const Robust::shared_ptr robust =
Robust::Create(mEstimator::GemanMcClure::Create(
k, mEstimator::GemanMcClure::GradScheme::STANDARD,
mEstimator::GemanMcClure::Scalar),
Unit::Create(2));

robust->WhitenSystem(A, b);

Expand All @@ -838,8 +1039,10 @@ TEST(NoiseModel, robustNoiseTLS)
Matrix A = (Matrix(2, 2) << a00, a01, a10, a11).finished();
Vector b = Vector2(error1, error2);
const Robust::shared_ptr robust = Robust::Create(
mEstimator::TruncatedLeastSquares::Create(k, mEstimator::TruncatedLeastSquares::Scalar),
Unit::Create(2));
mEstimator::TruncatedLeastSquares::Create(
k, mEstimator::TruncatedLeastSquares::GradScheme::STANDARD,
mEstimator::TruncatedLeastSquares::Scalar),
Unit::Create(2));

robust->WhitenSystem(A, b);

Expand Down Expand Up @@ -908,6 +1111,7 @@ TEST(NoiseModel, robustNoiseCustomHuber) {
const auto abs_e = std::abs(e);
return abs_e <= k ? abs_e * abs_e / 2.0 : k * abs_e - k * k / 2.0;
},
std::nullopt, std::nullopt,
noiseModel::mEstimator::Custom::Scalar, "Huber"),
Unit::Create(2));

Expand All @@ -922,8 +1126,7 @@ TEST(NoiseModel, robustNoiseCustomHuber) {
DOUBLES_EQUAL(sqrt(k / 100.0) * 1000.0, A(1, 1), 1e-8);
}

TEST(NoiseModel, lossFunctionAtZero)
{
TEST(NoiseModel, lossFunctionAtZero) {
const double k = 5.0;
auto fair = mEstimator::Fair::Create(k);
DOUBLES_EQUAL(fair->loss(0), 0, 1e-8);
Expand Down Expand Up @@ -955,8 +1158,48 @@ TEST(NoiseModel, lossFunctionAtZero)
auto assy_tukey = mEstimator::AsymmetricTukey::Create(k);
DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8);
DOUBLES_EQUAL(lsdz->weight(0), 0, 1e-8);
auto tls = mEstimator::TruncatedLeastSquares::Create(k);
DOUBLES_EQUAL(tls->loss(0), 0, 1e-8);
DOUBLES_EQUAL(tls->weight(0), 1, 1e-8);
}

TEST(NoiseModel, lossFunctionAtZeroGraduated) {
  // Checks the zero-residual invariants of every graduated m-estimator:
  // graduatedLoss(0, mu) == 0 and graduatedWeight(0, mu) == 1, so a perfect
  // measurement is never penalized or down-weighted regardless of \mu.
  const double k = 5.0;   // robustness threshold shared by all estimators
  const double mu = 10;   // arbitrary graduation parameter
  auto huber = mEstimator::Huber::Create(k);
  DOUBLES_EQUAL(huber->graduatedLoss(0, mu), 0, 1e-8);
  DOUBLES_EQUAL(huber->graduatedWeight(0, mu), 1, 1e-8);
  auto cauchy = mEstimator::Cauchy::Create(k);
  DOUBLES_EQUAL(cauchy->graduatedLoss(0, mu), 0, 1e-8);
  DOUBLES_EQUAL(cauchy->graduatedWeight(0, mu), 1, 1e-8);
  auto gmc = mEstimator::GemanMcClure::Create(k);
  DOUBLES_EQUAL(gmc->graduatedLoss(0, mu), 0, 1e-8);
  DOUBLES_EQUAL(gmc->graduatedWeight(0, mu), 1, 1e-8);
  // Scale-invariant GMC is probed at mu = 0.5 — presumably because this
  // scheme uses \mu in [0, 1] rather than an unbounded \mu; see the
  // robustFunctionGemanMcClureGraduatedScaleInvariant test.
  auto gmc_si = mEstimator::GemanMcClure::Create(
      k, mEstimator::GemanMcClure::GradScheme::SCALE_INVARIANT);
  DOUBLES_EQUAL(gmc_si->graduatedLoss(0, 0.5), 0, 1e-8);
  DOUBLES_EQUAL(gmc_si->graduatedWeight(0, 0.5), 1, 1e-8);
  auto welsch = mEstimator::Welsch::Create(k);
  DOUBLES_EQUAL(welsch->graduatedLoss(0, mu), 0, 1e-8);
  DOUBLES_EQUAL(welsch->graduatedWeight(0, mu), 1, 1e-8);
  auto tukey = mEstimator::Tukey::Create(k);
  DOUBLES_EQUAL(tukey->graduatedLoss(0, mu), 0, 1e-8);
  DOUBLES_EQUAL(tukey->graduatedWeight(0, mu), 1, 1e-8);
  auto dcs = mEstimator::DCS::Create(k);
  DOUBLES_EQUAL(dcs->graduatedLoss(0, mu), 0, 1e-8);
  DOUBLES_EQUAL(dcs->graduatedWeight(0, mu), 1, 1e-8);
  // All three TruncatedLeastSquares graduation schemes must share the same
  // zero-residual behavior.
  auto tls_std = mEstimator::TruncatedLeastSquares::Create(
      k, mEstimator::TruncatedLeastSquares::GradScheme::STANDARD);
  DOUBLES_EQUAL(tls_std->graduatedLoss(0, mu), 0, 1e-8);
  DOUBLES_EQUAL(tls_std->graduatedWeight(0, mu), 1, 1e-8);
  auto tls_lin = mEstimator::TruncatedLeastSquares::Create(
      k, mEstimator::TruncatedLeastSquares::GradScheme::GNC_LINEAR);
  DOUBLES_EQUAL(tls_lin->graduatedLoss(0, mu), 0, 1e-8);
  DOUBLES_EQUAL(tls_lin->graduatedWeight(0, mu), 1, 1e-8);
  // NOTE(review): for GNC_SUPERLINEAR only the weight invariant is asserted
  // (no graduatedLoss check) — confirm whether the loss is intentionally
  // omitted for this scheme.
  auto tls_sup = mEstimator::TruncatedLeastSquares::Create(
      k, mEstimator::TruncatedLeastSquares::GradScheme::GNC_SUPERLINEAR);
  DOUBLES_EQUAL(tls_sup->graduatedWeight(0, mu), 1, 1e-8);
}

/* ************************************************************************* */
#define TEST_GAUSSIAN(gaussian)\
Expand Down
32 changes: 11 additions & 21 deletions gtsam/nonlinear/GncOptimizer.h
Original file line number Diff line number Diff line change
Expand Up @@ -515,7 +515,7 @@ class GncOptimizer {
for (size_t k = 0; k < nfg_.size(); k++) {
if (needsWeightUpdate(factorTypes_[k])) {
double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
weights[k] = noiseModel::mEstimator::GemanMcClure::Weight(u2_k, mu * barcSq_[k]);
weights[k] = noiseModel::mEstimator::GemanMcClure::Weight(mu * barcSq_[k], u2_k);
}
}
return weights;
Expand All @@ -526,29 +526,19 @@ class GncOptimizer {
double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
switch (params_.scheduler) {
case GncScheduler::SuperLinear: {
double lowerbound = barcSq_[k];
double upperbound = ((mu + 1.0) * (mu + 1.0) / (mu * mu)) * barcSq_[k];
auto w = noiseModel::mEstimator::TruncatedLeastSquares::Weight(u2_k, lowerbound, upperbound);
if (w) {
weights[k] = *w;
}
else {
double transition_weight = std::sqrt(barcSq_[k] / u2_k) * (mu + 1.0) - mu;
weights[k] = std::clamp(transition_weight, 0.0, 1.0);
}
weights[k] = noiseModel::mEstimator::TruncatedLeastSquares::
GraduatedWeight(
noiseModel::mEstimator::TruncatedLeastSquares::
GradScheme::GNC_SUPERLINEAR,
barcSq_[k], u2_k, mu);
break;
}
case GncScheduler::Linear: { // use eq (14) in GNC paper
double upperbound = ((mu + 1.0) / mu) * barcSq_[k];
double lowerbound = (mu / (mu + 1.0)) * barcSq_[k];
auto w = noiseModel::mEstimator::TruncatedLeastSquares::Weight(u2_k, lowerbound, upperbound);
if (w) {
weights[k] = *w;
}
else {
double transition_weight = std::sqrt(barcSq_[k] * mu * (mu + 1.0) / u2_k) - mu;
weights[k] = std::clamp(transition_weight, 0.0, 1.0);
}
weights[k] = noiseModel::mEstimator::TruncatedLeastSquares::
GraduatedWeight(
noiseModel::mEstimator::TruncatedLeastSquares::
GradScheme::GNC_LINEAR,
barcSq_[k], u2_k, mu);
break;
}
default:
Expand Down
Loading
Loading