Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c234af25 authored by Okan Arikan's avatar Okan Arikan
Browse files

Polynomial pose prediction.

Adds PolynomialPosePredictor. It is not enabled by default in sensord.

I will do a grid search for the best parameters before turning it on.

You can set dvr.predictor_type to 1 and restart sensord to test.

Bug: 35362498
Test: Run predictor_eval to test the numerical accuracy and performance. Set dvr.predictor_type to 1 for qualitative testing.
Change-Id: I435fcc4d141f5949047d0f9d972fcfc8f2a94519
(cherry picked from commit 0b024945982fd2630709534ce448e55657768696)
parent 07c0b54a
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -15,7 +15,10 @@
# LOCAL_PATH must point at this Android.mk's directory; $(call my-dir) resolves it.
LOCAL_PATH := $(call my-dir)

# Sources for the pose-predictor static library. Each predictor implementation
# gets its own translation unit; the trailing backslash list is terminated by
# the blank line below -- keep that blank line when editing.
sourceFiles := \
        pose_predictor.cpp \
        buffered_predictor.cpp \
        linear_pose_predictor.cpp \
        polynomial_pose_predictor.cpp \

# Include directory exported to dependents of this library.
includeFiles := \
        $(LOCAL_PATH)/include
@@ -43,7 +46,9 @@ include $(BUILD_STATIC_LIBRARY)
# Unit-test module: links the test sources against the static library built above.
include $(CLEAR_VARS)
LOCAL_MODULE_TAGS := optional
# One *_tests.cpp per predictor implementation in sourceFiles.
LOCAL_SRC_FILES := \
        pose_predictor_tests.cpp \
        linear_pose_predictor_tests.cpp \
        polynomial_pose_predictor_tests.cpp \

LOCAL_STATIC_LIBRARIES := libposepredictor $(staticLibraries)
LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+40 −0
Original line number Diff line number Diff line
#include <private/dvr/buffered_predictor.h>

namespace android {
namespace dvr {

// Pre-size the ring buffer with |buffer_size| default-constructed samples.
BufferedPredictor::BufferedPredictor(size_t buffer_size)
    : buffer_(buffer_size) {}

// Stores |sample| as the newest entry of the ring buffer. A sample whose
// timestamp equals the newest buffered one overwrites it in place instead of
// consuming a new slot.
void BufferedPredictor::BufferSample(const Sample& sample) {
  const auto& prev_sample = buffer_[current_pose_index_];

  // If we are updating a sample (the same time stamp), do not advance the
  // counter.
  if (sample.time_ns != prev_sample.time_ns) {
    current_pose_index_ = (current_pose_index_ + 1) % buffer_.size();
  }

  buffer_[current_pose_index_] = sample;

  // Make sure the subsequent orientations are the closest in quaternion space.
  // q and -q encode the same rotation, so flipping the sign when the dot
  // product with the previous orientation is negative keeps the buffered
  // quaternions "unrolled" for component-wise fitting.
  // NOTE(review): on the very first call PrevSample(1) reads a
  // default-constructed Sample -- confirm Sample::orientation has a defined
  // default so this comparison is deterministic.
  if (PrevSample(1).orientation.coeffs().dot(sample.orientation.coeffs()) < 0) {
    // Flip the quaternion to be closest to the previous sample.
    buffer_[current_pose_index_].orientation =
        quatd(-sample.orientation.w(), -sample.orientation.x(),
              -sample.orientation.y(), -sample.orientation.z());
  }

  ++num_poses_added_;
}

// Returns the sample |index| steps behind the newest one (index 0 == newest).
const PosePredictor::Sample& BufferedPredictor::PrevSample(size_t index) const {
  // Reaching back further than the ring buffer holds is a programming error.
  assert(index < buffer_.size());
  // Walk backwards from the newest slot, wrapping around the ring. Adding
  // buffer_.size() before subtracting keeps the unsigned math non-negative.
  const size_t slot =
      (current_pose_index_ + buffer_.size() - index) % buffer_.size();
  return buffer_[slot];
}

}  // namespace dvr
}  // namespace android
+42 −0
Original line number Diff line number Diff line
#ifndef ANDROID_DVR_BUFFERED_PREDICTOR_H_
#define ANDROID_DVR_BUFFERED_PREDICTOR_H_

#include <vector>

#include "pose_predictor.h"

namespace android {
namespace dvr {

// Keeps the previous n poses around in a ring buffer.
// The orientations are also unrolled so that a . b > 0 for two subsequent
// quaternions a and b.
class BufferedPredictor : public PosePredictor {
 public:
  // Creates a ring buffer retaining the last |buffer_size| pose samples.
  // explicit: a bare integer must not silently convert into a predictor.
  explicit BufferedPredictor(size_t buffer_size);
  ~BufferedPredictor() = default;

 protected:
  // Add a pose sample into the buffer. A sample with the same timestamp as
  // the newest one overwrites it; orientations are sign-flipped as needed so
  // neighboring quaternions have a non-negative dot product.
  void BufferSample(const Sample& sample);

  // Grab a previous sample.
  // index = 0: last sample
  // index = 1: the one before that
  // ...
  // |index| must be smaller than the buffer size.
  const Sample& PrevSample(size_t index) const;

  // Where we keep the last n poses.
  std::vector<Sample> buffer_;

  // Where the last valid pose is in the buffer.
  size_t current_pose_index_ = 0;

  // The number of poses we have added.
  size_t num_poses_added_ = 0;
};

}  // namespace dvr
}  // namespace android

#endif  // ANDROID_DVR_BUFFERED_PREDICTOR_H_
+219 −0
Original line number Diff line number Diff line
#ifndef ANDROID_DVR_POLYNOMIAL_POSE_PREDICTOR_H_
#define ANDROID_DVR_POLYNOMIAL_POSE_PREDICTOR_H_

#include <cmath>
#include <cstring>
#include <vector>

#include <Eigen/Dense>

#include "buffered_predictor.h"

namespace android {
namespace dvr {

// Make a polynomial prediction of the form
// y = coefficients_[0] + coefficients_[1] * t + coefficients_[2] * t^2 + ...
// where t is time and y is the position and orientation.
// We recompute the coefficients whenever we add a new sample using
// training_window previous samples.
template <size_t PolynomialDegree, size_t TrainingWindow>
class PolynomialPosePredictor : public BufferedPredictor {
 public:
  // |regularization| is the ridge (Tikhonov) constant added to the normal
  // equations; it keeps the solve well-conditioned when the sample times are
  // nearly collinear.
  explicit PolynomialPosePredictor(double regularization = 1e-9)
      : BufferedPredictor(TrainingWindow), regularization_(regularization) {
    // A degree-d polynomial has d + 1 coefficients, so we need at least that
    // many samples in the training window or the fit is underconstrained.
    // (The original condition was inverted; it rejected overconstrained fits
    // and allowed underconstrained ones.)
    static_assert(TrainingWindow >= PolynomialDegree + 1,
                  "Underconstrained polynomial regressor");
  }

  ~PolynomialPosePredictor() = default;

  // We convert pose samples into a vector for matrix arithmetic using this
  // mapping.
  enum Components {
    kPositionX = 0,
    kPositionY,
    kPositionZ,
    kOrientationX,
    kOrientationY,
    kOrientationZ,
    kOrientationW,
    kNumComponents
  };

  // Add a new sample and refit the polynomial coefficients over the training
  // window; fills |out_pose| with the prediction at the sample's own time.
  void Add(const Sample& sample, DvrPoseAsync* out_pose) override {
    // Add the sample to the ring buffer.
    BufferedPredictor::BufferSample(sample);

    Eigen::Matrix<double, TrainingWindow, kNumComponents> values;

    // Get the pose samples into matrices for fitting.
    double t_vector[TrainingWindow];
    for (size_t i = 0; i < TrainingWindow; ++i) {
      const auto& prev_sample = PrevSample(i);

      t_vector[i] = NsToT(prev_sample.time_ns);

      // Save the values we will be fitting to at each sample time.
      values(i, kPositionX) = prev_sample.position.x();
      values(i, kPositionY) = prev_sample.position.y();
      values(i, kPositionZ) = prev_sample.position.z();
      values(i, kOrientationX) = prev_sample.orientation.x();
      values(i, kOrientationY) = prev_sample.orientation.y();
      values(i, kOrientationZ) = prev_sample.orientation.z();
      values(i, kOrientationW) = prev_sample.orientation.w();
    }

    // Some transient matrices for solving for coefficient matrix.
    Eigen::Matrix<double, PolynomialDegree + 1, PolynomialDegree + 1> M;
    Eigen::Vector<double, PolynomialDegree + 1> d;
    Eigen::Vector<double, PolynomialDegree + 1> p;

    // Create a polynomial fit for each component.
    for (size_t component = 0; component < kNumComponents; ++component) {
      // Regularized least squares:
      //   A = one row [ 1 t t^2 ... ] per sample
      //   x = [ coefficients[0] coefficients[1] ... ]'
      //   b = the values of this component at each sample time
      // We solve the normal equations
      //   (A' * A + regularization * I) * x = A' * b.
      //
      // The loop below accumulates:
      //   M = A' * A + regularization * I
      //   d = A' * b
      // so we can then solve M * coefficients = d.

      M.setIdentity();
      d.setZero();
      p[0] = 1;

      // M = regularization * I
      M = M * regularization_;

      // Accumulate the poses in the training window.
      for (size_t i = 0; i < TrainingWindow; ++i) {
        // Compute the polynomial basis [ 1 t t^2 ... ] at this sample.
        for (size_t j = 1; j <= PolynomialDegree; ++j) {
          p[j] = p[j - 1] * t_vector[i];
        }

        // Accumulate the left and right hand sides.
        M = M + p * p.transpose();
        d = d + p * values(i, component);
      }

      // M is symmetric, positive semi-definite.
      // Note: This is not the most accurate solver out there but is fast.
      coefficients_.row(component) = Eigen::LLT<Eigen::MatrixXd>(M).solve(d);
    }

    // Fill out the out_pose at this sample.
    Predict(sample.time_ns, sample.time_ns, out_pose);
  }

  // Predict using the polynomial coefficients.
  // Fills translation/orientation for both eyes plus finite-difference
  // velocity estimates around the midpoint of the two times.
  void Predict(int64_t left_time_ns, int64_t right_time_ns,
               DvrPoseAsync* out_pose) const override {
    // Predict the left side.
    const auto left = SamplePolynomial(left_time_ns);
    out_pose->translation = {static_cast<float>(left[kPositionX]),
                             static_cast<float>(left[kPositionY]),
                             static_cast<float>(left[kPositionZ])};
    out_pose->orientation = normalize(left[kOrientationX], left[kOrientationY],
                                      left[kOrientationZ], left[kOrientationW]);

    // Predict the right side.
    const auto right = SamplePolynomial(right_time_ns);
    out_pose->right_translation = {static_cast<float>(right[kPositionX]),
                                   static_cast<float>(right[kPositionY]),
                                   static_cast<float>(right[kPositionZ])};
    out_pose->right_orientation =
        normalize(right[kOrientationX], right[kOrientationY],
                  right[kOrientationZ], right[kOrientationW]);

    // Finite differencing to estimate the velocities: sample the polynomial
    // kFiniteDifferenceNs apart, centered on the mid-time.
    const auto a = SamplePolynomial(
        (left_time_ns + right_time_ns - kFiniteDifferenceNs) / 2);
    const auto b = SamplePolynomial(
        (left_time_ns + right_time_ns + kFiniteDifferenceNs) / 2);

    out_pose->velocity = {static_cast<float>((b[kPositionX] - a[kPositionX]) /
                                             NsToSeconds(kFiniteDifferenceNs)),
                          static_cast<float>((b[kPositionY] - a[kPositionY]) /
                                             NsToSeconds(kFiniteDifferenceNs)),
                          static_cast<float>((b[kPositionZ] - a[kPositionZ]) /
                                             NsToSeconds(kFiniteDifferenceNs)),
                          0.0f};

    // Get the predicted orientations into quaternions, which are probably not
    // quite unit.
    const quatd a_orientation(a[kOrientationW], a[kOrientationX],
                              a[kOrientationY], a[kOrientationZ]);
    const quatd b_orientation(b[kOrientationW], b[kOrientationX],
                              b[kOrientationY], b[kOrientationZ]);
    const auto angular_velocity =
        AngularVelocity(a_orientation.normalized(), b_orientation.normalized(),
                        NsToSeconds(kFiniteDifferenceNs));

    out_pose->angular_velocity = {static_cast<float>(angular_velocity[0]),
                                  static_cast<float>(angular_velocity[1]),
                                  static_cast<float>(angular_velocity[2]),
                                  0.0f};
    out_pose->timestamp_ns = left_time_ns;
    out_pose->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
    memset(out_pose->pad, 0, sizeof(out_pose->pad));
  }

 private:
  // Take a quaternion and return a normalized version in a float32x4_t.
  static float32x4_t normalize(double x, double y, double z, double w) {
    const auto l = std::sqrt(x * x + y * y + z * z + w * w);
    return {static_cast<float>(x / l), static_cast<float>(y / l),
            static_cast<float>(z / l), static_cast<float>(w / l)};
  }

  // Evaluate the polynomial at a particular time.
  Eigen::Vector<double, kNumComponents> SamplePolynomial(
      int64_t time_ns) const {
    const auto t = NsToT(time_ns);
    Eigen::Vector<double, PolynomialDegree + 1> polynomial;

    // Compute polynomial = [ 1 t t^2 ... ]
    // (a previously unused running-product local has been removed).
    polynomial[0] = 1;
    for (size_t degree = 1; degree <= PolynomialDegree; ++degree) {
      polynomial[degree] = polynomial[degree - 1] * t;
    }

    // The coefficients_ = [ numComponents x (polynomial degree + 1) ].
    return coefficients_ * polynomial;
  }

  // Convert a time in nanoseconds to t.
  // We could use the seconds as t but this would make it more difficult
  // to tweak the regularization amount. So we subtract the last sample time so
  // the scale of the regularization constant doesn't change as a function of
  // time.
  double NsToT(int64_t time_ns) const {
    return NsToSeconds(time_ns - buffer_[current_pose_index_].time_ns);
  }

  // The ridge regularization constant.
  double regularization_;

  // This is where we store the polynomial coefficients.
  Eigen::Matrix<double, kNumComponents, PolynomialDegree + 1> coefficients_;
};

// Some common polynomial types, explicitly instantiated in the .cpp so other
// translation units don't re-instantiate them.
// Note: <1, 2> is a two-sample linear fit and gets no alias here.
extern template class PolynomialPosePredictor<1, 2>;
extern template class PolynomialPosePredictor<2, 3>;
extern template class PolynomialPosePredictor<3, 4>;
extern template class PolynomialPosePredictor<4, 5>;

using QuadricPosePredictor = PolynomialPosePredictor<2, 3>;
using CubicPosePredictor = PolynomialPosePredictor<3, 4>;
using QuarticPosePredictor = PolynomialPosePredictor<4, 5>;
}  // namespace dvr
}  // namespace android

#endif  // ANDROID_DVR_POLYNOMIAL_POSE_PREDICTOR_H_
+13 −0
Original line number Diff line number Diff line
@@ -17,6 +17,9 @@ class PosePredictor {
  PosePredictor() = default;
  virtual ~PosePredictor() = default;

  // The nanoseconds to use for finite differencing.
  static constexpr int64_t kFiniteDifferenceNs = 100;

  // Encapsulates a pose sample.
  struct Sample {
    vec3d position = vec3d::Zero();
@@ -24,6 +27,16 @@ class PosePredictor {
    int64_t time_ns = 0;
  };

  // Compute the angular velocity from orientation start_orientation to
  // end_orientation in delta_time.
  static vec3d AngularVelocity(const quatd& start_orientation,
                               const quatd& end_orientation, double delta_time);

  // Initialize the out pose from a sample.
  static void InitializeFromSample(const Sample& sample, DvrPoseAsync* out_pose,
                                   const vec3d& velocity,
                                   const vec3d& angular_velocity);

  // Add a pose sample coming from the sensors.
  // Returns this sample as a dvr pose.
  //
Loading