Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8be052fe authored by Siarhei Vishniakou's avatar Siarhei Vishniakou Committed by Android (Google) Code Review
Browse files

Merge "Add native MotionPredictor"

parents bc925d8d 39147cee
Loading
Loading
Loading
Loading
+79 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <functional>
#include <memory>
#include <mutex>
#include <vector>

#include <android-base/thread_annotations.h>
#include <android/sysprop/InputProperties.sysprop.h>
#include <input/Input.h>

namespace android {

// Returns whether motion prediction is enabled via the input sysprop.
// When the property has no value, prediction defaults to enabled.
static inline bool isMotionPredictionEnabled() {
    const auto enabled = sysprop::InputProperties::enable_motion_prediction();
    return enabled.value_or(true);
}

/**
 * Given a set of MotionEvents for the current gesture, predict the motion. The returned MotionEvent
 * contains a set of samples in the future, up to "presentation time + offset".
 *
 * The typical usage is like this:
 *
 * MotionPredictor predictor(offset = MY_OFFSET);
 * predictor.setExpectedPresentationTimeNanos(NEXT_PRESENT_TIME);
 * predictor.record(DOWN_MOTION_EVENT);
 * predictor.record(MOVE_MOTION_EVENT);
 * prediction = predictor.predict();
 *
 * The presentation time should be set some time before calling .predict(). It could be set before
 * or after the recorded motion events. Must be done on every frame.
 *
 * The resulting motion event will have eventTime <= (NEXT_PRESENT_TIME + MY_OFFSET). It might
 * contain historical data, which are additional samples from the latest recorded MotionEvent's
 * eventTime to the NEXT_PRESENT_TIME + MY_OFFSET.
 *
 * The offset is used to provide additional flexibility to the caller, in case the default present
 * time (typically provided by the choreographer) does not account for some delays, or to simply
 * reduce the aggressiveness of the prediction. Offset can be both positive and negative.
 */
class MotionPredictor {
public:
    /**
     * Parameters:
     * predictionTimestampOffsetNanos: additional, constant shift to apply to the target
     * presentation time. The prediction will target the time t=(presentationTime +
     * predictionTimestampOffsetNanos).
     *
     * checkEnableMotionPrediction: the function to check whether the prediction should run. Used
     * to provide an additional way of turning prediction on and off. Can be toggled at runtime.
     */
    MotionPredictor(nsecs_t predictionTimestampOffsetNanos,
                    std::function<bool()> checkEnableMotionPrediction = isMotionPredictionEnabled);
    // Record an event of the current gesture. Only the most recent events are retained.
    void record(const MotionEvent& event);
    // Produce the predicted event(s) targeting (expected presentation time + offset). Returns an
    // empty vector when prediction is unavailable or not enough samples have been recorded.
    std::vector<std::unique_ptr<MotionEvent>> predict();
    // Whether prediction is supported for this source (currently stylus sources only), subject to
    // the enable-check function supplied at construction.
    bool isPredictionAvailable(int32_t deviceId, int32_t source);
    // Set the target presentation time; must be done on every frame. Thread-safe.
    void setExpectedPresentationTimeNanos(int64_t expectedPresentationTimeNanos);

private:
    std::mutex mLock;
    // Target presentation time for the next prediction; accessed under mLock.
    int64_t mExpectedPresentationTimeNanos GUARDED_BY(mLock) = 0;
    int64_t getExpectedPresentationTimeNanos();
    // Recent events of the current gesture. NOTE(review): not guarded by mLock — presumably
    // record()/predict() are confined to a single thread; confirm with callers.
    std::vector<MotionEvent> mEvents;
    const nsecs_t mPredictionTimestampOffsetNanos;
    const std::function<bool()> mCheckMotionPredictionEnabled;
};

} // namespace android
+3 −1
Original line number Diff line number Diff line
@@ -50,6 +50,7 @@ cc_library {
        "Keyboard.cpp",
        "KeyCharacterMap.cpp",
        "KeyLayoutMap.cpp",
        "MotionPredictor.cpp",
        "PrintTools.cpp",
        "PropertyMap.cpp",
        "TouchVideoFrame.cpp",
@@ -63,8 +64,9 @@ cc_library {

    shared_libs: [
        "libbase",
        "liblog",
        "libcutils",
        "liblog",
        "libPlatformProperties",
        "libvintf",
    ],

+156 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "MotionPredictor"

#include <input/MotionPredictor.h>

/**
 * Log debug messages about predictions.
 * Enable this via "adb shell setprop log.tag.MotionPredictor DEBUG"
 */
static bool isDebug() {
    // Consult liblog's runtime tag filter so debug output can be toggled without rebuilding.
    const bool loggable =
            __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG, ANDROID_LOG_INFO);
    return loggable;
}

namespace android {

// --- MotionPredictor ---

// Construct the predictor. The offset is a constant shift applied to the target presentation
// time; the callable provides a runtime on/off switch for prediction (defaults to the sysprop
// check declared in the header).
MotionPredictor::MotionPredictor(nsecs_t predictionTimestampOffsetNanos,
                                 std::function<bool()> checkMotionPredictionEnabled)
      : mPredictionTimestampOffsetNanos(predictionTimestampOffsetNanos),
        mCheckMotionPredictionEnabled(std::move(checkMotionPredictionEnabled)) {}

// Store a copy of the incoming event (including its historical samples), keeping only the
// most recent events.
void MotionPredictor::record(const MotionEvent& event) {
    mEvents.emplace_back();
    mEvents.back().copyFrom(&event, /*keepHistory=*/true);
    // Two samples are sufficient for extrapolation; drop anything older.
    while (mEvents.size() > 2) {
        mEvents.erase(mEvents.begin());
    }
}

/**
 * This is an example implementation that should be replaced with the actual prediction.
 * The returned MotionEvent should be similar to the incoming MotionEvent, except for the
 * fields that are predicted:
 *
 * 1) event.getEventTime
 * 2) event.getPointerCoords
 *
 * The returned event should not contain any of the real, existing data. It should only
 * contain the predicted samples.
 */
std::vector<std::unique_ptr<MotionEvent>> MotionPredictor::predict() {
    // Linear extrapolation requires at least two recorded samples.
    if (mEvents.size() < 2) {
        return {};
    }

    const MotionEvent& event = mEvents.back();
    if (!isPredictionAvailable(event.getDeviceId(), event.getSource())) {
        return {};
    }

    std::unique_ptr<MotionEvent> prediction = std::make_unique<MotionEvent>();
    std::vector<PointerCoords> futureCoords;
    // Target time = expected presentation time + the constant offset from the constructor.
    const int64_t futureTime = getExpectedPresentationTimeNanos() + mPredictionTimestampOffsetNanos;
    const nsecs_t currentTime = event.getEventTime();
    const MotionEvent& previous = mEvents.rbegin()[1];
    const nsecs_t oldTime = previous.getEventTime();
    if (currentTime == oldTime) {
        // This can happen if it's an ACTION_POINTER_DOWN event, for example.
        return {}; // prevent division by zero.
    }

    // Extrapolate each pointer of the latest event using its position in the previous event.
    for (size_t i = 0; i < event.getPointerCount(); i++) {
        const int32_t pointerId = event.getPointerId(i);
        PointerCoords coords;
        coords.clear();

        ssize_t index = previous.findPointerIndex(pointerId);
        if (index >= 0) {
            // We have old data for this pointer. Compute the prediction.
            const float oldX = previous.getRawX(index);
            const float oldY = previous.getRawY(index);
            const float currentX = event.getRawX(i);
            const float currentY = event.getRawY(i);

            // Let's do a linear interpolation while waiting for a real model
            const float scale =
                    static_cast<float>(futureTime - currentTime) / (currentTime - oldTime);
            const float futureX = currentX + (currentX - oldX) * scale;
            const float futureY = currentY + (currentY - oldY) * scale;

            coords.setAxisValue(AMOTION_EVENT_AXIS_X, futureX);
            coords.setAxisValue(AMOTION_EVENT_AXIS_Y, futureY);
        }

        // NOTE(review): pointers absent from the previous event keep cleared (0,0) coords in the
        // prediction — confirm downstream consumers handle this.
        futureCoords.push_back(coords);
    }

    ALOGD_IF(isDebug(), "Prediction is %.1f ms away from the event",
             (futureTime - event.getEventTime()) * 1E-6);
    /**
     * The process of adding samples is different for the first and subsequent samples:
     * 1. Add the first sample via 'initialize' as below
     * 2. Add subsequent samples via 'addSample'
     */
    prediction->initialize(event.getId(), event.getDeviceId(), event.getSource(),
                           event.getDisplayId(), event.getHmac(), event.getAction(),
                           event.getActionButton(), event.getFlags(), event.getEdgeFlags(),
                           event.getMetaState(), event.getButtonState(), event.getClassification(),
                           event.getTransform(), event.getXPrecision(), event.getYPrecision(),
                           event.getRawXCursorPosition(), event.getRawYCursorPosition(),
                           event.getRawTransform(), event.getDownTime(), futureTime,
                           event.getPointerCount(), event.getPointerProperties(),
                           futureCoords.data());

    // To add more predicted samples, use 'addSample':
    prediction->addSample(futureTime + 1, futureCoords.data());

    std::vector<std::unique_ptr<MotionEvent>> out;
    out.push_back(std::move(prediction));
    return out;
}

// Prediction is offered only when the runtime enable-check passes and the source is a stylus.
bool MotionPredictor::isPredictionAvailable(int32_t /*deviceId*/, int32_t source) {
    // Respect the on/off switch supplied at construction time (global flag override).
    const bool enabled = mCheckMotionPredictionEnabled();
    if (!enabled) {
        ALOGD_IF(isDebug(), "Prediction not available due to flag override");
        return false;
    }

    // Prediction is only supported for stylus sources.
    const bool isStylus = isFromSource(source, AINPUT_SOURCE_STYLUS);
    if (!isStylus) {
        ALOGD_IF(isDebug(), "Prediction not available for non-stylus source: %s",
                 inputEventSourceToString(source).c_str());
        return false;
    }

    return true;
}

// Thread-safe read of the most recently supplied presentation time.
int64_t MotionPredictor::getExpectedPresentationTimeNanos() {
    const std::lock_guard<std::mutex> lock(mLock);
    return mExpectedPresentationTimeNanos;
}

// Thread-safe update of the target presentation time; callers invoke this every frame.
void MotionPredictor::setExpectedPresentationTimeNanos(int64_t expectedPresentationTimeNanos) {
    const std::lock_guard<std::mutex> lock(mLock);
    mExpectedPresentationTimeNanos = expectedPresentationTimeNanos;
}

} // namespace android
+2 −0
Original line number Diff line number Diff line
@@ -17,6 +17,7 @@ cc_test {
        "InputDevice_test.cpp",
        "InputEvent_test.cpp",
        "InputPublisherAndConsumer_test.cpp",
        "MotionPredictor_test.cpp",
        "TouchResampling_test.cpp",
        "TouchVideoFrame_test.cpp",
        "VelocityTracker_test.cpp",
@@ -37,6 +38,7 @@ cc_test {
        "libbinder",
        "libcutils",
        "liblog",
        "libPlatformProperties",
        "libutils",
        "libvintf",
    ],
+121 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <gui/constants.h>
#include <input/Input.h>
#include <input/MotionPredictor.h>

namespace android {

constexpr int32_t DOWN = AMOTION_EVENT_ACTION_DOWN;
constexpr int32_t MOVE = AMOTION_EVENT_ACTION_MOVE;

// Builds a single-pointer stylus MotionEvent at the given raw position and event time,
// with an identity transform and fixed down time (100ns).
static MotionEvent getMotionEvent(int32_t action, float x, float y, nsecs_t eventTime) {
    MotionEvent event;
    constexpr size_t pointerCount = 1;
    std::vector<PointerProperties> pointerProperties;
    std::vector<PointerCoords> pointerCoords;
    for (size_t i = 0; i < pointerCount; i++) {
        PointerProperties properties;
        properties.clear();
        // Pointer ids are int32_t; cast explicitly to avoid an implicit narrowing from size_t.
        properties.id = static_cast<int32_t>(i);
        pointerProperties.push_back(properties);
        PointerCoords coords;
        coords.clear();
        coords.setAxisValue(AMOTION_EVENT_AXIS_X, x);
        coords.setAxisValue(AMOTION_EVENT_AXIS_Y, y);
        pointerCoords.push_back(coords);
    }

    ui::Transform identityTransform;
    event.initialize(InputEvent::nextId(), /*deviceId=*/0, AINPUT_SOURCE_STYLUS,
                     ADISPLAY_ID_DEFAULT, {0}, action, /*actionButton=*/0, /*flags=*/0,
                     AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE, /*buttonState=*/0,
                     MotionClassification::NONE, identityTransform, /*xPrecision=*/0.1,
                     /*yPrecision=*/0.2, /*xCursorPosition=*/280, /*yCursorPosition=*/540,
                     identityTransform, /*downTime=*/100, eventTime, pointerCount,
                     pointerProperties.data(), pointerCoords.data());
    return event;
}

/**
 * A linear motion should be predicted to be linear in the future
 */
TEST(MotionPredictorTest, LinearPrediction) {
    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
                              []() { return true /*enable prediction*/; });

    // Constant velocity: +1 in x and +2 in y every 10ns.
    predictor.record(getMotionEvent(DOWN, 0, 1, 0));
    predictor.record(getMotionEvent(MOVE, 1, 3, 10));
    predictor.record(getMotionEvent(MOVE, 2, 5, 20));
    predictor.record(getMotionEvent(MOVE, 3, 7, 30));
    predictor.setExpectedPresentationTimeNanos(40);

    const auto predicted = predictor.predict();
    ASSERT_EQ(1u, predicted.size());
    // The linear trend should continue: at t=40, x=4 and y=9.
    ASSERT_EQ(4, predicted[0]->getX(0));
    ASSERT_EQ(9, predicted[0]->getY(0));
}

/**
 * A still motion should be predicted to remain still
 */
TEST(MotionPredictorTest, StationaryPrediction) {
    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
                              []() { return true /*enable prediction*/; });

    // Zero velocity: the pointer stays at (0, 1) for the whole gesture.
    predictor.record(getMotionEvent(DOWN, 0, 1, 0));
    predictor.record(getMotionEvent(MOVE, 0, 1, 10));
    predictor.record(getMotionEvent(MOVE, 0, 1, 20));
    predictor.record(getMotionEvent(MOVE, 0, 1, 30));
    predictor.setExpectedPresentationTimeNanos(40);

    const auto predicted = predictor.predict();
    ASSERT_EQ(1u, predicted.size());
    // A still pointer should be predicted to remain exactly where it is.
    ASSERT_EQ(0, predicted[0]->getX(0));
    ASSERT_EQ(1, predicted[0]->getY(0));
}

TEST(MotionPredictorTest, IsPredictionAvailable) {
    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
                              []() { return true /*enable prediction*/; });

    // Prediction is offered for stylus sources only; touchscreen input is rejected.
    ASSERT_TRUE(predictor.isPredictionAvailable(/*deviceId=*/1, AINPUT_SOURCE_STYLUS));
    ASSERT_FALSE(predictor.isPredictionAvailable(/*deviceId=*/1, AINPUT_SOURCE_TOUCHSCREEN));
}

TEST(MotionPredictorTest, Offset) {
    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/1,
                              []() { return true /*enable prediction*/; });
    predictor.setExpectedPresentationTimeNanos(40);
    predictor.record(getMotionEvent(DOWN, 0, 1, 30));
    predictor.record(getMotionEvent(MOVE, 0, 1, 35));

    const auto predicted = predictor.predict();
    ASSERT_EQ(1u, predicted.size());
    // The prediction target includes the 1ns offset on top of the 40ns presentation time.
    ASSERT_GE(predicted[0]->getEventTime(), 41);
}

// When the enable-check callable returns false, predict() yields nothing and
// isPredictionAvailable() reports false for every source.
// Suite name fixed to MotionPredictorTest for consistency with the other tests in this file
// (previously "MotionPredictionTest", which split this case into a separate suite).
TEST(MotionPredictorTest, FlagDisablesPrediction) {
    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
                              []() { return false /*disable prediction*/; });
    predictor.setExpectedPresentationTimeNanos(40);
    predictor.record(getMotionEvent(DOWN, 0, 1, 30));
    predictor.record(getMotionEvent(MOVE, 0, 1, 35));
    std::vector<std::unique_ptr<MotionEvent>> predicted = predictor.predict();
    ASSERT_EQ(0u, predicted.size());
    ASSERT_FALSE(predictor.isPredictionAvailable(/*deviceId=*/1, AINPUT_SOURCE_STYLUS));
    ASSERT_FALSE(predictor.isPredictionAvailable(/*deviceId=*/1, AINPUT_SOURCE_TOUCHSCREEN));
}

} // namespace android