
Commit 7ac691e1 authored by Siarhei Vishniakou

Calculate predictions based on PointerCoords data

Previously, the predictions were based on the MotionEvent's getRawX,
getRawY APIs, which do some additional processing on the values of the
PointerCoords.

That caused the prediction API to work correctly in portrait mode, but
not in landscape, because the transform was getting missed.

Now, we will use the PointerCoords directly. Confirmed that this fixes
the issue in both portrait and landscape modes.

Bug: 210158587
Test: m MotionPrediction && adb install $ANDROID_PRODUCT_OUT/system/app/MotionPrediction/MotionPrediction.apk
Change-Id: Iba0667226d802ab344ec689c654f9cfbf073b622
parent 28ce35e5
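Why this only shows up in landscape: getRawX/getRawY return values with extra processing applied on top of the stored PointerCoords, and per the commit message that extra transform is what was being missed. The toy program below is not the framework code; Point, applyTransform, and the 90-degree rotation are invented for illustration. It only shows why a missed or mismatched transform is invisible in portrait (where the transform is effectively the identity) but visibly wrong in landscape:

// Hypothetical illustration only: in portrait the transform is the identity,
// so skipping it changes nothing; in landscape (a 90-degree rotation here)
// it remaps the axes, so mixing transformed and untransformed coordinates
// produces wrong results. This does not use the real android::ui::Transform.
#include <cstdio>

struct Point { float x; float y; };

// Toy "raw transform": identity in portrait, 90-degree rotation into a
// display of width displayWidth in landscape.
Point applyTransform(Point raw, bool landscape, float displayWidth) {
    if (!landscape) return raw;                 // portrait: identity
    return Point{displayWidth - raw.y, raw.x};  // landscape: rotate 90 degrees
}

int main() {
    const Point raw{100.0f, 200.0f};
    const float displayWidth = 1080.0f;

    const Point portrait = applyTransform(raw, /*landscape=*/false, displayWidth);
    const Point landscape = applyTransform(raw, /*landscape=*/true, displayWidth);

    // Portrait output equals the raw coordinates; landscape output does not,
    // so a prediction computed from the wrong source diverges only in landscape.
    std::printf("portrait:  (%.1f, %.1f)\n", portrait.x, portrait.y);
    std::printf("landscape: (%.1f, %.1f)\n", landscape.x, landscape.y);
    return 0;
}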
+11 −6
@@ -78,16 +78,19 @@ std::vector<std::unique_ptr<MotionEvent>> MotionPredictor::predict(nsecs_t times

     for (size_t i = 0; i < event.getPointerCount(); i++) {
         const int32_t pointerId = event.getPointerId(i);
+        const PointerCoords* currentPointerCoords = event.getRawPointerCoords(i);
+        const float currentX = currentPointerCoords->getAxisValue(AMOTION_EVENT_AXIS_X);
+        const float currentY = currentPointerCoords->getAxisValue(AMOTION_EVENT_AXIS_Y);
+
         PointerCoords coords;
         coords.clear();

         ssize_t index = previous.findPointerIndex(pointerId);
         if (index >= 0) {
             // We have old data for this pointer. Compute the prediction.
-            const float oldX = previous.getRawX(index);
-            const float oldY = previous.getRawY(index);
-            const float currentX = event.getRawX(i);
-            const float currentY = event.getRawY(i);
+            const PointerCoords* oldPointerCoords = previous.getRawPointerCoords(index);
+            const float oldX = oldPointerCoords->getAxisValue(AMOTION_EVENT_AXIS_X);
+            const float oldY = oldPointerCoords->getAxisValue(AMOTION_EVENT_AXIS_Y);

             // Let's do a linear interpolation while waiting for a real model
             const float scale =
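The hunk is cut off at the scale computation, but the comment above it says the predictor does a linear interpolation (really an extrapolation) from the previous and current samples while waiting for a real model. A minimal standalone sketch of that idea, assuming the scale is the ratio of the prediction horizon to the time elapsed between the two samples; Sample and predictLinear are illustrative names, not the MotionPredictor code:

// Standalone sketch of linear extrapolation from two samples, in the spirit
// of the "linear interpolation while waiting for a real model" comment.
// The names and the exact scale formula are assumptions for illustration.
#include <cstdint>
#include <cstdio>

struct Sample {
    int64_t timeNanos;
    float x;
    float y;
};

// Extrapolate where the pointer will be at futureTimeNanos by extending the
// segment from the previous sample to the current one.
Sample predictLinear(const Sample& previous, const Sample& current, int64_t futureTimeNanos) {
    const float scale = static_cast<float>(futureTimeNanos - current.timeNanos) /
                        static_cast<float>(current.timeNanos - previous.timeNanos);
    return Sample{futureTimeNanos,
                  current.x + (current.x - previous.x) * scale,
                  current.y + (current.y - previous.y) * scale};
}

int main() {
    const Sample previous{0, 100.0f, 200.0f};
    const Sample current{8'000'000, 110.0f, 220.0f};                  // 8 ms later
    const Sample future = predictLinear(previous, current, 16'000'000); // 8 ms ahead

    // With constant velocity the prediction simply extends the motion:
    // expect roughly (120.0, 240.0).
    std::printf("predicted: (%.1f, %.1f)\n", future.x, future.y);
    return 0;
}

In the real code the predicted futureX/futureY are then written into the per-pointer coords via setAxisValue, as the next hunk shows.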
@@ -97,13 +100,15 @@ std::vector<std::unique_ptr<MotionEvent>> MotionPredictor::predict(nsecs_t times

             coords.setAxisValue(AMOTION_EVENT_AXIS_X, futureX);
             coords.setAxisValue(AMOTION_EVENT_AXIS_Y, futureY);
+            ALOGD_IF(isDebug(),
+                     "Prediction by %.1f ms, (%.1f, %.1f), (%.1f, %.1f) --> (%.1f, %.1f)",
+                     (futureTime - event.getEventTime()) * 1E-6, oldX, oldY, currentX, currentY,
+                     futureX, futureY);
         }

         futureCoords.push_back(coords);
     }

-    ALOGD_IF(isDebug(), "Prediction is %.1f ms away from the event",
-             (futureTime - event.getEventTime()) * 1E-6);
     /**
      * The process of adding samples is different for the first and subsequent samples:
      * 1. Add the first sample via 'initialize' as below
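The comment is truncated here in the hunk, but it describes a two-step pattern for building the predicted event: the first sample establishes the event via 'initialize', and later samples are appended. A hypothetical sketch of that pattern follows; PredictedStroke, Coords, and their methods are invented for illustration and are not the MotionEvent API:

// Hypothetical sketch of the "first sample via initialize, subsequent samples
// appended" pattern described by the comment above. Not the MotionEvent API.
#include <cstdint>
#include <vector>

struct Coords { float x; float y; };

class PredictedStroke {
public:
    // First sample: resets any previous state and stores the initial coordinates.
    void initialize(int64_t eventTimeNanos, const Coords& coords) {
        times_.clear();
        history_.clear();
        addSample(eventTimeNanos, coords);
    }

    // Subsequent samples: appended to the existing history.
    void addSample(int64_t eventTimeNanos, const Coords& coords) {
        times_.push_back(eventTimeNanos);
        history_.push_back(coords);
    }

    size_t sampleCount() const { return history_.size(); }

private:
    std::vector<int64_t> times_;
    std::vector<Coords> history_;
};

int main() {
    PredictedStroke stroke;
    stroke.initialize(16'000'000, Coords{120.0f, 240.0f});  // first predicted sample
    stroke.addSample(24'000'000, Coords{130.0f, 260.0f});   // later predicted sample
    return 0;
}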