Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit be6f8762 authored by android-build-team Robot's avatar android-build-team Robot
Browse files

Snap for 6508895 from dd1c5aee to mainline-release

Change-Id: Id818e1ab8b299184e4090e121f661258fac971c8
parents 661bb4c5 dd1c5aee
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -602,10 +602,11 @@ b69a7615c508acf5c5201efd1bfa3262167874fc3594e2db5a3ff93addd8ac75 android.hardwar
eb2fa0c883c2185d514be0b84c179b283753ef0c1b77b45b4f359bd23bba8b75 android.hardware.neuralnetworks@1.0::IPreparedModel
92e101b30e47bdf526a01c52cecfbe730def5997b8260ab497eb949eb2a6dcdf android.hardware.neuralnetworks@1.0::types
5f6d3097ba84cb63c430787123f4de1b31c11f90b531b98eae9a8623a5ae962a android.hardware.neuralnetworks@1.1::types
c2711d8748ccbcc858d5d5ec1abf145d9ab4c0b27db8ca215d7c39665a9b6652 android.hardware.neuralnetworks@1.1::types # b/155508675, b/155662254, b/155238914
fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
ee1a0dee5be00a6fe2d4d3270068c78016dcb194d768fe07ed894ea20904037f android.hardware.neuralnetworks@1.2::types
882b1c042ff842d7c52a794fab60bf6c599ef6b100ce99fa1772615096811d05 android.hardware.neuralnetworks@1.2::types # b/155508675
9c53b727cfa9efde38ebe3914e1e95939cff29c072a1b8c8f419d24853b98831 android.hardware.neuralnetworks@1.2::types # b/155508675, b/155662254, b/155238914, b/155660285
a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -719,7 +720,7 @@ a3eddd9bbdc87e8c22764070037dd1154f1cf006e6fba93364c4f85d4c134a19 android.hardwar
ee9dc34b9925b8367b1111c72bd6d9d375432735e451572ca5a665d8516a7744 android.hardware.neuralnetworks@1.3::IPreparedModel
eee3430cc86c97c7b407495863d8fb61da6f1a64b7721e77b9b4909b11b174e9 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
acf84925f8ee0a651f2ec547ac334034de266479b93af5434f6c1f25e66aba96 android.hardware.neuralnetworks@1.3::types
07801d19ca8a4f20543dae6b4d0c4d8b87e5161d3c431e973a1839cb7915a666 android.hardware.neuralnetworks@1.3::types # b/155508675
e9080d04218e98512b63aace9ff3da52f0130238391f15cbbf7df396a3ec9072 android.hardware.neuralnetworks@1.3::types # b/155508675, b/155662254, b/155238914, b/155660285
b454df853441c12f6e425e8a60dd29fda20f5e6e39b93d1103e4b37495db38aa android.hardware.radio@1.5::IRadio
fcbb0742a88215ee7a6d7ce0825d253eb2b50391fc6c8c48667f9fd7f6d4549e android.hardware.radio@1.5::IRadioIndication
b809193970a91ca637a4b0184767315601d32e3ef3d5992ffbc7a8d14a14f015 android.hardware.radio@1.5::IRadioResponse
+6 −0
Original line number Diff line number Diff line
@@ -126,6 +126,8 @@ enum OperationType : @1.0::OperationType {
     * * 0: A tensor of the same {@link OperandType} as input0.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If all dimensions are reduced and keep_dims is false, the output
     *      shape is [1].
     */
    MEAN = 31,

@@ -232,6 +234,8 @@ enum OperationType : @1.0::OperationType {
     *      removed.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If all input dimensions are equal to 1 and are to be squeezed, the
     *      output shape is [1].
     */
    SQUEEZE = 34,

@@ -278,6 +282,8 @@ enum OperationType : @1.0::OperationType {
     *      where k is the number of bits set in shrink_axis_mask.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If shrink_axis_mask is true for all input dimensions, the output
     *      shape is [1].
     */
    STRIDED_SLICE = 35,

+8 −0
Original line number Diff line number Diff line
@@ -1955,6 +1955,8 @@ enum OperationType : int32_t {
     * * 0: A tensor of the same {@link OperandType} as input0.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If all dimensions are reduced and keep_dims is false, the output
     *      shape is [1].
     */
    MEAN = @1.1::OperationType:MEAN,

@@ -2078,6 +2080,8 @@ enum OperationType : int32_t {
     *      removed.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If all input dimensions are equal to 1 and are to be squeezed, the
     *      output shape is [1].
     */
    SQUEEZE = @1.1::OperationType:SQUEEZE,

@@ -2125,6 +2129,8 @@ enum OperationType : int32_t {
     *      where k is the number of bits set in shrink_axis_mask.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If shrink_axis_mask is true for all input dimensions, the output
     *      shape is [1].
     */
    STRIDED_SLICE = @1.1::OperationType:STRIDED_SLICE,

@@ -2239,6 +2245,7 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
     *      If input is 1-dimensional, the output shape is [1].
     */
    // There is no underscore in ARG_MAX to avoid name conflict with
    // the macro defined in libc/kernel/uapi/linux/limits.h.
@@ -2263,6 +2270,7 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
     *      If input is 1-dimensional, the output shape is [1].
     */
    ARGMIN = 40,  // See ARGMAX for naming discussion.

+8 −0
Original line number Diff line number Diff line
@@ -2012,6 +2012,8 @@ enum OperationType : int32_t {
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If all dimensions are reduced and keep_dims is false, the output
     *      shape is [1].
     */
    MEAN = @1.2::OperationType:MEAN,

@@ -2141,6 +2143,8 @@ enum OperationType : int32_t {
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If all input dimensions are equal to 1 and are to be squeezed, the
     *      output shape is [1].
     */
    SQUEEZE = @1.2::OperationType:SQUEEZE,

@@ -2190,6 +2194,8 @@ enum OperationType : int32_t {
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint must be the same as input0.
     *      If shrink_axis_mask is true for all input dimensions, the output
     *      shape is [1].
     */
    STRIDED_SLICE = @1.2::OperationType:STRIDED_SLICE,

@@ -2313,6 +2319,7 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
     *      If input is 1-dimensional, the output shape is [1].
     */
    // There is no underscore in ARG_MAX to avoid name conflict with
    // the macro defined in libc/kernel/uapi/linux/limits.h.
@@ -2338,6 +2345,7 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
     *      If input is 1-dimensional, the output shape is [1].
     */
    ARGMIN = @1.2::OperationType:ARGMIN,  // See ARGMAX for naming discussion.

+115 −0
Original line number Diff line number Diff line
@@ -246,6 +246,120 @@ AssertionResult FrontendTests::stopScanFrontend() {
    return AssertionResult(status == Result::SUCCESS);
}

// Queries the frontend for the given status types and asserts that each
// returned status matches the corresponding expected status.
//
// statusTypes:    the list of FrontendStatusType values to query.
// expectStatuses: expected FrontendStatus values, parallel to statusTypes
//                 (must be the same length).
//
// Requires that the frontend has already been opened; fails the current
// test (via gtest ASSERT_*) on any mismatch or on a failed getStatus call.
void FrontendTests::verifyFrontendStatus(vector<FrontendStatusType> statusTypes,
                                         vector<FrontendStatus> expectStatuses) {
    ASSERT_TRUE(mFrontend) << "Frontend is not opened yet.";
    // The two input vectors are indexed in lockstep below; catch a
    // malformed test configuration up front.
    ASSERT_TRUE(statusTypes.size() == expectStatuses.size());

    Result status;
    vector<FrontendStatus> realStatuses;

    // HIDL getStatus reports its result through a synchronous callback.
    mFrontend->getStatus(statusTypes, [&](Result result, const hidl_vec<FrontendStatus>& statuses) {
        status = result;
        realStatuses = statuses;
    });

    // Fail fast if the call itself failed; the per-field checks below
    // would otherwise compare against unpopulated data.
    ASSERT_TRUE(status == Result::SUCCESS);
    ASSERT_TRUE(realStatuses.size() == statusTypes.size());

    for (size_t i = 0; i < statusTypes.size(); i++) {
        FrontendStatusType type = statusTypes[i];
        switch (type) {
            case FrontendStatusType::DEMOD_LOCK: {
                ASSERT_TRUE(realStatuses[i].isDemodLocked() == expectStatuses[i].isDemodLocked());
                break;
            }
            case FrontendStatusType::SNR: {
                ASSERT_TRUE(realStatuses[i].snr() == expectStatuses[i].snr());
                break;
            }
            case FrontendStatusType::BER: {
                ASSERT_TRUE(realStatuses[i].ber() == expectStatuses[i].ber());
                break;
            }
            case FrontendStatusType::PER: {
                ASSERT_TRUE(realStatuses[i].per() == expectStatuses[i].per());
                break;
            }
            case FrontendStatusType::PRE_BER: {
                ASSERT_TRUE(realStatuses[i].preBer() == expectStatuses[i].preBer());
                break;
            }
            case FrontendStatusType::SIGNAL_QUALITY: {
                ASSERT_TRUE(realStatuses[i].signalQuality() == expectStatuses[i].signalQuality());
                break;
            }
            case FrontendStatusType::SIGNAL_STRENGTH: {
                ASSERT_TRUE(realStatuses[i].signalStrength() == expectStatuses[i].signalStrength());
                break;
            }
            case FrontendStatusType::SYMBOL_RATE: {
                ASSERT_TRUE(realStatuses[i].symbolRate() == expectStatuses[i].symbolRate());
                break;
            }
            case FrontendStatusType::FEC: {
                ASSERT_TRUE(realStatuses[i].innerFec() == expectStatuses[i].innerFec());
                break;
            }
            case FrontendStatusType::MODULATION: {
                // TODO: check modulation status
                break;
            }
            case FrontendStatusType::SPECTRAL: {
                ASSERT_TRUE(realStatuses[i].inversion() == expectStatuses[i].inversion());
                break;
            }
            case FrontendStatusType::LNB_VOLTAGE: {
                ASSERT_TRUE(realStatuses[i].lnbVoltage() == expectStatuses[i].lnbVoltage());
                break;
            }
            case FrontendStatusType::PLP_ID: {
                ASSERT_TRUE(realStatuses[i].plpId() == expectStatuses[i].plpId());
                break;
            }
            case FrontendStatusType::EWBS: {
                ASSERT_TRUE(realStatuses[i].isEWBS() == expectStatuses[i].isEWBS());
                break;
            }
            case FrontendStatusType::AGC: {
                ASSERT_TRUE(realStatuses[i].agc() == expectStatuses[i].agc());
                break;
            }
            case FrontendStatusType::LNA: {
                ASSERT_TRUE(realStatuses[i].isLnaOn() == expectStatuses[i].isLnaOn());
                break;
            }
            case FrontendStatusType::LAYER_ERROR: {
                // Layer error is a per-layer vector; compare element-wise.
                // Use a distinct index variable so the outer loop index is
                // not shadowed. (Also fixes the "Layber" typo.)
                vector<bool> realLayerError = realStatuses[i].isLayerError();
                vector<bool> expectLayerError = expectStatuses[i].isLayerError();
                ASSERT_TRUE(realLayerError.size() == expectLayerError.size());
                for (size_t j = 0; j < realLayerError.size(); j++) {
                    ASSERT_TRUE(realLayerError[j] == expectLayerError[j]);
                }
                break;
            }
            case FrontendStatusType::MER: {
                ASSERT_TRUE(realStatuses[i].mer() == expectStatuses[i].mer());
                break;
            }
            case FrontendStatusType::FREQ_OFFSET: {
                ASSERT_TRUE(realStatuses[i].freqOffset() == expectStatuses[i].freqOffset());
                break;
            }
            case FrontendStatusType::HIERARCHY: {
                ASSERT_TRUE(realStatuses[i].hierarchy() == expectStatuses[i].hierarchy());
                break;
            }
            case FrontendStatusType::RF_LOCK: {
                ASSERT_TRUE(realStatuses[i].isRfLocked() == expectStatuses[i].isRfLocked());
                break;
            }
            case FrontendStatusType::ATSC3_PLP_INFO:
                // TODO: verify plpinfo
                break;
            default:
                // Status types without a verification rule are skipped.
                continue;
        }
    }
}

AssertionResult FrontendTests::tuneFrontend(FrontendConfig config) {
    EXPECT_TRUE(mFrontendCallback)
            << "test with openFrontendById/setFrontendCallback/getFrontendInfo first.";
@@ -294,6 +408,7 @@ void FrontendTests::tuneTest(FrontendConfig frontendConf) {
    ASSERT_TRUE(openFrontendById(feId));
    ASSERT_TRUE(setFrontendCallback());
    ASSERT_TRUE(tuneFrontend(frontendConf));
    verifyFrontendStatus(frontendConf.tuneStatusTypes, frontendConf.expectTuneStatuses);
    ASSERT_TRUE(stopTuneFrontend());
    ASSERT_TRUE(closeFrontend());
}
Loading