Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 594cc78a authored by Michael Butler's avatar Michael Butler
Browse files

Remove uid from NN HIDL adapter

Having the adapter retrieve the uid is redundant because the implementor
can already obtain the uid directly by calling
IPCThreadState::self()->getCallingUid().

Bug: N/A
Test: mma
Change-Id: Ifeffea053cb92556be1aae8b17a94fafa1ac98e0
parent 53455639
Loading
Loading
Loading
Loading
+5 −4
Original line number Diff line number Diff line
@@ -20,7 +20,6 @@
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <nnapi/IDevice.h>
#include <nnapi/Types.h>
#include <sys/types.h>
#include <functional>
#include <memory>

@@ -37,10 +36,12 @@ using Task = std::function<void()>;
/**
 * A type-erased executor which executes a task asynchronously.
 *
 * This executor is also provided with an Application ID (Android User ID) and an optional deadline
 * for when the caller expects is the upper bound for the amount of time to complete the task.
 * This executor is also provided an optional deadline that the caller expects to be the upper
 * bound on the amount of time needed to complete the task. If needed, the Executor can retrieve
 * the Application ID (Android User ID) by calling IPCThreadState::self()->getCallingUid(),
 * declared in hwbinder/IPCThreadState.h.
 */
using Executor = std::function<void(Task, uid_t, nn::OptionalTimePoint)>;
using Executor = std::function<void(Task, nn::OptionalTimePoint)>;

/**
 * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
+1 −2
Original line number Diff line number Diff line
@@ -39,7 +39,7 @@ namespace android::hardware::neuralnetworks::adapter {
// Class that adapts nn::IPreparedModel to V1_3::IPreparedModel.
class PreparedModel final : public V1_3::IPreparedModel {
  public:
    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId);
    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor);

    Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
                                      const sp<V1_0::IExecutionCallback>& callback) override;
@@ -71,7 +71,6 @@ class PreparedModel final : public V1_3::IPreparedModel {
  private:
    const nn::SharedPreparedModel kPreparedModel;
    const Executor kExecutor;
    const uid_t kUserId;
};

}  // namespace android::hardware::neuralnetworks::adapter
+1 −2
Original line number Diff line number Diff line
@@ -21,7 +21,6 @@
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <nnapi/IDevice.h>
#include <nnapi/Types.h>
#include <sys/types.h>

#include <functional>
#include <memory>
@@ -37,7 +36,7 @@ sp<V1_3::IDevice> adapt(nn::SharedDevice device, Executor executor) {
}

sp<V1_3::IDevice> adapt(nn::SharedDevice device) {
    Executor defaultExecutor = [](Task task, uid_t /*uid*/, nn::OptionalTimePoint /*deadline*/) {
    Executor defaultExecutor = [](Task task, nn::OptionalTimePoint /*deadline*/) {
        std::thread(std::move(task)).detach();
    };
    return adapt(std::move(device), std::move(defaultExecutor));
+22 −32
Original line number Diff line number Diff line
@@ -28,7 +28,6 @@
#include <android/hardware/neuralnetworks/1.3/IDevice.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <hwbinder/IPCThreadState.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
@@ -43,7 +42,6 @@
#include <nnapi/hal/1.2/Utils.h>
#include <nnapi/hal/1.3/Conversions.h>
#include <nnapi/hal/1.3/Utils.h>
#include <sys/types.h>

#include <memory>

@@ -64,12 +62,11 @@ auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>

using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;

sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor,
                                     uid_t userId) {
sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor) {
    if (preparedModel == nullptr) {
        return nullptr;
    }
    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor), userId);
    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor));
}

void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
@@ -108,15 +105,14 @@ void notify(V1_3::IPreparedModelCallback* callback, nn::ErrorStatus status,
}

template <typename CallbackType>
void notify(CallbackType* callback, PrepareModelResult result, Executor executor, uid_t userId) {
void notify(CallbackType* callback, PrepareModelResult result, Executor executor) {
    if (!result.has_value()) {
        const auto [message, status] = std::move(result).error();
        LOG(ERROR) << message;
        notify(callback, status, nullptr);
    } else {
        auto preparedModel = std::move(result).value();
        auto hidlPreparedModel =
                adaptPreparedModel(std::move(preparedModel), std::move(executor), userId);
        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel), std::move(executor));
        notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
    }
}
@@ -137,13 +133,12 @@ nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Execu

    auto nnModel = NN_TRY(convertInput(model));

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModel = std::move(nnModel), userId, executor, callback] {
    Task task = [device, nnModel = std::move(nnModel), executor, callback] {
        auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
                                           nn::Priority::DEFAULT, {}, {}, {}, {});
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}
@@ -159,13 +154,12 @@ nn::GeneralResult<void> prepareModel_1_1(const nn::SharedDevice& device, const E
    auto nnModel = NN_TRY(convertInput(model));
    const auto nnPreference = NN_TRY(convertInput(preference));

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModel = std::move(nnModel), nnPreference, userId, executor, callback] {
    Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
        auto result =
                device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {}, {});
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}
@@ -187,15 +181,14 @@ nn::GeneralResult<void> prepareModel_1_2(const nn::SharedDevice& device, const E
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModel = std::move(nnModel), nnPreference,
                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                 nnToken, userId, executor, callback] {
                 nnToken, executor, callback] {
        auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
                                           nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}
@@ -218,15 +211,14 @@ nn::GeneralResult<void> prepareModel_1_3(
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                 nnToken, userId, executor, callback] {
                 nnToken, executor, callback] {
        auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
                                           nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, nnDeadline);
    executor(std::move(task), nnDeadline);

    return {};
}
@@ -245,13 +237,12 @@ nn::GeneralResult<void> prepareModelFromCache(const nn::SharedDevice& device,
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    Task task = [device, nnModelCache = std::move(nnModelCache),
                 nnDataCache = std::move(nnDataCache), nnToken, userId, executor, callback] {
                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
        auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}
@@ -270,13 +261,12 @@ nn::GeneralResult<void> prepareModelFromCache_1_3(
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
    auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
                 nnDataCache = std::move(nnDataCache), nnToken, userId, executor, callback] {
                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
        auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result), executor, userId);
        notify(callback.get(), std::move(result), executor);
    };
    executor(std::move(task), userId, nnDeadline);
    executor(std::move(task), nnDeadline);

    return {};
}
+12 −15
Original line number Diff line number Diff line
@@ -28,7 +28,6 @@
#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/types.h>
#include <hwbinder/IPCThreadState.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -37,7 +36,6 @@
#include <nnapi/hal/1.2/Utils.h>
#include <nnapi/hal/1.3/Conversions.h>
#include <nnapi/hal/1.3/Utils.h>
#include <sys/types.h>

#include <memory>
#include <thread>
@@ -145,7 +143,7 @@ void notify(CallbackType* callback, ExecutionResult result) {
    }
}

nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel, uid_t userId,
nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel,
                                const Executor& executor, const V1_0::Request& request,
                                const sp<V1_0::IExecutionCallback>& callback) {
    if (callback.get() == nullptr) {
@@ -164,12 +162,12 @@ nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel, ui
        auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {});
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}

nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel, uid_t userId,
nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel,
                                    const Executor& executor, const V1_0::Request& request,
                                    V1_2::MeasureTiming measure,
                                    const sp<V1_2::IExecutionCallback>& callback) {
@@ -190,12 +188,12 @@ nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel
        auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {});
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), userId, {});
    executor(std::move(task), {});

    return {};
}

nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel, uid_t userId,
nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel,
                                    const Executor& executor, const V1_3::Request& request,
                                    V1_2::MeasureTiming measure,
                                    const V1_3::OptionalTimePoint& deadline,
@@ -222,7 +220,7 @@ nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel
                preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration);
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), userId, nnDeadline);
    executor(std::move(task), nnDeadline);

    return {};
}
@@ -305,8 +303,8 @@ nn::GeneralResult<std::pair<hidl_handle, sp<V1_3::IFencedExecutionCallback>>> ex

}  // namespace

PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId)
    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)), kUserId(userId) {
PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor)
    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)) {
    CHECK(kPreparedModel != nullptr);
    CHECK(kExecutor != nullptr);
}
@@ -317,7 +315,7 @@ nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {

Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
                                                 const sp<V1_0::IExecutionCallback>& callback) {
    auto result = adapter::execute(kPreparedModel, kUserId, kExecutor, request, callback);
    auto result = adapter::execute(kPreparedModel, kExecutor, request, callback);
    if (!result.has_value()) {
        auto [message, code] = std::move(result).error();
        LOG(ERROR) << "adapter::PreparedModel::execute failed with " << code << ": " << message;
@@ -330,8 +328,7 @@ Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
Return<V1_0::ErrorStatus> PreparedModel::execute_1_2(const V1_0::Request& request,
                                                     V1_2::MeasureTiming measure,
                                                     const sp<V1_2::IExecutionCallback>& callback) {
    auto result =
            adapter::execute_1_2(kPreparedModel, kUserId, kExecutor, request, measure, callback);
    auto result = adapter::execute_1_2(kPreparedModel, kExecutor, request, measure, callback);
    if (!result.has_value()) {
        auto [message, code] = std::move(result).error();
        LOG(ERROR) << "adapter::PreparedModel::execute_1_2 failed with " << code << ": " << message;
@@ -346,8 +343,8 @@ Return<V1_3::ErrorStatus> PreparedModel::execute_1_3(
        const V1_3::OptionalTimePoint& deadline,
        const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
        const sp<V1_3::IExecutionCallback>& callback) {
    auto result = adapter::execute_1_3(kPreparedModel, kUserId, kExecutor, request, measure,
                                       deadline, loopTimeoutDuration, callback);
    auto result = adapter::execute_1_3(kPreparedModel, kExecutor, request, measure, deadline,
                                       loopTimeoutDuration, callback);
    if (!result.has_value()) {
        auto [message, code] = std::move(result).error();
        LOG(ERROR) << "adapter::PreparedModel::execute_1_3 failed with " << code << ": " << message;