neuralnetworks/1.2/IDevice.hal  +20 −0
@@ -55,6 +55,26 @@ interface IDevice extends @1.1::IDevice {
      */
     getVersionString() generates (ErrorStatus status, string version);
 
+    /**
+     * Get the type of a given device.
+     *
+     * The device type can be used to help application developers to distribute
+     * Machine Learning workloads and other workloads such as graphical rendering.
+     * E.g., for an app which renders AR scenes based on real time object detection
+     * results, the developer could choose an ACCELERATOR type device for ML
+     * workloads, and reserve GPU for graphical rendering.
+     *
+     * @param status Error status returned from querying the device type. Must be:
+     *               - NONE if the query was successful
+     *               - DEVICE_UNAVAILABLE if driver is offline or busy
+     *               - GENERAL_FAILURE if the query resulted in an
+     *                 unspecified error
+     * @param type The DeviceType of the device. Please note, this is not a
+     *             bitfield of DeviceTypes. Each device must only be of a
+     *             single DeviceType.
+     */
+    getType() generates (ErrorStatus status, DeviceType type);
+
     /**
      * Gets the supported operations in a model.
      *
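A driver exposing this 1.2 interface answers the query through the callback that hidl-gen generates for the method. Below is a minimal driver-side sketch only, not the code of any real driver: SampleNpuDriver is a hypothetical class name, the device reports itself as a dedicated ML accelerator, and the remaining IDevice methods are elided.

// Minimal sketch of the service side of getType(); SampleNpuDriver is a
// hypothetical driver class, and only this one method is shown.
#include <android/hardware/neuralnetworks/1.2/IDevice.h>

using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_2::DeviceType;
using ::android::hardware::neuralnetworks::V1_2::IDevice;

struct SampleNpuDriver : public IDevice {
    // Report exactly one category; a dedicated NPU driver returns ACCELERATOR,
    // a GPU-backed driver would return GPU, and so on.
    Return<void> getType(getType_cb _hidl_cb) override {
        _hidl_cb(ErrorStatus::NONE, DeviceType::ACCELERATOR);
        return Void();
    }

    // ... the other IDevice methods (getCapabilities, prepareModel, etc.)
    // would be implemented here as well ...
};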
neuralnetworks/1.2/types.hal  +20 −0
@@ -179,6 +179,26 @@ enum OperationTypeRange : uint32_t {
     OPERATION_OEM_MAX = 10000,
 };
 
+/**
+ * Device types.
+ *
+ * The type of NNAPI device.
+ */
+enum DeviceType : int32_t {
+    // Leaving 0 unused as it means unknown type in NDK NNAPI. There is no
+    // HAL equivalent of unknown type and a 1.2 HAL implementation must belong
+    // to one of the categories below.
+    /** The device does not fall into any category below. */
+    OTHER = 1,
+    /** The device runs NNAPI models on single or multi-core CPU. */
+    CPU = 2,
+    /** The device can run NNAPI models and also accelerate graphics APIs such
+     * as OpenGL ES and Vulkan. */
+    GPU = 3,
+    /** Dedicated accelerator for Machine Learning workloads. */
+    ACCELERATOR = 4,
+};
+
 /**
  * Describes one operation of the model's graph.
  */
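On the application side these categories surface through the NDK (API level 29) as the ANEURALNETWORKS_DEVICE_* constants, which carry the same numeric values, with 0 (ANEURALNETWORKS_DEVICE_UNKNOWN) reserved for the case that has no HAL equivalent, as the comment above notes. The following is a minimal sketch of the selection pattern described in the IDevice.hal comment, assuming those NDK entry points; findAccelerator is an illustrative helper, not an existing API.

// Sketch: pick the first NNAPI device that reports itself as a dedicated ML
// accelerator, leaving the GPU free for graphics work.
#include <android/NeuralNetworks.h>
#include <cstdint>

ANeuralNetworksDevice* findAccelerator() {
    uint32_t deviceCount = 0;
    if (ANeuralNetworks_getDeviceCount(&deviceCount) != ANEURALNETWORKS_NO_ERROR) {
        return nullptr;
    }
    for (uint32_t i = 0; i < deviceCount; ++i) {
        ANeuralNetworksDevice* device = nullptr;
        int32_t type = ANEURALNETWORKS_DEVICE_UNKNOWN;
        if (ANeuralNetworks_getDevice(i, &device) == ANEURALNETWORKS_NO_ERROR &&
            ANeuralNetworksDevice_getType(device, &type) == ANEURALNETWORKS_NO_ERROR &&
            type == ANEURALNETWORKS_DEVICE_ACCELERATOR) {
            return device;  // owned by the runtime, no release needed
        }
    }
    return nullptr;  // caller falls back to letting the runtime choose
}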
neuralnetworks/1.2/vts/functional/BasicTests.cpp  +10 −0
@@ -45,6 +45,16 @@ TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
     });
     EXPECT_TRUE(ret.isOk());
 }
+
+// device type test
+TEST_F(NeuralnetworksHidlTest, GetDeviceTypeTest) {
+    Return<void> ret = device->getType([](ErrorStatus status, DeviceType type) {
+        EXPECT_EQ(ErrorStatus::NONE, status);
+        EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU ||
+                    type == DeviceType::GPU || type == DeviceType::ACCELERATOR);
+    });
+    EXPECT_TRUE(ret.isOk());
+}
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_2
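Outside the test harness, a client calls the method with the same callback pattern. The sketch below is illustrative only, assuming a sp<IDevice> handle already obtained from the HIDL service manager and a made-up helper name queryDeviceType; it separates transport failures (!ret.isOk()) from a driver-reported ErrorStatus and falls back to OTHER in either case.

// Sketch: query the device type once and fall back to OTHER on any failure.
#include <android/hardware/neuralnetworks/1.2/IDevice.h>

using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_2::DeviceType;
using ::android::hardware::neuralnetworks::V1_2::IDevice;

DeviceType queryDeviceType(const sp<IDevice>& device) {
    DeviceType result = DeviceType::OTHER;
    Return<void> ret = device->getType([&](ErrorStatus status, DeviceType type) {
        if (status == ErrorStatus::NONE) {
            result = type;  // driver reported a valid category
        }
    });
    // A dead or hung binder connection shows up here, not in ErrorStatus.
    return ret.isOk() ? result : DeviceType::OTHER;
}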