Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 33ea2dff authored by Eino-Ville Talvala, committed by Android (Google) Code Review
Browse files

Merge "Camera2: Move frame processing to its own thread." into jb-mr1-dev

parents 928da7e2 c8474b68
Loading
Loading
Loading
Loading
+86 −30
Original line number Diff line number Diff line
@@ -61,6 +61,7 @@ Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
        mRecordingHeapCount(kDefaultRecordingHeapCount)
{
    ATRACE_CALL();
    ALOGV("%s: Created client for camera %d", __FUNCTION__, cameraId);

    mDevice = new Camera2Device(cameraId);

@@ -80,9 +81,14 @@ status_t Camera2Client::checkPid(const char* checkLocation) const {
status_t Camera2Client::initialize(camera_module_t *module)
{
    ATRACE_CALL();
    ALOGV("%s: E", __FUNCTION__);
    ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
    status_t res;

    mFrameProcessor = new FrameProcessor(this);
    String8 frameThreadName = String8::format("Camera2Client[%d]::FrameProcessor",
            mCameraId);
    mFrameProcessor->run(frameThreadName.string());

    res = mDevice->initialize(module);
    if (res != OK) {
        ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
@@ -91,7 +97,6 @@ status_t Camera2Client::initialize(camera_module_t *module)
    }

    res = mDevice->setNotifyCallback(this);
    res = mDevice->setFrameListener(this);

    res = buildDeviceInfo();
    res = buildDefaultParameters();
@@ -113,13 +118,16 @@ status_t Camera2Client::initialize(camera_module_t *module)

Camera2Client::~Camera2Client() {
    ATRACE_CALL();
    ALOGV("%s: Camera %d: Shutting down", __FUNCTION__, mCameraId);
    ALOGV("%s: Camera %d: Shutting down client.", __FUNCTION__, mCameraId);

    mDestructionStarted = true;

    // Rewrite mClientPid to allow shutdown by CameraService
    mClientPid = getCallingPid();
    disconnect();

    mFrameProcessor->requestExit();
    ALOGV("%s: Camera %d: Shutdown complete", __FUNCTION__, mCameraId);
}

status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
@@ -315,6 +323,8 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
        write(fd, result.string(), result.size());
    }

    mFrameProcessor->dump(fd, args);

    result = "  Device dump:\n";
    write(fd, result.string(), result.size());

@@ -1986,36 +1996,76 @@ void Camera2Client::notifyAutoWhitebalance(uint8_t newState, int triggerId) {
            __FUNCTION__, newState, triggerId);
}

void Camera2Client::onNewFrameAvailable() {
// Frame-processing thread constructor. Holds only a weak reference to the
// owning Camera2Client so this thread never extends the client's lifetime;
// threadLoop() promotes it on each iteration and exits once it is gone.
// (Thread(false): presumably canCallJava == false -- confirm against the
// Thread base class.)
Camera2Client::FrameProcessor::FrameProcessor(wp<Camera2Client> client):
        Thread(false), mClient(client) {
}

// Destructor only logs; thread shutdown is driven externally via
// requestExit() (see ~Camera2Client), not from here.
Camera2Client::FrameProcessor::~FrameProcessor() {
    ALOGV("%s: Exit", __FUNCTION__);
}

// Writes a header followed by the metadata of the most recently received
// frame to fd, for inclusion in the client's dumpsys output.
// (mLastFrame.dump(fd, 2, 6): the 2 and 6 look like verbosity/indent
// arguments -- confirm against CameraMetadata::dump.)
void Camera2Client::FrameProcessor::dump(int fd, const Vector<String16>& args) {
    String8 result("    Latest received frame:\n");
    write(fd, result.string(), result.size());
    mLastFrame.dump(fd, 2, 6);
}

// One iteration of the frame-processing loop: wait (bounded by
// kWaitDuration) for a new frame from the device, then process any queued
// frames. Returns false to stop the thread once the owning client is gone.
bool Camera2Client::FrameProcessor::threadLoop() {
    status_t res;

    // Promote the weak client reference to reach the device. A failed
    // promote means the client has been destroyed: exit the thread.
    sp<Camera2Device> device;
    {
        sp<Camera2Client> client = mClient.promote();
        if (client == 0) return false;
        device = client->mDevice;
    }

    res = device->waitForNextFrame(kWaitDuration);
    if (res == OK) {
        // Re-promote: the client may have gone away while we were blocked
        // in waitForNextFrame().
        sp<Camera2Client> client = mClient.promote();
        if (client == 0) return false;
        processNewFrames(client);
    } else if (res != TIMED_OUT) {
        // TIMED_OUT is the normal idle case; anything else is a real error,
        // but we keep the thread alive and retry.
        ALOGE("Camera2Client::FrameProcessor: Error waiting for new "
                "frames: %s (%d)", strerror(-res), res);
    }

    // true = keep looping; the Thread framework calls threadLoop() again.
    return true;
}

void Camera2Client::FrameProcessor::processNewFrames(sp<Camera2Client> &client) {
    status_t res;
    CameraMetadata frame;
    while ( (res = mDevice->getNextFrame(&frame)) == OK) {
    while ( (res = client->mDevice->getNextFrame(&frame)) == OK) {
        camera_metadata_entry_t entry;
        entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
        if (entry.count == 0) {
            ALOGE("%s: Camera %d: Error reading frame number: %s (%d)",
                    __FUNCTION__, mCameraId, strerror(-res), res);
                    __FUNCTION__, client->mCameraId, strerror(-res), res);
            break;
        }

        res = processFrameFaceDetect(frame);
        res = processFaceDetect(frame, client);
        if (res != OK) break;

        mLastFrame.acquire(frame);
    }
    if (res != NOT_ENOUGH_DATA) {
        ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
                __FUNCTION__, mCameraId, strerror(-res), res);
                __FUNCTION__, client->mCameraId, strerror(-res), res);
        return;
    }

    return;
}

status_t Camera2Client::processFrameFaceDetect(const CameraMetadata &frame) {
status_t Camera2Client::FrameProcessor::processFaceDetect(
    const CameraMetadata &frame, sp<Camera2Client> &client) {
    status_t res;
    camera_metadata_ro_entry_t entry;
    bool enableFaceDetect;
    {
        LockedParameters::Key k(mParameters);
        LockedParameters::Key k(client->mParameters);
        enableFaceDetect = k.mParameters.enableFaceDetect;
    }
    entry = frame.find(ANDROID_STATS_FACE_DETECT_MODE);
@@ -2031,16 +2081,16 @@ status_t Camera2Client::processFrameFaceDetect(const CameraMetadata &frame) {
        entry = frame.find(ANDROID_STATS_FACE_RECTANGLES);
        if (entry.count == 0) {
            ALOGE("%s: Camera %d: Unable to read face rectangles",
                    __FUNCTION__, mCameraId);
                    __FUNCTION__, client->mCameraId);
            return res;
        }
        camera_frame_metadata metadata;
        metadata.number_of_faces = entry.count / 4;
        if (metadata.number_of_faces >
                mDeviceInfo->maxFaces) {
                client->mDeviceInfo->maxFaces) {
            ALOGE("%s: Camera %d: More faces than expected! (Got %d, max %d)",
                    __FUNCTION__, mCameraId,
                    metadata.number_of_faces, mDeviceInfo->maxFaces);
                    __FUNCTION__, client->mCameraId,
                    metadata.number_of_faces, client->mDeviceInfo->maxFaces);
            return res;
        }
        const int32_t *faceRects = entry.data.i32;
@@ -2048,7 +2098,7 @@ status_t Camera2Client::processFrameFaceDetect(const CameraMetadata &frame) {
        entry = frame.find(ANDROID_STATS_FACE_SCORES);
        if (entry.count == 0) {
            ALOGE("%s: Camera %d: Unable to read face scores",
                    __FUNCTION__, mCameraId);
                    __FUNCTION__, client->mCameraId);
            return res;
        }
        const uint8_t *faceScores = entry.data.u8;
@@ -2060,7 +2110,7 @@ status_t Camera2Client::processFrameFaceDetect(const CameraMetadata &frame) {
            entry = frame.find(ANDROID_STATS_FACE_LANDMARKS);
            if (entry.count == 0) {
                ALOGE("%s: Camera %d: Unable to read face landmarks",
                        __FUNCTION__, mCameraId);
                        __FUNCTION__, client->mCameraId);
                return res;
            }
            faceLandmarks = entry.data.i32;
@@ -2069,7 +2119,7 @@ status_t Camera2Client::processFrameFaceDetect(const CameraMetadata &frame) {

            if (entry.count == 0) {
                ALOGE("%s: Camera %d: Unable to read face IDs",
                        __FUNCTION__, mCameraId);
                        __FUNCTION__, client->mCameraId);
                return res;
            }
            faceIds = entry.data.i32;
@@ -2081,20 +2131,26 @@ status_t Camera2Client::processFrameFaceDetect(const CameraMetadata &frame) {
        for (int i = 0; i < metadata.number_of_faces; i++) {
            camera_face_t face;

            face.rect[0] = arrayXToNormalized(faceRects[i*4 + 0]);
            face.rect[1] = arrayYToNormalized(faceRects[i*4 + 1]);
            face.rect[2] = arrayXToNormalized(faceRects[i*4 + 2]);
            face.rect[3] = arrayYToNormalized(faceRects[i*4 + 3]);
            face.rect[0] = client->arrayXToNormalized(faceRects[i*4 + 0]);
            face.rect[1] = client->arrayYToNormalized(faceRects[i*4 + 1]);
            face.rect[2] = client->arrayXToNormalized(faceRects[i*4 + 2]);
            face.rect[3] = client->arrayYToNormalized(faceRects[i*4 + 3]);

            face.score = faceScores[i];
            if (faceDetectMode == ANDROID_STATS_FACE_DETECTION_FULL) {
                face.id = faceIds[i];
                face.left_eye[0] = arrayXToNormalized(faceLandmarks[i*6 + 0]);
                face.left_eye[1] = arrayYToNormalized(faceLandmarks[i*6 + 1]);
                face.right_eye[0] = arrayXToNormalized(faceLandmarks[i*6 + 2]);
                face.right_eye[1] = arrayYToNormalized(faceLandmarks[i*6 + 3]);
                face.mouth[0] = arrayXToNormalized(faceLandmarks[i*6 + 4]);
                face.mouth[1] = arrayYToNormalized(faceLandmarks[i*6 + 5]);
                face.left_eye[0] =
                        client->arrayXToNormalized(faceLandmarks[i*6 + 0]);
                face.left_eye[1] =
                        client->arrayYToNormalized(faceLandmarks[i*6 + 1]);
                face.right_eye[0] =
                        client->arrayXToNormalized(faceLandmarks[i*6 + 2]);
                face.right_eye[1] =
                        client->arrayYToNormalized(faceLandmarks[i*6 + 3]);
                face.mouth[0] =
                        client->arrayXToNormalized(faceLandmarks[i*6 + 4]);
                face.mouth[1] =
                        client->arrayYToNormalized(faceLandmarks[i*6 + 5]);
            } else {
                face.id = 0;
                face.left_eye[0] = face.left_eye[1] = -2000;
@@ -2106,9 +2162,9 @@ status_t Camera2Client::processFrameFaceDetect(const CameraMetadata &frame) {

        metadata.faces = faces.editArray();
        {
            Mutex::Autolock iccl(mICameraClientLock);
            if (mCameraClient != NULL) {
                mCameraClient->dataCallback(CAMERA_MSG_PREVIEW_METADATA,
            Mutex::Autolock iccl(client->mICameraClientLock);
            if (client->mCameraClient != NULL) {
                client->mCameraClient->dataCallback(CAMERA_MSG_PREVIEW_METADATA,
                        NULL, &metadata);
            }
        }
+23 −6
Original line number Diff line number Diff line
@@ -33,8 +33,7 @@ namespace android {
 */
class Camera2Client :
        public CameraService::Client,
        public Camera2Device::NotificationListener,
        public Camera2Device::FrameListener
        public Camera2Device::NotificationListener
{
public:
    // ICamera interface (see ICamera for details)
@@ -83,8 +82,6 @@ public:
    virtual void notifyAutoExposure(uint8_t newState, int triggerId);
    virtual void notifyAutoWhitebalance(uint8_t newState, int triggerId);

    virtual void onNewFrameAvailable();

private:
    enum State {
        DISCONNECTED,
@@ -299,9 +296,29 @@ private:
    // Used with stream IDs
    static const int NO_STREAM = -1;

    /* Output frame metadata processing methods */
    /* Output frame metadata processing thread.  This thread waits for new
     * frames from the device, and analyzes them as necessary.
     */
    class FrameProcessor: public Thread {
      public:
        // client: weak reference to the owner; the thread exits on its own
        // when the client can no longer be promoted.
        FrameProcessor(wp<Camera2Client> client);
        ~FrameProcessor();

        // Appends the latest received frame's metadata to fd (dumpsys).
        void dump(int fd, const Vector<String16>& args);
      private:
        // Maximum time one threadLoop() iteration blocks waiting for a frame.
        static const nsecs_t kWaitDuration = 10000000; // 10 ms
        wp<Camera2Client> mClient;

        // Called repeatedly by the Thread framework; returning false stops
        // the thread.
        virtual bool threadLoop();

        // Drain and analyze all frames currently queued on the device.
        void processNewFrames(sp<Camera2Client> &client);
        // Extract face-detect results from one frame and forward them to the
        // camera client callback.
        status_t processFaceDetect(const CameraMetadata &frame,
                sp<Camera2Client> &client);

        // Most recently received frame, retained for dump().
        CameraMetadata mLastFrame;
    };

    status_t processFrameFaceDetect(const CameraMetadata &frame);
    sp<FrameProcessor> mFrameProcessor;

    /* Preview related members */

+7 −27
Original line number Diff line number Diff line
@@ -33,12 +33,12 @@ Camera2Device::Camera2Device(int id):
        mId(id),
        mDevice(NULL)
{
    ALOGV("%s: E", __FUNCTION__);
    ALOGV("%s: Created device for camera %d", __FUNCTION__, id);
}

Camera2Device::~Camera2Device()
{
    ALOGV("%s: E", __FUNCTION__);
    ALOGV("%s: Shutting down device for camera %d", __FUNCTION__, mId);
    if (mDevice) {
        status_t res;
        res = mDevice->common.close(&mDevice->common);
@@ -49,11 +49,12 @@ Camera2Device::~Camera2Device()
        }
        mDevice = NULL;
    }
    ALOGV("%s: Shutdown complete", __FUNCTION__);
}

status_t Camera2Device::initialize(camera_module_t *module)
{
    ALOGV("%s: E", __FUNCTION__);
    ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);

    status_t res;
    char name[10];
@@ -347,8 +348,8 @@ void Camera2Device::notificationCallback(int32_t msg_type,
    }
}

status_t Camera2Device::setFrameListener(FrameListener *listener) {
    return mFrameQueue.setListener(listener);
// Blocks until the frame queue holds a new buffer or timeout (nanoseconds)
// elapses; forwards the queue's status (TIMED_OUT on expiry) to the caller.
status_t Camera2Device::waitForNextFrame(nsecs_t timeout) {
    return mFrameQueue.waitForBuffer(timeout);
}

status_t Camera2Device::getNextFrame(CameraMetadata *frame) {
@@ -406,13 +407,6 @@ status_t Camera2Device::triggerPrecaptureMetering(uint32_t id) {
Camera2Device::NotificationListener::~NotificationListener() {
}

/**
 * Camera2Device::FrameListener
 */

Camera2Device::FrameListener::~FrameListener() {
}

/**
 * Camera2Device::MetadataQueue
 */
@@ -422,8 +416,7 @@ Camera2Device::MetadataQueue::MetadataQueue():
            mFrameCount(0),
            mCount(0),
            mStreamSlotCount(0),
            mSignalConsumer(true),
            mListener(NULL)
            mSignalConsumer(true)
{
    camera2_request_queue_src_ops::dequeue_request = consumer_dequeue;
    camera2_request_queue_src_ops::request_count = consumer_buffer_count;
@@ -541,12 +534,6 @@ status_t Camera2Device::MetadataQueue::waitForBuffer(nsecs_t timeout)
    return OK;
}

// (Removed by this commit: replaced by the polling waitForNextFrame API.)
// Installs the listener notified from signalConsumerLocked(); guarded by
// the queue mutex.
status_t Camera2Device::MetadataQueue::setListener(FrameListener *listener) {
    Mutex::Autolock l(mMutex);
    mListener = listener;
    return OK;
}

status_t Camera2Device::MetadataQueue::setStreamSlot(camera_metadata_t *buf)
{
    ALOGV("%s: E", __FUNCTION__);
@@ -659,13 +646,6 @@ status_t Camera2Device::MetadataQueue::signalConsumerLocked() {
        res = mDevice->ops->notify_request_queue_not_empty(mDevice);
        mMutex.lock();
    }
    if (mListener != NULL) {
        FrameListener *listener = mListener;
        mMutex.unlock();
        ALOGVV("%s: Signaling listener", __FUNCTION__);
        listener->onNewFrameAvailable();
        mMutex.lock();
    }
    return res;
}

+3 −14
Original line number Diff line number Diff line
@@ -129,19 +129,10 @@ class Camera2Device : public virtual RefBase {
    status_t setNotifyCallback(NotificationListener *listener);

    /**
     * Abstract class for HAL frame available notifications
     * Wait for a new frame to be produced, with timeout in nanoseconds.
     * Returns TIMED_OUT when no frame produced within the specified duration
     */
    class FrameListener {
      public:
        virtual void onNewFrameAvailable() = 0;
      protected:
        virtual ~FrameListener();
    };

    /**
     * Set a frame listener to be notified about new frames.
     */
    status_t setFrameListener(FrameListener *listener);
    status_t waitForNextFrame(nsecs_t timeout);

    /**
     * Get next metadata frame from the frame queue. Returns NULL if the queue
@@ -206,7 +197,6 @@ class Camera2Device : public virtual RefBase {
        status_t dequeue(camera_metadata_t **buf, bool incrementCount = true);
        int      getBufferCount();
        status_t waitForBuffer(nsecs_t timeout);
        status_t setListener(FrameListener *listener);

        // Set repeating buffer(s); if the queue is empty on a dequeue call, the
        // queue copies the contents of the stream slot into the queue, and then
@@ -235,7 +225,6 @@ class Camera2Device : public virtual RefBase {
        List<camera_metadata_t*> mStreamSlot;

        bool mSignalConsumer;
        FrameListener *mListener;

        static MetadataQueue* getInstance(
            const camera2_frame_queue_dst_ops_t *q);