Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2571356e authored by Nipun Kwatra's avatar Nipun Kwatra Committed by Android (Google) Code Review
Browse files

Merge "Support for video size to be different from captured picture size."

parents c2aa115f dce4beb1
Loading
Loading
Loading
Loading
+33 −0
Original line number Diff line number Diff line
@@ -49,6 +49,23 @@ private:
    // If false, will use the videocamera frames instead.
    bool mUseStillCameraForTimeLapse;

    // Size of picture taken from still camera. This may be larger than the size
    // of the video, as still camera may not support the exact video resolution
    // demanded. See setPictureSizeToClosestSupported().
    int32_t mPictureWidth;
    int32_t mPictureHeight;

    // size of the encoded video.
    int32_t mVideoWidth;
    int32_t mVideoHeight;

    // True if we need to crop the still camera image to get the video frame.
    bool mNeedCropping;

    // Start location of the cropping rectangle.
    int32_t mCropRectStartX;
    int32_t mCropRectStartY;

    // Time between the capture of two consecutive frames during time lapse
    // recording. A negative value indicates that time lapse is disabled.
    int64_t mTimeBetweenTimeLapseFrameCaptureUs;
@@ -107,6 +124,22 @@ private:
    virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
            const sp<IMemory> &data);

    // The still camera may not support the demanded video width and height.
    // We look through the picture sizes supported by the still camera and
    // choose one whose dimensions are each at least as large as the
    // corresponding video dimensions. The still picture will be cropped to
    // get the video frame.
    void setPictureSizeToClosestSupported(int32_t width, int32_t height);

    // Computes the offset of the rectangle from where to start cropping the
    // still image into the video frame. We choose the center of the image to be
    // cropped. The offset is stored in (mCropRectStartX, mCropRectStartY).
    bool computeCropRectangleOffset();

    // Crops the source data into a smaller image starting at
    // (mCropRectStartX, mCropRectStartY) and of the size of the video frame.
    // The data is returned into a newly allocated IMemory.
    sp<IMemory> cropYUVImage(const sp<IMemory> &source_data);

    // When video camera is used for time lapse capture, returns true
    // until enough time has passed for the next time lapse frame. When
    // the frame needs to be encoded, it returns false and also modifies
+1 −0
Original line number Diff line number Diff line
@@ -57,6 +57,7 @@ LOCAL_SHARED_LIBRARIES := \
        libsonivox        \
        libvorbisidec     \
        libsurfaceflinger_client \
        libstagefright_yuv \
        libcamera_client

LOCAL_STATIC_LIBRARIES := \
+93 −19
Original line number Diff line number Diff line
@@ -24,9 +24,13 @@
#include <media/stagefright/CameraSourceTimeLapse.h>
#include <media/stagefright/MediaDebug.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/YUVImage.h>
#include <media/stagefright/YUVCanvas.h>
#include <camera/Camera.h>
#include <camera/CameraParameters.h>
#include <ui/Rect.h>
#include <utils/String8.h>
#include "OMX_Video.h"

namespace android {

@@ -72,7 +76,11 @@ CameraSourceTimeLapse::CameraSourceTimeLapse(const sp<Camera> &camera,
      mSkipCurrentFrame(false) {

    LOGV("starting time lapse mode");
    mVideoWidth = width;
    mVideoHeight = height;
    if (mUseStillCameraForTimeLapse) {
        setPictureSizeToClosestSupported(width, height);
        mNeedCropping = computeCropRectangleOffset();
        mMeta->setInt32(kKeyWidth, width);
        mMeta->setInt32(kKeyHeight, height);
    }
@@ -81,6 +89,31 @@ CameraSourceTimeLapse::CameraSourceTimeLapse(const sp<Camera> &camera,
CameraSourceTimeLapse::~CameraSourceTimeLapse() {
}

// Chooses the still-camera picture size that will be used to source the
// time lapse video frames. The requested video dimensions are currently
// ignored and a fixed maximum resolution is used instead.
// TODO: Poll the camera for its supported picture sizes and pick the
// closest one rather than hard-coding the highest resolution.
void CameraSourceTimeLapse::setPictureSizeToClosestSupported(int32_t width, int32_t height) {
    static const int32_t kFixedPictureWidth = 2048;
    static const int32_t kFixedPictureHeight = 1536;

    mPictureWidth = kFixedPictureWidth;
    mPictureHeight = kFixedPictureHeight;
}

// Decides whether the still image must be cropped down to the video frame
// size and, if so, computes the top-left corner of a centered crop
// rectangle into (mCropRectStartX, mCropRectStartY).
// Returns true if cropping is required, false if the picture size already
// matches the video size exactly.
bool CameraSourceTimeLapse::computeCropRectangleOffset() {
    if ((mPictureWidth == mVideoWidth) && (mPictureHeight == mVideoHeight)) {
        // Sizes match exactly: no cropping needed.
        return false;
    }

    // The still picture must be at least as large as the video frame in
    // both dimensions. Note that one dimension may match the video exactly
    // while the other is larger, so this must be >= rather than a strict >
    // (a strict check would abort on such a size).
    CHECK((mPictureWidth >= mVideoWidth) && (mPictureHeight >= mVideoHeight));

    int32_t widthDifference = mPictureWidth - mVideoWidth;
    int32_t heightDifference = mPictureHeight - mVideoHeight;

    // Center the crop rectangle within the still image.
    mCropRectStartX = widthDifference / 2;
    mCropRectStartY = heightDifference / 2;

    LOGV("setting crop rectangle offset to (%d, %d)", mCropRectStartX, mCropRectStartY);

    return true;
}

// static
void *CameraSourceTimeLapse::ThreadTimeLapseWrapper(void *me) {
    CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me);
@@ -106,17 +139,12 @@ void CameraSourceTimeLapse::startCameraRecording() {
    if (mUseStillCameraForTimeLapse) {
        LOGV("start time lapse recording using still camera");

        int32_t width;
        int32_t height;
        mMeta->findInt32(kKeyWidth, &width);
        mMeta->findInt32(kKeyHeight, &height);

        int64_t token = IPCThreadState::self()->clearCallingIdentity();
        String8 s = mCamera->getParameters();
        IPCThreadState::self()->restoreCallingIdentity(token);

        CameraParameters params(s);
        params.setPictureSize(width, height);
        params.setPictureSize(mPictureWidth, mPictureHeight);
        mCamera->setParameters(params.flatten());
        mCameraIdle = true;

@@ -158,6 +186,13 @@ sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(const sp<IMemory> &source_d
    return newMemory;
}

// Allocates an IMemory (concrete type MemoryBase, backed by a fresh
// MemoryHeapBase) of the given size and returns it.
sp<IMemory> allocateIMemory(size_t size) {
    sp<MemoryHeapBase> heap = new MemoryHeapBase(size);
    return new MemoryBase(heap, 0, size);
}

// static
void *CameraSourceTimeLapse::ThreadStartPreviewWrapper(void *me) {
    CameraSourceTimeLapse *source = static_cast<CameraSourceTimeLapse *>(me);
@@ -182,10 +217,43 @@ void CameraSourceTimeLapse::restartPreview() {
    pthread_attr_destroy(&attr);
}

// Crops the still-camera image in source_data down to the video frame
// size, copying the rectangle that starts at (mCropRectStartX,
// mCropRectStartY) into a newly allocated IMemory, which is returned.
sp<IMemory> CameraSourceTimeLapse::cropYUVImage(const sp<IMemory> &source_data) {
    // Map the OMX color format from the metadata to a YUVImage format.
    int32_t srcFormat;
    CHECK(mMeta->findInt32(kKeyColorFormat, &srcFormat));
    YUVImage::YUVFormat yuvFormat;
    if (srcFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
        yuvFormat = YUVImage::YUV420SemiPlanar;
    } else if (srcFormat == OMX_COLOR_FormatYUV420Planar) {
        yuvFormat = YUVImage::YUV420Planar;
    } else {
        // Fail loudly on an unsupported format. Without this branch,
        // yuvFormat would be read uninitialized below.
        CHECK(!"Unsupported color format for cropping");
    }

    // Allocate memory for the cropped image and set up a canvas over it.
    sp<IMemory> croppedImageMemory = allocateIMemory(
            YUVImage::bufferSize(yuvFormat, mVideoWidth, mVideoHeight));
    YUVImage yuvImageCropped(yuvFormat,
            mVideoWidth, mVideoHeight,
            (uint8_t *)croppedImageMemory->pointer());
    YUVCanvas yuvCanvasCrop(yuvImageCropped);

    // Wrap the source still image and copy the centered crop rectangle
    // into the destination canvas at (0, 0).
    YUVImage yuvImageSource(yuvFormat,
            mPictureWidth, mPictureHeight,
            (uint8_t *)source_data->pointer());
    yuvCanvasCrop.CopyImageRect(
            Rect(mCropRectStartX, mCropRectStartY,
                mCropRectStartX + mVideoWidth,
                mCropRectStartY + mVideoHeight),
            0, 0,
            yuvImageSource);

    return croppedImageMemory;
}

void CameraSourceTimeLapse::dataCallback(int32_t msgType, const sp<IMemory> &data) {
    if (msgType == CAMERA_MSG_COMPRESSED_IMAGE) {
        // takePicture will complete after this callback, so restart preview.
        restartPreview();
        return;
    }
    if (msgType != CAMERA_MSG_RAW_IMAGE) {
        return;
@@ -200,9 +268,15 @@ void CameraSourceTimeLapse::dataCallback(int32_t msgType, const sp<IMemory> &dat
    } else {
        timestampUs = mLastFrameTimestampUs + mTimeBetweenTimeLapseVideoFramesUs;
    }

    if (mNeedCropping) {
        sp<IMemory> croppedImageData = cropYUVImage(data);
        dataCallbackTimestamp(timestampUs, msgType, croppedImageData);
    } else {
        sp<IMemory> dataCopy = createIMemoryCopy(data);
        dataCallbackTimestamp(timestampUs, msgType, dataCopy);
    }
}

bool CameraSourceTimeLapse::skipCurrentFrame(int64_t timestampUs) {
    if (mSkipCurrentFrame) {