Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 16f57911 authored by TreeHugger Robot's avatar TreeHugger Robot Committed by Android (Google) Code Review
Browse files

Merge "Use renderengine on mediaserver side"

parents 34a7610f fe8641e5
Loading
Loading
Loading
Loading
+48 −0
Original line number Diff line number Diff line
@@ -122,6 +122,53 @@ cc_library_static {
    },
}

// Utility library that captures decoded video frames via RenderEngine on the
// mediaserver side (FrameCaptureLayer + FrameCaptureProcessor).
cc_library_shared {
    name: "libstagefright_framecapture_utils",

    srcs: [
        "FrameCaptureLayer.cpp",
        "FrameCaptureProcessor.cpp",
    ],

    shared_libs: [
        "libbase",
        "libcutils",
        "libEGL",
        "libGLESv1_CM",
        "libGLESv2",
        "libgui",
        "liblog",
        "libprocessgroup",
        "libstagefright_foundation",
        "libsync",
        "libui",
        "libutils",
    ],

    // RenderEngine is linked in statically.
    static_libs: [
        "librenderengine",
    ],

    export_include_dirs: [
        "include",
    ],

    cflags: [
        "-Wno-multichar",
        "-Werror",
        "-Wno-error=deprecated-declarations",
        "-Wall",
    ],

    sanitize: {
        // TODO: re-enable cfi for this lib after b/139945549 fixed
        cfi: false,
        misc_undefined: [
            "unsigned-integer-overflow",
            "signed-integer-overflow",
        ],
    },
}
cc_library {
    name: "libstagefright",

@@ -174,6 +221,7 @@ cc_library {
    ],

    shared_libs: [
        "libstagefright_framecapture_utils",
        "libaudioutils",
        "libbase",
        "libbinder",
+247 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "FrameCaptureLayer"

#include <include/FrameCaptureLayer.h>
#include <media/stagefright/FrameCaptureProcessor.h>
#include <gui/BufferQueue.h>
#include <gui/GLConsumer.h>
#include <gui/IGraphicBufferConsumer.h>
#include <gui/Surface.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaErrors.h>
#include <renderengine/RenderEngine.h>
#include <utils/Log.h>

namespace android {

static const int64_t kAcquireBufferTimeoutNs = 100000000LL;

// Maps deprecated legacy dataspace aliases onto their modern V0_* equivalents;
// any dataspace without a legacy alias is returned unchanged.
ui::Dataspace translateDataspace(ui::Dataspace dataspace) {
    switch (dataspace) {
        case ui::Dataspace::SRGB:
            return ui::Dataspace::V0_SRGB;
        case ui::Dataspace::SRGB_LINEAR:
            return ui::Dataspace::V0_SRGB_LINEAR;
        case ui::Dataspace::JFIF:
            return ui::Dataspace::V0_JFIF;
        case ui::Dataspace::BT601_625:
            return ui::Dataspace::V0_BT601_625;
        case ui::Dataspace::BT601_525:
            return ui::Dataspace::V0_BT601_525;
        case ui::Dataspace::BT709:
            return ui::Dataspace::V0_BT709;
        default:
            return dataspace;
    }
}

bool isHdrY410(const BufferItem &bi) {
    ui::Dataspace dataspace = translateDataspace(static_cast<ui::Dataspace>(bi.mDataSpace));
    // pixel format is HDR Y410 masquerading as RGBA_1010102
    return (dataspace == ui::Dataspace::BT2020_ITU_PQ &&
            bi.mGraphicBuffer->getPixelFormat() == HAL_PIXEL_FORMAT_RGBA_1010102);
}

// Adapter exposing a single acquired BufferItem as a
// FrameCaptureProcessor::Layer so RenderEngine can compose it.
struct FrameCaptureLayer::BufferLayer : public FrameCaptureProcessor::Layer {
    BufferLayer(const BufferItem &bi) : mBufferItem(bi) {}
    // Fills |layerSettings| describing how to render mBufferItem into
    // |sourceCrop| using the GL texture named |textureName|.
    void getLayerSettings(
            const Rect &sourceCrop, uint32_t textureName,
            renderengine::LayerSettings *layerSettings) override;
    BufferItem mBufferItem;  // the decoded frame being captured
};

// Builds the RenderEngine layer description for the captured frame: geometry,
// dataspace, source buffer/fence, and the texture transform that maps
// GLConsumer's coordinate convention into RenderEngine's.
void FrameCaptureLayer::BufferLayer::getLayerSettings(
        const Rect &sourceCrop, uint32_t textureName,
        renderengine::LayerSettings *layerSettings) {
    // The layer covers exactly the requested crop, fully opaque.
    layerSettings->geometry.boundaries = sourceCrop.toFloatRect();
    layerSettings->alpha = 1.0f;

    layerSettings->sourceDataspace = translateDataspace(
            static_cast<ui::Dataspace>(mBufferItem.mDataSpace));

    // from BufferLayer
    layerSettings->source.buffer.buffer = mBufferItem.mGraphicBuffer;
    layerSettings->source.buffer.isOpaque = true;
    layerSettings->source.buffer.fence = mBufferItem.mFence;
    layerSettings->source.buffer.textureName = textureName;
    layerSettings->source.buffer.usePremultipliedAlpha = false;
    layerSettings->source.buffer.isY410BT2020 = isHdrY410(mBufferItem);

    // Set filtering to false since the capture itself doesn't involve
    // any scaling, metadata retriever JNI is scaling the bitmap if
    // display size is different from decoded size. If that scaling
    // needs to be handled by server side, consider enable this based
    // display size vs decoded size.
    const bool useFiltering = false;
    layerSettings->source.buffer.useTextureFiltering = useFiltering;

    // Texture transform as computed for GLConsumer (applies crop & transform).
    float textureMatrix[16];
    GLConsumer::computeTransformMatrix(
            textureMatrix, mBufferItem.mGraphicBuffer,
            mBufferItem.mCrop, mBufferItem.mTransform, useFiltering);

    // Flip y-coordinates because GLConsumer expects OpenGL convention.
    mat4 tr = mat4::translate(vec4(.5, .5, 0, 1)) * mat4::scale(vec4(1, -1, 1, 1)) *
            mat4::translate(vec4(-.5, -.5, 0, 1));

    layerSettings->source.buffer.textureTransform =
            mat4(static_cast<const float*>(textureMatrix)) * tr;
}

// Sets up the consumer side of a BufferQueue that will receive the codec's
// decoded output. Fails if the shared FrameCaptureProcessor is unavailable.
status_t FrameCaptureLayer::init() {
    if (FrameCaptureProcessor::getInstance() == nullptr) {
        ALOGE("failed to get capture processor");
        return ERROR_UNSUPPORTED;
    }

    // Mimic surfaceflinger's BufferQueueLayer::onFirstRef() to create a
    // BufferQueue for encoder output.
    sp<IGraphicBufferProducer> bqProducer;
    sp<IGraphicBufferConsumer> bqConsumer;
    BufferQueue::createBufferQueue(&bqProducer, &bqConsumer);

    // We don't need HW_COMPOSER usage since we're not using hwc to compose.
    // The buffer is only used as a GL texture.
    bqConsumer->setConsumerUsageBits(GraphicBuffer::USAGE_HW_TEXTURE);
    bqConsumer->setConsumerName(String8("FrameDecoder"));

    const status_t err = bqConsumer->consumerConnect(
            new BufferQueue::ProxyConsumerListener(this), false);
    if (err != NO_ERROR) {
        ALOGE("Error connecting to BufferQueue: %s (%d)", strerror(-err), err);
        return err;
    }

    mConsumer = bqConsumer;
    mSurface = new Surface(bqProducer);
    return OK;
}

// Acquires the latest decoded frame, allocates a destination GraphicBuffer of
// the crop size, and asks FrameCaptureProcessor to render the frame into it.
// On success *outBuffer holds the rendered result.
status_t FrameCaptureLayer::capture(const ui::PixelFormat reqPixelFormat,
        const Rect &sourceCrop, sp<GraphicBuffer> *outBuffer) {
    ALOGV("capture: reqPixelFormat %d, crop {%d, %d, %d, %d}", reqPixelFormat,
            sourceCrop.left, sourceCrop.top, sourceCrop.right, sourceCrop.bottom);

    // Pull the decoded frame off the BufferQueue.
    BufferItem bufferItem;
    const status_t acquireErr = acquireBuffer(&bufferItem);
    if (acquireErr != OK) {
        return acquireErr;
    }

    // Destination buffer: CPU-accessible for the caller, GPU-renderable and
    // texturable for RenderEngine.
    const uint32_t usageBits =
            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
            GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE;
    sp<GraphicBuffer> dstBuffer = new GraphicBuffer(
            sourceCrop.getWidth(), sourceCrop.getHeight(),
            static_cast<android_pixel_format>(reqPixelFormat),
            1, usageBits, std::string("thumbnail"));

    const status_t captureErr = FrameCaptureProcessor::getInstance()->capture(
            new BufferLayer(bufferItem), sourceCrop, dstBuffer);
    if (captureErr == OK) {
        *outBuffer = dstBuffer;
    }

    // Return the slot to the queue regardless of the capture outcome.
    (void)releaseBuffer(bufferItem);
    return captureErr;
}

// No frame is pending until onFrameAvailable() fires.
FrameCaptureLayer::FrameCaptureLayer() : mFrameAvailable(false) {}

// BufferQueue callback: a decoded frame arrived. Flags it and wakes any
// thread blocked in acquireBuffer().
void FrameCaptureLayer::onFrameAvailable(const BufferItem& /*item*/) {
    ALOGV("onFrameAvailable");

    Mutex::Autolock autoLock(mLock);
    mFrameAvailable = true;
    mCondition.signal();
}

void FrameCaptureLayer::onBuffersReleased() {
    ALOGV("onBuffersReleased");
    Mutex::Autolock _lock(mLock);

    uint64_t mask = 0;
    mConsumer->getReleasedBuffers(&mask);
    for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
        if (mask & (1ULL << i)) {
            mSlotToBufferMap[i] = nullptr;
        }
    }
}

// Sideband streams are not consumed by this capture path; log only.
void FrameCaptureLayer::onSidebandStreamChanged() {
    ALOGV("onSidebandStreamChanged");
}

// Waits (up to ~100ms per wakeup) for a decoded frame, acquires it from the
// consumer, and resolves the BufferItem's GraphicBuffer via the slot cache
// (the queue only ships the buffer itself on slot reuse).
// Returns TIMED_OUT if no frame arrives, or the consumer's error.
status_t FrameCaptureLayer::acquireBuffer(BufferItem *bi) {
    ALOGV("acquireBuffer");
    Mutex::Autolock _lock(mLock);

    // The output buffer is already released to the codec at this point.
    // Use a small timeout of 100ms in case the buffer hasn't arrived
    // at the consumer end of the output surface yet. Loop to guard against
    // spurious condition-variable wakeups (previously a single `if` check
    // could fall through without a frame); each spurious wakeup restarts
    // the 100ms timeout, which is acceptable for this best-effort wait.
    while (!mFrameAvailable) {
        if (mCondition.waitRelative(mLock, kAcquireBufferTimeoutNs) != OK) {
            ALOGE("wait for buffer timed out");
            return TIMED_OUT;
        }
    }
    mFrameAvailable = false;

    status_t err = mConsumer->acquireBuffer(bi, 0);
    if (err != OK) {
        ALOGE("failed to acquire buffer!");
        return err;
    }

    // A non-null buffer means the slot was (re)allocated: cache it. A null
    // buffer means the slot was seen before: recover it from the cache.
    if (bi->mGraphicBuffer != nullptr) {
        mSlotToBufferMap[bi->mSlot] = bi->mGraphicBuffer;
    } else {
        bi->mGraphicBuffer = mSlotToBufferMap[bi->mSlot];
    }

    if (bi->mGraphicBuffer == nullptr) {
        ALOGE("acquired null buffer!");
        return BAD_VALUE;
    }
    return OK;
}

// Returns the acquired slot to the BufferQueue so the producer can reuse it;
// |bi.mFence| is handed back as the release fence (no EGL sync involved).
status_t FrameCaptureLayer::releaseBuffer(const BufferItem &bi) {
    ALOGV("releaseBuffer");
    Mutex::Autolock _lock(mLock);

    return mConsumer->releaseBuffer(bi.mSlot, bi.mFrameNumber,
            EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, bi.mFence);
}

}  // namespace android
+213 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "FrameCaptureProcessor"

#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/FrameCaptureProcessor.h>
#include <media/stagefright/MediaErrors.h>
#include <renderengine/RenderEngine.h>
#include <ui/Fence.h>
#include <ui/PixelFormat.h>
#include <utils/Log.h>

namespace android {

//static
//static
Mutex FrameCaptureProcessor::sLock;  // guards creation of sInstance
//static
sp<FrameCaptureProcessor> FrameCaptureProcessor::sInstance;  // process-wide singleton

//static
// Lazily creates the process-wide capture processor under sLock.
//static
sp<FrameCaptureProcessor> FrameCaptureProcessor::getInstance() {
    Mutex::Autolock _l(sLock);
    if (sInstance == nullptr) {
        sp<FrameCaptureProcessor> processor = new FrameCaptureProcessor();
        processor->createRenderEngine();
        sInstance = processor;
    }
    // init only once, if failed nullptr will be returned afterwards.
    if (sInstance->initCheck() != OK) {
        return nullptr;
    }
    return sInstance;
}

//static
// Posts |msg| to the looper and blocks for the reply. The reply's "err"
// int32 entry carries the result; its absence means success.
//static
status_t FrameCaptureProcessor::PostAndAwaitResponse(
        const sp<AMessage> &msg, sp<AMessage> *response) {
    const status_t postErr = msg->postAndAwaitResponse(response);
    if (postErr != OK) {
        return postErr;
    }

    int32_t replyErr = OK;
    if ((*response)->findInt32("err", &replyErr)) {
        return replyErr;
    }
    return OK;
}

//static
void FrameCaptureProcessor::PostReplyWithError(
        const sp<AReplyToken> &replyID, status_t err) {
    sp<AMessage> response = new AMessage;
    if (err != OK) {
        response->setInt32("err", err);
    }
    response->postReply(replyID);
}

// Starts in NO_INIT; createRenderEngine() must run before the instance is usable.
FrameCaptureProcessor::FrameCaptureProcessor()
    : mInitStatus(NO_INIT), mTextureName(0) {}

FrameCaptureProcessor::~FrameCaptureProcessor() {
    // Tear down the looper thread if createRenderEngine() started one.
    if (mLooper != nullptr) {
        mLooper->unregisterHandler(id());
        mLooper->stop();
    }
}

// One-shot initialization: spins up the looper thread, creates the
// RenderEngine on it (via kWhatCreate), and generates the single GL texture
// name used for all captures. Sets mInitStatus to OK on success or
// ERROR_UNSUPPORTED on failure (the looper is torn down in that case).
void FrameCaptureProcessor::createRenderEngine() {
    // this method should only be called once, immediately after ctor
    CHECK(mInitStatus == NO_INIT);

    mLooper = new ALooper();
    mLooper->setName("capture_looper");
    mLooper->start(); // default priority
    mLooper->registerHandler(this);

    // RenderEngine must be created on the looper thread; block until done.
    sp<AMessage> response;
    status_t err = PostAndAwaitResponse(new AMessage(kWhatCreate, this), &response);
    if (err != OK) {
        mInitStatus = ERROR_UNSUPPORTED;

        // Undo the looper setup so the dtor has nothing left to stop.
        mLooper->unregisterHandler(id());
        mLooper->stop();
        mLooper.clear();
        return;
    }

    // only need one texture name
    mRE->genTextures(1, &mTextureName);

    mInitStatus = OK;
}

// Packages a capture request and runs it synchronously on the looper thread
// (the actual rendering happens in onCapture()).
status_t FrameCaptureProcessor::capture(
        const sp<Layer> &layer, const Rect &sourceCrop, const sp<GraphicBuffer> &buffer) {
    sp<AMessage> request = new AMessage(kWhatCapture, this);
    request->setObject("layer", layer);
    request->setObject("buffer", buffer);
    request->setRect("crop", sourceCrop.left, sourceCrop.top,
            sourceCrop.right, sourceCrop.bottom);

    sp<AMessage> response;
    return PostAndAwaitResponse(request, &response);
}

// Runs on the looper thread: creates the RenderEngine instance used for all
// captures. Returns ERROR_UNSUPPORTED if creation fails.
status_t FrameCaptureProcessor::onCreate() {
    // Color-managed RGBA8888 engine, low context priority, no protected
    // content; only the tone-mapper shaders are precached.
    mRE = renderengine::RenderEngine::create(
            renderengine::RenderEngineCreationArgs::Builder()
                .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
                .setImageCacheSize(2 /*maxFrameBufferAcquiredBuffers*/)
                .setUseColorManagerment(true)
                .setEnableProtectedContext(false)
                .setPrecacheToneMapperShaderOnly(true)
                .setContextPriority(renderengine::RenderEngine::ContextPriority::LOW)
                .build());

    if (mRE == nullptr) {
        return ERROR_UNSUPPORTED;
    }
    return OK;
}

// Runs on the looper thread: draws |layer| cropped to |sourceCrop| into
// |buffer| via RenderEngine, converting output to sRGB, then waits (up to
// 500ms) on the draw fence. A fence-wait failure is only logged; the capture
// is still reported as OK since drawLayers itself succeeded.
status_t FrameCaptureProcessor::onCapture(const sp<Layer> &layer,
        const Rect &sourceCrop, const sp<GraphicBuffer> &buffer) {
    renderengine::DisplaySettings clientCompositionDisplay;
    std::vector<renderengine::LayerSettings> clientCompositionLayers;

    // Output covers exactly the requested crop.
    clientCompositionDisplay.physicalDisplay = sourceCrop;
    clientCompositionDisplay.clip = sourceCrop;

    clientCompositionDisplay.outputDataspace = ui::Dataspace::V0_SRGB;
    clientCompositionDisplay.maxLuminance = sDefaultMaxLumiance;
    clientCompositionDisplay.clearRegion = Region::INVALID_REGION;

    // from Layer && BufferLayer
    renderengine::LayerSettings layerSettings;

    layer->getLayerSettings(sourceCrop, mTextureName, &layerSettings);

    clientCompositionLayers.push_back(layerSettings);

    // Use an empty fence for the buffer fence, since we just created the buffer so
    // there is no need for synchronization with the GPU.
    base::unique_fd bufferFence;
    base::unique_fd drawFence;
    mRE->useProtectedContext(false);
    status_t err = mRE->drawLayers(clientCompositionDisplay, clientCompositionLayers, buffer.get(),
            /*useFramebufferCache=*/false, std::move(bufferFence), &drawFence);

    // Wrap the draw fence even on error so the fd is owned and closed.
    sp<Fence> fence = new Fence(std::move(drawFence));

    if (err != OK) {
        ALOGE("drawLayers returned err %d", err);
        return err;
    }

    err = fence->wait(500);
    if (err != OK) {
        ALOGW("wait for fence returned err %d", err);
    }
    return OK;
}

void FrameCaptureProcessor::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatCreate:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            status_t err = onCreate();

            PostReplyWithError(replyID, err);
            break;
        }
        case kWhatCapture:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            sp<RefBase> layerObj, bufferObj;
            int32_t left, top, right, bottom;
            CHECK(msg->findObject("layer", &layerObj));
            CHECK(msg->findRect("crop", &left, &top, &right, &bottom));
            CHECK(msg->findObject("buffer", &bufferObj));

            sp<GraphicBuffer> buffer = static_cast<GraphicBuffer*>(bufferObj.get());
            sp<Layer> layer = static_cast<Layer*>(layerObj.get());

            PostReplyWithError(replyID,
                    onCapture(layer, Rect(left, top, right, bottom), buffer));

            break;
        }
        default:
            TRESPASS();
    }
}

}  // namespace android
+20 −63
Original line number Diff line number Diff line
@@ -18,10 +18,10 @@
#define LOG_TAG "FrameDecoder"

#include "include/FrameDecoder.h"
#include "include/FrameCaptureLayer.h"
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <gui/Surface.h>
#include <gui/SurfaceComposerClient.h>
#include <inttypes.h>
#include <mediadrm/ICrypto.h>
#include <media/IMediaSource.h>
@@ -31,6 +31,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/ColorConverter.h>
#include <media/stagefright/FrameCaptureProcessor.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaDefs.h>
@@ -512,7 +513,7 @@ sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
    }

    if (isHDR(videoFormat)) {
        *window = initSurfaceControl();
        *window = initSurface();
        if (*window == NULL) {
            ALOGE("Failed to init surface control for HDR, fallback to non-hdr");
        } else {
@@ -589,7 +590,7 @@ status_t VideoFrameDecoder::onOutputReceived(
    }

    if (!outputFormat->findInt32("stride", &stride)) {
        if (mSurfaceControl == NULL) {
        if (mCaptureLayer == NULL) {
            ALOGE("format must have stride for byte buffer mode: %s",
                    outputFormat->debugString().c_str());
            return ERROR_MALFORMED;
@@ -613,7 +614,7 @@ status_t VideoFrameDecoder::onOutputReceived(
                0,
                0,
                dstBpp(),
                mSurfaceControl != nullptr /*allocRotated*/);
                mCaptureLayer != nullptr /*allocRotated*/);
        mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());

        setFrame(frameMem);
@@ -621,8 +622,8 @@ status_t VideoFrameDecoder::onOutputReceived(

    mFrame->mDurationUs = durationUs;

    if (mSurfaceControl != nullptr) {
        return captureSurfaceControl();
    if (mCaptureLayer != nullptr) {
        return captureSurface();
    }

    ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
@@ -655,70 +656,26 @@ status_t VideoFrameDecoder::onOutputReceived(
    return ERROR_UNSUPPORTED;
}

sp<Surface> VideoFrameDecoder::initSurfaceControl() {
    sp<SurfaceComposerClient> client = new SurfaceComposerClient();
    if (client->initCheck() != NO_ERROR) {
        ALOGE("failed to get SurfaceComposerClient");
        return NULL;
    }

    // create a container layer to hold the capture layer, so that we can
    // use full frame drop. If without the container, the crop will be set
    // to display size.
    sp<SurfaceControl> parent = client->createSurface(
            String8("parent"),
            0 /* width */, 0 /* height */,
            PIXEL_FORMAT_RGBA_8888,
            ISurfaceComposerClient::eFXSurfaceContainer );

    if (!parent) {
        ALOGE("failed to get surface control parent");
        return NULL;
    }

    // create the surface with unknown size 1x1 for now, real size will
    // be set before the capture when we have output format info.
    sp<SurfaceControl> surfaceControl = client->createSurface(
            String8("thumbnail"),
            1 /* width */, 1 /* height */,
            PIXEL_FORMAT_RGBA_8888,
            ISurfaceComposerClient::eFXSurfaceBufferQueue,
            parent.get());

    if (!surfaceControl) {
        ALOGE("failed to get surface control");
        return NULL;
sp<Surface> VideoFrameDecoder::initSurface() {
    // create the consumer listener interface, and hold sp so that this
    // interface lives as long as the GraphicBufferSource.
    sp<FrameCaptureLayer> captureLayer = new FrameCaptureLayer();
    if (captureLayer->init() != OK) {
        ALOGE("failed to init capture layer");
        return nullptr;
    }
    mCaptureLayer = captureLayer;

    SurfaceComposerClient::Transaction t;
    t.hide(parent)
            .show(surfaceControl)
            .apply(true);

    mSurfaceControl = surfaceControl;
    mParent = parent;

    return surfaceControl->getSurface();
    return captureLayer->getSurface();
}

status_t VideoFrameDecoder::captureSurfaceControl() {
    // set the layer size to the output size before the capture
    SurfaceComposerClient::Transaction()
        .setSize(mSurfaceControl, mFrame->mWidth, mFrame->mHeight)
        .apply(true);

status_t VideoFrameDecoder::captureSurface() {
    sp<GraphicBuffer> outBuffer;
    status_t err = ScreenshotClient::captureChildLayers(
            mParent->getHandle(),
            ui::Dataspace::V0_SRGB,
            captureFormat(),
            Rect(0, 0, mFrame->mWidth, mFrame->mHeight),
            {},
            1.0f /*frameScale*/,
            &outBuffer);
    status_t err = mCaptureLayer->capture(
            captureFormat(), Rect(0, 0, mFrame->mWidth, mFrame->mHeight), &outBuffer);

    if (err != OK) {
        ALOGE("failed to captureLayers: err %d", err);
        ALOGE("failed to capture layer (err %d)", err);
        return err;
    }

+77 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FRAME_CAPTURE_LAYER_H_
#define FRAME_CAPTURE_LAYER_H_

#include <map>

#include <gui/IConsumerListener.h>
#include <media/stagefright/foundation/ABase.h>
#include <ui/GraphicTypes.h>
#include <utils/Condition.h>
#include <utils/Mutex.h>

namespace android {

class GraphicBuffer;
class IGraphicBufferConsumer;
class Rect;
class Surface;

/*
 * This class is a simple BufferQueue consumer implementation to
 * obtain a decoded buffer output from MediaCodec. The output
 * buffer is then sent to FrameCaptureProcessor to be converted
 * to sRGB properly.
 */
/*
 * This class is a simple BufferQueue consumer implementation to
 * obtain a decoded buffer output from MediaCodec. The output
 * buffer is then sent to FrameCaptureProcessor to be converted
 * to sRGB properly.
 */
struct FrameCaptureLayer : public ConsumerListener {
    FrameCaptureLayer();
    ~FrameCaptureLayer() = default;

    // ConsumerListener
    void onFrameAvailable(const BufferItem& /*item*/) override;
    void onBuffersReleased() override;
    void onSidebandStreamChanged() override;

    // Creates the BufferQueue and connects this object as its consumer.
    // Must succeed (return OK) before getSurface()/capture() are used.
    status_t init();

    // Producer-side surface to hand to the codec as its output.
    sp<Surface> getSurface() { return mSurface; }

    // Acquires the latest frame and renders it, cropped to |sourceCrop|,
    // into a newly allocated buffer returned through |outBuffer|.
    status_t capture(const ui::PixelFormat reqPixelFormat,
            const Rect &sourceCrop, sp<GraphicBuffer> *outBuffer);

private:
    struct BufferLayer;
    // Note: do not hold any sp ref to GraphicBufferSource
    // GraphicBufferSource is holding an sp to us, holding any sp ref
    // to GraphicBufferSource will cause circular dependency and both
    // object will not be released.
    sp<IGraphicBufferConsumer> mConsumer;  // consumer end of the BufferQueue
    sp<Surface> mSurface;                  // producer end, given to the codec
    // Cache of slot -> GraphicBuffer; the queue only ships the buffer when a
    // slot is (re)allocated.
    std::map<int32_t, sp<GraphicBuffer> > mSlotToBufferMap;

    Mutex mLock;
    Condition mCondition;  // signaled by onFrameAvailable()
    bool mFrameAvailable GUARDED_BY(mLock);

    status_t acquireBuffer(BufferItem *bi);
    status_t releaseBuffer(const BufferItem &bi);

    DISALLOW_EVIL_CONSTRUCTORS(FrameCaptureLayer);
};

}  // namespace android

#endif  // FRAME_CAPTURE_LAYER_H_
Loading