Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9ae496fe authored by Jan Sebechlebsky's avatar Jan Sebechlebsky
Browse files

Do not write into EglSurfaceTexture after creation

... because it prevents other clients from connecting.
Instead, render/compress black image into output buffers
directly if the input surface texture doesn't have any
buffer yet.

Bug: 301023410
Test: OpenCamera
Test: atest virtual_camera_tests
Change-Id: I289ff00b590c9bc18052ae0cc15a9d7320b8f033
parent 2c9229b7
Loading
Loading
Loading
Loading
+50 −28
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@
#include <mutex>
#include <thread>

#include "GLES/gl.h"
#include "VirtualCameraSessionContext.h"
#include "aidl/android/hardware/camera/common/Status.h"
#include "aidl/android/hardware/camera/device/BufferStatus.h"
@@ -271,9 +272,9 @@ void VirtualCameraRenderThread::processCaptureRequest(
    }

    auto status = streamConfig->format == PixelFormat::BLOB
                      ? renderIntoBlobStreamBuffer(
                            reqBuffer.getStreamId(), reqBuffer.getBufferId(),
                            streamConfig->bufferSize, reqBuffer.getFence())
                      ? renderIntoBlobStreamBuffer(reqBuffer.getStreamId(),
                                                   reqBuffer.getBufferId(),
                                                   reqBuffer.getFence())
                      : renderIntoImageStreamBuffer(reqBuffer.getStreamId(),
                                                    reqBuffer.getBufferId(),
                                                    reqBuffer.getFence());
@@ -354,17 +355,21 @@ void VirtualCameraRenderThread::flushCaptureRequest(
}

ndk::ScopedAStatus VirtualCameraRenderThread::renderIntoBlobStreamBuffer(
    const int streamId, const int bufferId, const size_t bufferSize,
    sp<Fence> fence) {
    const int streamId, const int bufferId, sp<Fence> fence) {
  ALOGV("%s", __func__);
  sp<GraphicBuffer> gBuffer = mEglSurfaceTexture->getCurrentBuffer();
  if (gBuffer == nullptr) {
    // Most probably nothing was yet written to input surface if we reached this.
    ALOGE("%s: Cannot fetch most recent buffer from SurfaceTexture", __func__);
    return cameraStatus(Status::INTERNAL_ERROR);
  }
  std::shared_ptr<AHardwareBuffer> hwBuffer =
      mSessionContext.fetchHardwareBuffer(streamId, bufferId);
  if (hwBuffer == nullptr) {
    ALOGE("%s: Failed to fetch hardware buffer %d for streamId %d", __func__,
          bufferId, streamId);
    return cameraStatus(Status::INTERNAL_ERROR);
  }

  std::optional<Stream> stream = mSessionContext.getStreamConfig(streamId);
  if (!stream.has_value()) {
    ALOGE("%s, failed to fetch information about stream %d", __func__, streamId);
    return cameraStatus(Status::INTERNAL_ERROR);
  }

  AHardwareBuffer_Planes planes_info;

@@ -377,6 +382,9 @@ ndk::ScopedAStatus VirtualCameraRenderThread::renderIntoBlobStreamBuffer(
    return cameraStatus(Status::INTERNAL_ERROR);
  }

  sp<GraphicBuffer> gBuffer = mEglSurfaceTexture->getCurrentBuffer();
  bool compressionSuccess = true;
  if (gBuffer != nullptr) {
    android_ycbcr ycbcr;
    status_t status =
        gBuffer->lockYCbCr(AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, &ycbcr);
@@ -387,16 +395,22 @@ ndk::ScopedAStatus VirtualCameraRenderThread::renderIntoBlobStreamBuffer(
      return cameraStatus(Status::INTERNAL_ERROR);
    }

  bool success = compressJpeg(gBuffer->getWidth(), gBuffer->getHeight(), ycbcr,
                              bufferSize, planes_info.planes[0].data);
    compressionSuccess =
        compressJpeg(gBuffer->getWidth(), gBuffer->getHeight(), ycbcr,
                     stream->bufferSize, planes_info.planes[0].data);

    status_t res = gBuffer->unlock();
    if (res != NO_ERROR) {
      ALOGE("Failed to unlock graphic buffer: %d", res);
    }
  } else {
    compressionSuccess =
        compressBlackJpeg(stream->width, stream->height, stream->bufferSize,
                          planes_info.planes[0].data);
  }
  AHardwareBuffer_unlock(hwBuffer.get(), nullptr);
  ALOGV("Unlocked buffers");
  return success ? ndk::ScopedAStatus::ok()
  return compressionSuccess ? ndk::ScopedAStatus::ok()
                            : cameraStatus(Status::INTERNAL_ERROR);
}

@@ -435,7 +449,15 @@ ndk::ScopedAStatus VirtualCameraRenderThread::renderIntoImageStreamBuffer(
  mEglDisplayContext->makeCurrent();
  framebuffer->beforeDraw();

  if (mEglSurfaceTexture->getCurrentBuffer() == nullptr) {
    // If there's no current buffer, nothing was written to the surface and
    // texture is not initialized yet. Let's render the framebuffer black
    // instead of rendering the texture.
    glClearColor(0.0f, 0.5f, 0.5f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT);
  } else {
    mEglTextureProgram->draw(mEglSurfaceTexture->updateTexture());
  }
  framebuffer->afterDraw();

  const std::chrono::nanoseconds after =
+0 −1
Original line number Diff line number Diff line
@@ -126,7 +126,6 @@ class VirtualCameraRenderThread {
  // Always called on render thread.
  ndk::ScopedAStatus renderIntoBlobStreamBuffer(const int streamId,
                                                const int bufferId,
                                                const size_t bufferSize,
                                                sp<Fence> fence = nullptr);

  // Render current image to the YCbCr buffer.
+3 −36
Original line number Diff line number Diff line
@@ -31,28 +31,12 @@ namespace companion {
namespace virtualcamera {
namespace {

using ::testing::Eq;
using ::testing::NotNull;
using ::testing::IsNull;

constexpr int kWidth = 64;
constexpr int kHeight = 64;
constexpr char kGlExtYuvTarget[] = "GL_EXT_YUV_target";

uint8_t getY(const android_ycbcr& ycbcr, const int x, const int y) {
    uint8_t* yPtr = reinterpret_cast<uint8_t*>(ycbcr.y);
    return *(yPtr + ycbcr.ystride * y + x);
}

uint8_t getCb(const android_ycbcr& ycbcr, const int x, const int y) {
    uint8_t* cbPtr = reinterpret_cast<uint8_t*>(ycbcr.cb);
    return *(cbPtr + ycbcr.cstride * (y / 2) + (x / 2) * ycbcr.chroma_step);
}

uint8_t getCr(const android_ycbcr& ycbcr, const int x, const int y) {
    uint8_t* crPtr = reinterpret_cast<uint8_t*>(ycbcr.cr);
    return *(crPtr + ycbcr.cstride * (y / 2) + (x / 2) * ycbcr.chroma_step);
}

TEST(EglDisplayContextTest, SuccessfulInitialization) {
  EglDisplayContext displayContext;

@@ -88,7 +72,7 @@ TEST_F(EglTest, EglTextureProgramSuccessfulInit) {
  EXPECT_TRUE(eglTextureProgram.isInitialized());
}

TEST_F(EglTest, EglSurfaceTextureBlackAfterInit) {
TEST_F(EglTest, EglSurfaceCurrentBufferNullAfterInit) {
  if (!isGlExtensionSupported(kGlExtYuvTarget)) {
      GTEST_SKIP() << "Skipping test because of missing required GL extension " << kGlExtYuvTarget;
  }
@@ -97,24 +81,7 @@ TEST_F(EglTest, EglSurfaceTextureBlackAfterInit) {
  surfaceTexture.updateTexture();
  sp<GraphicBuffer> buffer = surfaceTexture.getCurrentBuffer();

  ASSERT_THAT(buffer, NotNull());
  const int width = buffer->getWidth();
  const int height = buffer->getHeight();
  ASSERT_THAT(width, Eq(kWidth));
  ASSERT_THAT(height, Eq(kHeight));

  android_ycbcr ycbcr;
  status_t ret = buffer->lockYCbCr(AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, &ycbcr);
  ASSERT_THAT(ret, Eq(NO_ERROR));
  for (int i = 0; i < width; ++i) {
      for (int j = 0; j < height; ++j) {
          EXPECT_THAT(getY(ycbcr, i, j), Eq(0x00));
          EXPECT_THAT(getCb(ycbcr, i, j), Eq(0x7f));
          EXPECT_THAT(getCr(ycbcr, i, j), Eq(0x7f));
      }
  }

  buffer->unlock();
  EXPECT_THAT(buffer, IsNull());
}

}  // namespace
+0 −28
Original line number Diff line number Diff line
@@ -31,26 +31,6 @@ namespace android {
namespace companion {
namespace virtualcamera {

namespace {

void submitBlackBufferYCbCr420(Surface& surface) {
    ANativeWindow_Buffer buffer;

    int ret = surface.lock(&buffer, nullptr);
    if (ret != NO_ERROR) {
        ALOGE("%s: Cannot lock output surface: %d", __func__, ret);
        return;
    }
    uint8_t* data = reinterpret_cast<uint8_t*>(buffer.bits);
    const int yPixNr = buffer.width * buffer.height;
    const int uvPixNr = (buffer.width / 2) * (buffer.height / 2);
    memset(data, 0x00, yPixNr);
    memset(data + yPixNr, 0x7f, 2 * uvPixNr);
    surface.unlockAndPost();
}

}  // namespace

EglSurfaceTexture::EglSurfaceTexture(const uint32_t width, const uint32_t height)
    : mWidth(width), mHeight(height) {
  glGenTextures(1, &mTextureId);
@@ -67,14 +47,6 @@ EglSurfaceTexture::EglSurfaceTexture(const uint32_t width, const uint32_t height
  mGlConsumer->setDefaultBufferFormat(AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420);

  mSurface = sp<Surface>::make(mBufferProducer);
  // Submit black buffer to the surface to make sure there's input buffer
  // to process in case capture request comes before client writes something
  // to the surface.
  //
  // Note that if the client does write something before capture request is
  // processed (& updateTexture is called), this black buffer will be
  // skipped (and recycled).
  submitBlackBufferYCbCr420(*mSurface);
}

EglSurfaceTexture::~EglSurfaceTexture() {
+56 −33
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

#include "android/hardware_buffer.h"
#include "jpeglib.h"
@@ -37,10 +38,9 @@ constexpr int kJpegQuality = 80;

class LibJpegContext {
 public:
  LibJpegContext(int width, int height, const android_ycbcr& ycbcr,
                 const size_t outBufferSize, void* outBuffer)
      : mYCbCr(ycbcr),
        mWidth(width),
  LibJpegContext(int width, int height, const size_t outBufferSize,
                 void* outBuffer)
      : mWidth(width),
        mHeight(height),
        mDstBufferSize(outBufferSize),
        mDstBuffer(outBuffer) {
@@ -94,15 +94,15 @@ class LibJpegContext {
    mCompressStruct.comp_info[2].v_samp_factor = 1;
  }

  bool compress() {
  bool compress(const android_ycbcr& ycbr) {
    // Prepare arrays of pointers to scanlines of each plane.
    std::vector<JSAMPROW> yLines(mHeight);
    std::vector<JSAMPROW> cbLines(mHeight / 2);
    std::vector<JSAMPROW> crLines(mHeight / 2);

    uint8_t* y = static_cast<uint8_t*>(mYCbCr.y);
    uint8_t* cb = static_cast<uint8_t*>(mYCbCr.cb);
    uint8_t* cr = static_cast<uint8_t*>(mYCbCr.cr);
    uint8_t* y = static_cast<uint8_t*>(ycbr.y);
    uint8_t* cb = static_cast<uint8_t*>(ycbr.cb);
    uint8_t* cr = static_cast<uint8_t*>(ycbr.cr);

    // Since UV samples might be interleaved (semiplanar) we need to copy
    // them to separate planes, since libjpeg doesn't directly
@@ -115,38 +115,32 @@ class LibJpegContext {
    for (int i = 0; i < c_samples; ++i) {
      cb_plane[i] = *cb;
      cr_plane[i] = *cr;
      cb += mYCbCr.chroma_step;
      cr += mYCbCr.chroma_step;
      cb += ycbr.chroma_step;
      cr += ycbr.chroma_step;
    }

    // Collect pointers to individual scanline of each plane.
    for (int i = 0; i < mHeight; ++i) {
      yLines[i] = y + i * mYCbCr.ystride;
      yLines[i] = y + i * ycbr.ystride;
    }
    for (int i = 0; i < (mHeight / 2); ++i) {
      cbLines[i] = cb_plane.data() + i * (mWidth / 2);
      crLines[i] = cr_plane.data() + i * (mWidth / 2);
    }

    // Perform actual compression.
    jpeg_start_compress(&mCompressStruct, TRUE);
    return compress(yLines, cbLines, crLines);
  }

    while (mCompressStruct.next_scanline < mCompressStruct.image_height) {
      const uint32_t batchSize = DCTSIZE * 2;
      const uint32_t nl = mCompressStruct.next_scanline;
      JSAMPARRAY planes[3]{&yLines[nl], &cbLines[nl / 2], &crLines[nl / 2]};
  bool compressBlackImage() {
    // We only really need to prepare one scanline for Y and one shared scanline
    // for Cb & Cr.
    std::vector<uint8_t> yLine(mWidth, 0);
    std::vector<uint8_t> chromaLine(mWidth / 2, 0xff / 2);

      uint32_t done = jpeg_write_raw_data(&mCompressStruct, planes, batchSize);
    std::vector<JSAMPROW> yLines(mHeight, yLine.data());
    std::vector<JSAMPROW> cLines(mHeight / 2, chromaLine.data());

      if (done != batchSize) {
        ALOGE("%s: compressed %u lines, expected %u (total %u/%u)",
              __FUNCTION__, done, batchSize, mCompressStruct.next_scanline,
              mCompressStruct.image_height);
        return false;
      }
    }
    jpeg_finish_compress(&mCompressStruct);
    return mSuccess;
    return compress(yLines, cLines, cLines);
  }

 private:
@@ -166,6 +160,34 @@ class LibJpegContext {
    ALOGV("%s:%d Done with jpeg: %zu", __FUNCTION__, __LINE__, mEncodedSize);
  }

  // Perform actual compression.
  //
  // Takes vector of pointers to Y / Cb / Cr scanlines as an input. Length of
  // each vector needs to correspond to height of corresponding plane.
  //
  // Returns true if compression is successful, false otherwise.
  bool compress(std::vector<JSAMPROW>& yLines, std::vector<JSAMPROW>& cbLines,
                std::vector<JSAMPROW>& crLines) {
    jpeg_start_compress(&mCompressStruct, TRUE);

    while (mCompressStruct.next_scanline < mCompressStruct.image_height) {
      const uint32_t batchSize = DCTSIZE * 2;
      const uint32_t nl = mCompressStruct.next_scanline;
      JSAMPARRAY planes[3]{&yLines[nl], &cbLines[nl / 2], &crLines[nl / 2]};

      uint32_t done = jpeg_write_raw_data(&mCompressStruct, planes, batchSize);

      if (done != batchSize) {
        ALOGE("%s: compressed %u lines, expected %u (total %u/%u)",
              __FUNCTION__, done, batchSize, mCompressStruct.next_scanline,
              mCompressStruct.image_height);
        return false;
      }
    }
    jpeg_finish_compress(&mCompressStruct);
    return mSuccess;
  }

  // === libjpeg callbacks below ===

  static void onOutputError(j_common_ptr cinfo) {
@@ -195,9 +217,6 @@ class LibJpegContext {
  jpeg_error_mgr mErrorMgr;
  jpeg_destination_mgr mDestinationMgr;

  // Layout of the input image.
  android_ycbcr mYCbCr;

  // Dimensions of the input image.
  int mWidth;
  int mHeight;
@@ -216,11 +235,15 @@ class LibJpegContext {

}  // namespace

// Returns true if the EGL is in an error state and logs the error.
bool compressJpeg(int width, int height, const android_ycbcr& ycbcr,
                  size_t outBufferSize, void* outBuffer) {
  return LibJpegContext(width, height, ycbcr, outBufferSize, outBuffer)
      .compress();
  return LibJpegContext(width, height, outBufferSize, outBuffer).compress(ycbcr);
}

bool compressBlackJpeg(int width, int height, size_t outBufferSize,
                       void* outBuffer) {
  return LibJpegContext(width, height, outBufferSize, outBuffer)
      .compressBlackImage();
}

}  // namespace virtualcamera
Loading