/*
* Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "Camera3-OutputStream"
#define ATRACE_TAG ATRACE_TAG_CAMERA
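// Uncomment the next line to enable verbose (ALOGV) logging in this file.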
//#define LOG_NDEBUG 0
#include <algorithm>
#include <ctime>
#include <fstream>
#include <aidl/android/hardware/camera/device/CameraBlob.h>
#include <aidl/android/hardware/camera/device/CameraBlobId.h>
#include <android-base/unique_fd.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <common/CameraDeviceBase.h>
#include "api1/client2/JpegProcessor.h"
#include "Camera3OutputStream.h"
#include "utils/TraceHFR.h"
#ifndef container_of
#define container_of(ptr, type, member) \
(type *)((char*)(ptr) - offsetof(type, member))
#endif
namespace android {
namespace camera3 {
using aidl::android::hardware::camera::device::CameraBlob;
using aidl::android::hardware::camera::device::CameraBlobId;
Camera3OutputStream::Camera3OutputStream(int id,
sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
nsecs_t timestampOffset, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation,
physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
timestampBase),
mConsumer(consumer),
mTransform(0),
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
mDequeueBufferLatency(kDequeueLatencyBinSize),
mIPCTransport(transport) {
if (mConsumer == NULL) {
ALOGE("%s: Consumer is NULL!", __FUNCTION__);
mState = STATE_ERROR;
}
bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id,
sp<Surface> consumer,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
nsecs_t timestampOffset, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
setId, isMultiResolution, dynamicRangeProfile, streamUseCase,
deviceTimeBaseIsRealtime, timestampBase),
mConsumer(consumer),
mTransform(0),
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
mDequeueBufferLatency(kDequeueLatencyBinSize),
mIPCTransport(transport) {
if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
format);
mState = STATE_ERROR;
}
if (mConsumer == NULL) {
ALOGE("%s: Consumer is NULL!", __FUNCTION__);
mState = STATE_ERROR;
}
bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id,
uint32_t width, uint32_t height, int format,
uint64_t consumerUsage, android_dataspace dataSpace,
camera_stream_rotation_t rotation, nsecs_t timestampOffset,
const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation,
physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
timestampBase),
mConsumer(nullptr),
mTransform(0),
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
mDequeueBufferLatency(kDequeueLatencyBinSize),
mIPCTransport(transport) {
// Deferred consumers only support the preview surface format for now.
if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
ALOGE("%s: Deferred consumer only supports IMPLEMENTATION_DEFINED format now!",
__FUNCTION__);
mState = STATE_ERROR;
}
// Validation check for the consumer usage flag.
if ((consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) == 0 &&
(consumerUsage & GraphicBuffer::USAGE_HW_COMPOSER) == 0) {
ALOGE("%s: Deferred consumer usage flag is illegal %" PRIu64 "!",
__FUNCTION__, consumerUsage);
mState = STATE_ERROR;
}
mConsumerName = String8("Deferred");
bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id, camera_stream_type_t type,
uint32_t width, uint32_t height,
int format, android_dataspace dataSpace,
camera_stream_rotation_t rotation,
const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
IPCTransport transport,
uint64_t consumerUsage, nsecs_t timestampOffset,
int setId, bool isMultiResolution,
int64_t dynamicRangeProfile, int64_t streamUseCase,
bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3IOStreamBase(id, type, width, height,
/*maxSize*/0,
format, dataSpace, rotation,
physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime,
timestampBase),
mTransform(0),
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
mDequeueBufferLatency(kDequeueLatencyBinSize),
mIPCTransport(transport) {
bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
// Subclasses expected to initialize mConsumer themselves
}
Camera3OutputStream::~Camera3OutputStream() {
disconnectLocked();
}
status_t Camera3OutputStream::getBufferLocked(camera_stream_buffer *buffer,
const std::vector<size_t>&) {
ATRACE_HFR_CALL();
ANativeWindowBuffer* anb;
int fenceFd = -1;
status_t res;
res = getBufferLockedCommon(&anb, &fenceFd);
if (res != OK) {
return res;
}
/**
* FenceFD now owned by HAL except in case of error,
* in which case we reassign it to acquire_fence
*/
handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
/*releaseFence*/-1, CAMERA_BUFFER_STATUS_OK, /*output*/true);
return OK;
}
status_t Camera3OutputStream::getBuffersLocked(std::vector<OutstandingBuffer>* outBuffers) {
status_t res;
if ((res = getBufferPreconditionCheckLocked()) != OK) {
return res;
}
if (mUseBufferManager) {
ALOGE("%s: stream %d is managed by buffer manager and does not support batch operation",
__FUNCTION__, mId);
return INVALID_OPERATION;
}
sp<Surface> consumer = mConsumer;
/**
* Release the lock briefly to avoid deadlock in the following scenario:
* Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
* This thread acquired the StreamingProcessor lock and tries to lock the Camera3Stream lock.
* Thread 2: Camera3Stream::returnBuffer -> StreamingProcessor::onFrameAvailable().
* This thread acquired the Camera3Stream lock and the bufferQueue lock, and tries to lock
* the StreamingProcessor lock.
* Thread 3: Camera3Stream::getBuffer(). This thread acquired the Camera3Stream lock
* and tries to lock the bufferQueue lock.
* This creates a circular locking dependency.
*/
mLock.unlock();
size_t numBuffersRequested = outBuffers->size();
std::vector<Surface::BatchBuffer> buffers(numBuffersRequested);
nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
res = consumer->dequeueBuffers(&buffers);
nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
mLock.lock();
if (res != OK) {
if (shouldLogError(res, mState)) {
ALOGE("%s: Stream %d: Can't dequeue %zu output buffers: %s (%d)",
__FUNCTION__, mId, numBuffersRequested, strerror(-res), res);
}
checkRetAndSetAbandonedLocked(res);
return res;
}
checkRemovedBuffersLocked();
/**
* FenceFD now owned by HAL except in case of error,
* in which case we reassign it to acquire_fence
*/
for (size_t i = 0; i < numBuffersRequested; i++) {
handoutBufferLocked(*(outBuffers->at(i).outBuffer),
&(buffers[i].buffer->handle), /*acquireFence*/buffers[i].fenceFd,
/*releaseFence*/-1, CAMERA_BUFFER_STATUS_OK, /*output*/true);
}
return OK;
}
status_t Camera3OutputStream::queueBufferToConsumer(sp<ANativeWindow>& consumer,
ANativeWindowBuffer* buffer, int anwReleaseFence,
const std::vector<size_t>&) {
return consumer->queueBuffer(consumer.get(), buffer, anwReleaseFence);
}
status_t Camera3OutputStream::returnBufferLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp, nsecs_t readoutTimestamp,
int32_t transform, const std::vector<size_t>& surface_ids) {
ATRACE_HFR_CALL();
if (mHandoutTotalBufferCount == 1) {
returnPrefetchedBuffersLocked();
}
status_t res = returnAnyBufferLocked(buffer, timestamp, readoutTimestamp,
/*output*/true, transform, surface_ids);
if (res != OK) {
return res;
}
mLastTimestamp = timestamp;
mFrameCount++;
return OK;
}
status_t Camera3OutputStream::fixUpHidlJpegBlobHeader(ANativeWindowBuffer* anwBuffer, int fence) {
// Lock the JPEG buffer for CPU read
sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(anwBuffer);
void* mapped = nullptr;
base::unique_fd fenceFd(dup(fence));
// Use USAGE_SW_WRITE_RARELY since we're going to re-write the CameraBlob
// header.
GraphicBufferLocker gbLocker(graphicBuffer);
status_t res =
gbLocker.lockAsync(
GraphicBuffer::USAGE_SW_READ_OFTEN | GraphicBuffer::USAGE_SW_WRITE_RARELY,
&mapped, fenceFd.release());
if (res != OK) {
ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
return res;
}
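// Rough layout of a BLOB-format buffer, where getWidth() is the total buffer
// size in bytes: [ compressed JPEG data | padding | blob header ]. The
// transport-specific header sits flush against the end of the buffer, so the
// HIDL camera_jpeg_blob_t written by the HAL and the AIDL CameraBlob expected
// by clients start at different offsets whenever the two structs differ in size.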
uint8_t *hidlHeaderStart =
static_cast<uint8_t*>(mapped) + graphicBuffer->getWidth() - sizeof(camera_jpeg_blob_t);
// Check that the jpeg buffer is big enough to contain HIDL camera blob
if (hidlHeaderStart < static_cast<uint8_t *>(mapped)) {
ALOGE("%s, jpeg buffer not large enough to fit HIDL camera blob %" PRIu32, __FUNCTION__,
graphicBuffer->getWidth());
return BAD_VALUE;
}
camera_jpeg_blob_t *hidlBlobHeader = reinterpret_cast<camera_jpeg_blob_t *>(hidlHeaderStart);
// Check that the blob is indeed the jpeg blob id.
if (hidlBlobHeader->jpeg_blob_id != CAMERA_JPEG_BLOB_ID) {
ALOGE("%s, jpeg blob id %d is not correct", __FUNCTION__, hidlBlobHeader->jpeg_blob_id);
return BAD_VALUE;
}
// Retrieve id and blob size
CameraBlobId blobId = static_cast<CameraBlobId>(hidlBlobHeader->jpeg_blob_id);
uint32_t blobSizeBytes = hidlBlobHeader->jpeg_size;
if (blobSizeBytes > (graphicBuffer->getWidth() - sizeof(camera_jpeg_blob_t))) {
ALOGE("%s, blobSize in HIDL jpeg blob : %d is corrupt, buffer size %" PRIu32, __FUNCTION__,
blobSizeBytes, graphicBuffer->getWidth());
}
uint8_t *aidlHeaderStart =
static_cast<uint8_t*>(mapped) + graphicBuffer->getWidth() - sizeof(CameraBlob);
// Check that the jpeg buffer is big enough to contain AIDL camera blob
if (aidlHeaderStart < static_cast<uint8_t *>(mapped)) {
ALOGE("%s, jpeg buffer not large enough to fit AIDL camera blob %" PRIu32, __FUNCTION__,
graphicBuffer->getWidth());
return BAD_VALUE;
}
if (static_cast<uint8_t*>(mapped) + blobSizeBytes > aidlHeaderStart) {
ALOGE("%s, jpeg blob with size %d , buffer size %" PRIu32 " not large enough to fit"
" AIDL camera blob without corrupting jpeg", __FUNCTION__, blobSizeBytes,
graphicBuffer->getWidth());
return BAD_VALUE;
}
// Fill in JPEG header
CameraBlob aidlHeader = {
.blobId = blobId,
.blobSizeBytes = static_cast<int32_t>(blobSizeBytes)
};
memcpy(aidlHeaderStart, &aidlHeader, sizeof(CameraBlob));
// No explicit unlock needed: GraphicBufferLocker unlocks the buffer when it
// goes out of scope, including on the early-return error paths above.
return OK;
}
status_t Camera3OutputStream::returnBufferCheckedLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
nsecs_t readoutTimestamp,
bool output,
int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut) {
(void)output;
ALOG_ASSERT(output, "Expected output to be true");
status_t res;
// Fence management - always honor release fence from HAL
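// dup() the fence fd: the ANativeWindow consumes one fd when the buffer is
// queued or cancelled below, while releaseFenceOut hands an independent
// reference back to the caller.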
sp<Fence> releaseFence = new Fence(buffer.release_fence);
int anwReleaseFence = releaseFence->dup();
/**
* Release the lock briefly to avoid deadlock with
* StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this
* thread will go into StreamingProcessor::onFrameAvailable) during
* queueBuffer
*/
sp<ANativeWindow> currentConsumer = mConsumer;
StreamState state = mState;
mLock.unlock();
ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
bool bufferDeferred = false;
/**
* Return buffer back to ANativeWindow
*/
if (buffer.status == CAMERA_BUFFER_STATUS_ERROR || mDropBuffers || timestamp == 0) {
// Cancel buffer
if (mDropBuffers) {
ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
} else if (buffer.status == CAMERA_BUFFER_STATUS_ERROR) {
ALOGV("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
} else {
ALOGE("%s: Stream %d: timestamp shouldn't be 0", __FUNCTION__, mId);
res = currentConsumer->cancelBuffer(currentConsumer.get(),
anwReleaseFence);
if (shouldLogError(res, state)) {
ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
" %s (%d)", __FUNCTION__, mId, strerror(-res), res);
}
notifyBufferReleased(anwBuffer);
if (mUseBufferManager) {
// Return this buffer back to buffer manager.
mBufferProducerListener->onBufferReleased();
}
} else {
if (mTraceFirstBuffer && (stream_type == CAMERA_STREAM_OUTPUT)) {
{
char traceLog[48];
snprintf(traceLog, sizeof(traceLog), "Stream %d: first full buffer\n", mId);
ATRACE_NAME(traceLog);
}
mTraceFirstBuffer = false;
}
// Fix CameraBlob id type discrepancy between HIDL and AIDL, details : http://b/229688810
if (getFormat() == HAL_PIXEL_FORMAT_BLOB && getDataSpace() == HAL_DATASPACE_V0_JFIF) {
if (mIPCTransport == IPCTransport::HIDL) {
fixUpHidlJpegBlobHeader(anwBuffer, anwReleaseFence);
}
// If this is a JPEG output, and image dump mask is set, save image to
// disk.
if (mImageDumpMask) {
dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
}
}
nsecs_t captureTime = (mUseReadoutTime && readoutTimestamp != 0 ?
readoutTimestamp : timestamp) - mTimestampOffset;
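// From here the buffer either goes to the preview frame spacer, which queues
// it to the consumer later at an evenly spaced cadence, or is timestamped and
// queued to the consumer directly.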
if (mPreviewFrameSpacer != nullptr) {
nsecs_t readoutTime = (readoutTimestamp != 0 ? readoutTimestamp : timestamp)
- mTimestampOffset;
res = mPreviewFrameSpacer->queuePreviewBuffer(captureTime, readoutTime,
transform, anwBuffer, anwReleaseFence);
if (res != OK) {
ALOGE("%s: Stream %d: Error queuing buffer to preview buffer spacer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
bufferDeferred = true;
} else {
nsecs_t presentTime = mSyncToDisplay ?
syncTimestampToDisplayLocked(captureTime) : captureTime;
setTransform(transform, true/*mayChangeMirror*/);
res = native_window_set_buffers_timestamp(mConsumer.get(), presentTime);
if (res != OK) {
ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
queueHDRMetadata(anwBuffer->handle, currentConsumer, dynamic_range_profile);
res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
if (shouldLogError(res, state)) {
ALOGE("%s: Stream %d: Error queueing buffer to native window:"
" %s (%d)", __FUNCTION__, mId, strerror(-res), res);
}
}
}
mLock.lock();
if (bufferDeferred) {
mCachedOutputBufferCount++;
}
// Once a valid buffer has been returned to the queue, the stream can no longer
// dequeue all buffers for preallocation.
if (buffer.status != CAMERA_BUFFER_STATUS_ERROR) {
mStreamUnpreparable = true;
}
if (res != OK) {
close(anwReleaseFence);
}
*releaseFenceOut = releaseFence;
return res;
}
void Camera3OutputStream::dump(int fd, const Vector<String16> &args) const {
(void) args;
String8 lines;
lines.appendFormat(" Stream[%d]: Output\n", mId);
lines.appendFormat(" Consumer name: %s\n", mConsumerName.string());
write(fd, lines.string(), lines.size());
Camera3IOStreamBase::dump(fd, args);
mDequeueBufferLatency.dump(fd,
" DequeueBuffer latency histogram:");
status_t Camera3OutputStream::setTransform(int transform, bool mayChangeMirror) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
if (mMirrorMode != OutputConfiguration::MIRROR_MODE_AUTO && mayChangeMirror) {
// If the mirroring mode is not AUTO, do not allow transform update
// which may change mirror.
return OK;
}
return setTransformLocked(transform);
}
status_t Camera3OutputStream::setTransformLocked(int transform) {
status_t res = OK;
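// A transform of -1 means the caller has no new transform to apply; keep the
// current value.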
if (transform == -1) return res;
if (mState == STATE_ERROR) {
ALOGE("%s: Stream in error state", __FUNCTION__);
return INVALID_OPERATION;
}
mTransform = transform;
if (mState == STATE_CONFIGURED) {
res = native_window_set_buffers_transform(mConsumer.get(),
transform);
if (res != OK) {
ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
__FUNCTION__, transform, strerror(-res), res);
}
}
return res;
}
status_t Camera3OutputStream::configureQueueLocked() {
status_t res;
if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
return res;
}
if ((res = configureConsumerQueueLocked(true /*allowPreviewRespace*/)) != OK) {
return res;
}
// Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
// We need to skip these cases as a timeout will disable the non-blocking (async) mode.
if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
if (mUseBufferManager) {
// When the buffer manager is handling the buffer, we should have available buffers in
// the buffer queue before we call into dequeueBuffer, because the buffer manager is
// tracking free buffers.
// There are however some consumer-side features (ImageReader::discardFreeBuffers) that
// can discard free buffers without notifying the buffer manager. We want the timeout to
// happen immediately here so the buffer manager can update its internal state and
// try to allocate a buffer instead of waiting.
mConsumer->setDequeueTimeout(0);
} else {
mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
}
}
return OK;
}
status_t Camera3OutputStream::configureConsumerQueueLocked(bool allowPreviewRespace) {
status_t res;
mTraceFirstBuffer = true;
ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
// Configure consumer-side ANativeWindow interface. The listener may be used
// to notify buffer manager (if it is used) of the returned buffers.
res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
/*reportBufferRemoval*/true,
/*listener*/mBufferProducerListener);
if (res != OK) {
ALOGE("%s: Unable to connect to native window for stream %d",
__FUNCTION__, mId);
return res;
}
mConsumerName = mConsumer->getConsumerName();
res = native_window_set_usage(mConsumer.get(), mUsage);
if (res != OK) {
ALOGE("%s: Unable to configure usage %" PRIu64 " for stream %d",
__FUNCTION__, mUsage, mId);
return res;
}
res = native_window_set_scaling_mode(mConsumer.get(),
NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
if (res != OK) {
ALOGE("%s: Unable to configure stream scaling: %s (%d)",
__FUNCTION__, strerror(-res), res);
return res;
}
if (mMaxSize == 0) {
// For buffers of known size
res = native_window_set_buffers_dimensions(mConsumer.get(),
camera_stream::width, camera_stream::height);
} else {
// For buffers with bounded size
res = native_window_set_buffers_dimensions(mConsumer.get(),
mMaxSize, 1);
}
if (res != OK) {
ALOGE("%s: Unable to configure stream buffer dimensions"
" %d x %d (maxSize %zu) for stream %d",
__FUNCTION__, camera_stream::width, camera_stream::height,
mMaxSize, mId);
return res;
}
res = native_window_set_buffers_format(mConsumer.get(),
camera_stream::format);
if (res != OK) {
ALOGE("%s: Unable to configure stream buffer format %#x for stream %d",
__FUNCTION__, camera_stream::format, mId);
return res;
}
res = native_window_set_buffers_data_space(mConsumer.get(),
camera_stream::data_space);
if (res != OK) {
ALOGE("%s: Unable to configure stream dataspace %#x for stream %d",
__FUNCTION__, camera_stream::data_space, mId);
return res;
}
int maxConsumerBuffers;
res = static_cast<ANativeWindow*>(mConsumer.get())->query(
mConsumer.get(),
NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
if (res != OK) {
ALOGE("%s: Unable to query consumer undequeued"
" buffer count for stream %d", __FUNCTION__, mId);
return res;
}
ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
maxConsumerBuffers, camera_stream::max_buffers);
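// The total buffer count below is the consumer's minimum undequeued count plus
// the HAL's maximum in-flight count, so that neither side can starve the other.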
if (camera_stream::max_buffers == 0) {
ALOGE("%s: Camera HAL requested max_buffer count: %d, requires at least 1",
__FUNCTION__, camera_stream::max_buffers);
return INVALID_OPERATION;
}
mTotalBufferCount = maxConsumerBuffers + camera_stream::max_buffers;
int timestampBase = getTimestampBase();
bool isDefaultTimeBase = (timestampBase ==
OutputConfiguration::TIMESTAMP_BASE_DEFAULT);
if (allowPreviewRespace) {
bool forceChoreographer = (timestampBase ==
OutputConfiguration::TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED);
bool defaultToChoreographer = (isDefaultTimeBase &&
isConsumedByHWComposer());
bool defaultToSpacer = (isDefaultTimeBase &&
isConsumedByHWTexture() &&
!isConsumedByCPU() &&
!isVideoStream());
if (forceChoreographer || defaultToChoreographer) {
// For choreographer synced stream, extra buffers aren't kept by
// camera service. So no need to update mMaxCachedBufferCount.
mTotalBufferCount += kDisplaySyncExtraBuffer;
} else if (defaultToSpacer) {
mPreviewFrameSpacer = new PreviewFrameSpacer(this, mConsumer);
// For preview frame spacer, the extra buffer is kept by camera
// service. So update mMaxCachedBufferCount.
mMaxCachedBufferCount = 1;
mTotalBufferCount += mMaxCachedBufferCount;
res = mPreviewFrameSpacer->run(String8::format("PreviewSpacer-%d", mId).string());
if (res != OK) {
ALOGE("%s: Unable to start preview spacer", __FUNCTION__);
return res;
}
}
}
mHandoutTotalBufferCount = 0;
mFrameCount = 0;
mLastTimestamp = 0;
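// Prefer the sensor readout timestamp when the client explicitly asked for it,
// or when frames are paced to the display, where readout time tracks frame
// completion more closely than start-of-exposure.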
mUseReadoutTime =
(timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR || mSyncToDisplay);
if (isDeviceTimeBaseRealtime()) {
if (isDefaultTimeBase && !isConsumedByHWComposer() && !isVideoStream()) {
// Default time base, but not hardware composer or video encoder
mTimestampOffset = 0;
} else if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME ||
timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR ||
timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR) {
mTimestampOffset = 0;
}
// If timestampBase is CHOREOGRAPHER SYNCED or MONOTONIC, leave
// timestamp offset as bootTime - monotonicTime.
} else {
if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME) {
// Reverse offset for monotonicTime -> bootTime
mTimestampOffset = -mTimestampOffset;
} else {
// If timestampBase is DEFAULT, MONOTONIC, SENSOR, READOUT_SENSOR or
// CHOREOGRAPHER_SYNCED, timestamp offset is 0.
mTimestampOffset = 0;
}
}
res = native_window_set_buffer_count(mConsumer.get(),
mTotalBufferCount);
if (res != OK) {
ALOGE("%s: Unable to set buffer count for stream %d",
__FUNCTION__, mId);
return res;
}
res = native_window_set_buffers_transform(mConsumer.get(),
mTransform);
if (res != OK) {
ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
__FUNCTION__, mTransform, strerror(-res), res);
return res;
}
/**
* Camera3 Buffer manager is only supported by HAL3.3 onwards, as older HALs require
* buffers to be statically allocated for internal static buffer registration, while the
* buffers provided by buffer manager are really dynamically allocated. Camera3Device only
* sets the mBufferManager if device version is > HAL3.2, which guarantees that the buffer
* manager setup is skipped in below code. Note that HAL3.2 is also excluded here, as some
* HAL3.2 devices may not support the dynamic buffer registration.
* Also Camera3BufferManager does not support display/texture streams as they have their own
* buffer management logic.
*/
if (mBufferManager != 0 && mSetId > CAMERA3_STREAM_SET_ID_INVALID &&
!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
uint64_t consumerUsage = 0;
getEndpointUsage(&consumerUsage);
uint32_t width = (mMaxSize == 0) ? getWidth() : mMaxSize;
uint32_t height = (mMaxSize == 0) ? getHeight() : 1;
StreamInfo streamInfo(
getId(), getStreamSetId(), width, height, getFormat(), getDataSpace(),
mUsage | consumerUsage, mTotalBufferCount,
/*isConfigured*/true, isMultiResolution());
wp<Camera3OutputStream> weakThis(this);
res = mBufferManager->registerStream(weakThis,
streamInfo);
if (res == OK) {
// Disable buffer allocation for this BufferQueue, buffer manager will take over
// the buffer allocation responsibility.
mConsumer->getIGraphicBufferProducer()->allowAllocation(false);
mUseBufferManager = true;
} else {
ALOGE("%s: Unable to register stream %d to camera3 buffer manager, "
"(error %d %s), fall back to BufferQueue for buffer management!",
__FUNCTION__, mId, res, strerror(-res));
}
}
return OK;
}
status_t Camera3OutputStream::getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd) {
ATRACE_HFR_CALL();
status_t res;
if ((res = getBufferPreconditionCheckLocked()) != OK) {
return res;
}
bool gotBufferFromManager = false;
if (mUseBufferManager) {
sp<GraphicBuffer> gb;
res = mBufferManager->getBufferForStream(getId(), getStreamSetId(),
isMultiResolution(), &gb, fenceFd);
if (res == OK) {
// Attach this buffer to the bufferQueue: the buffer will be in dequeue state after a
// successful return.
*anb = gb.get();
res = mConsumer->attachBuffer(*anb);
if (res != OK) {
if (shouldLogError(res, mState)) {
ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
}
checkRetAndSetAbandonedLocked(res);
return res;
}
gotBufferFromManager = true;
ALOGV("Stream %d: Attached new buffer", getId());
} else if (res == ALREADY_EXISTS) {
// Have sufficient free buffers already attached, can just
// dequeue from buffer queue
ALOGV("Stream %d: Reusing attached buffer", getId());
gotBufferFromManager = false;
} else if (res != OK) {
ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
}
if (!gotBufferFromManager) {
/**
* Release the lock briefly to avoid deadlock in the following scenario:
* Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
* This thread acquired the StreamingProcessor lock and tries to lock the Camera3Stream lock.
* Thread 2: Camera3Stream::returnBuffer -> StreamingProcessor::onFrameAvailable().
* This thread acquired the Camera3Stream lock and the bufferQueue lock, and tries to lock
* the StreamingProcessor lock.
* Thread 3: Camera3Stream::getBuffer(). This thread acquired the Camera3Stream lock
* and tries to lock the bufferQueue lock.
* This creates a circular locking dependency.
*/
sp<Surface> consumer = mConsumer;
size_t remainingBuffers = (mState == STATE_PREPARING ? mTotalBufferCount :
camera_stream::max_buffers) - mHandoutTotalBufferCount;
mLock.unlock();
nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
size_t batchSize = mBatchSize.load();
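// batchSize > 1 means a high-frame-rate client requested batched dequeues:
// pull up to batchSize buffers from the queue in one call and hand them out
// one at a time from mBatchedBuffers.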
if (batchSize == 1) {
sp<ANativeWindow> anw = consumer;
res = anw->dequeueBuffer(anw.get(), anb, fenceFd);
} else {
std::unique_lock<std::mutex> batchLock(mBatchLock);
res = OK;
if (mBatchedBuffers.size() == 0) {
if (remainingBuffers == 0) {
ALOGE("%s: cannot get buffer while all buffers are handed out", __FUNCTION__);
return INVALID_OPERATION;
}
if (batchSize > remainingBuffers) {
batchSize = remainingBuffers;
}
batchLock.unlock();
// Refill batched buffers
std::vector<Surface::BatchBuffer> batchedBuffers;
batchedBuffers.resize(batchSize);
res = consumer->dequeueBuffers(&batchedBuffers);
batchLock.lock();
if (res != OK) {
ALOGE("%s: batch dequeueBuffers call failed! %s (%d)",
__FUNCTION__, strerror(-res), res);
} else {
mBatchedBuffers = std::move(batchedBuffers);
}
}
if (res == OK) {
// Dispatch batch buffers
*anb = mBatchedBuffers.back().buffer;
*fenceFd = mBatchedBuffers.back().fenceFd;
mBatchedBuffers.pop_back();
}
}
nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
mLock.lock();
if (mUseBufferManager && res == TIMED_OUT) {
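// dequeueBuffer timed out even though the buffer manager expected a free
// buffer (e.g. the consumer silently discarded its free buffers; see
// configureQueueLocked). Re-sync removed buffers, then ask the manager directly.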
checkRemovedBuffersLocked();
sp<GraphicBuffer> gb;
res = mBufferManager->getBufferForStream(
getId(), getStreamSetId(), isMultiResolution(),
&gb, fenceFd, /*noFreeBuffer*/true);
if (res == OK) {
// Attach this buffer to the bufferQueue: the buffer will be in dequeue state after
// a successful return.
*anb = gb.get();
res = mConsumer->attachBuffer(*anb);
gotBufferFromManager = true;
ALOGV("Stream %d: Attached new buffer", getId());
if (res != OK) {
if (shouldLogError(res, mState)) {
ALOGE("%s: Stream %d: Can't attach the output buffer to this surface:"
" %s (%d)", __FUNCTION__, mId, strerror(-res), res);
}
checkRetAndSetAbandonedLocked(res);
return res;
}
} else {
ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager:"
" %s (%d)", __FUNCTION__, mId, strerror(-res), res);
return res;
}
}
if (res != OK) {
if (shouldLogError(res, mState)) {
ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
}
checkRetAndSetAbandonedLocked(res);
return res;
}
}
if (res == OK) {
checkRemovedBuffersLocked();
}
return res;
}
void Camera3OutputStream::checkRemovedBuffersLocked(bool notifyBufferManager) {
std::vector<sp<GraphicBuffer>> removedBuffers;
status_t res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
if (res == OK) {
onBuffersRemovedLocked(removedBuffers);
if (notifyBufferManager && mUseBufferManager && removedBuffers.size() > 0) {
mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), isMultiResolution(),
removedBuffers.size());
}
}
}
void Camera3OutputStream::checkRetAndSetAbandonedLocked(status_t res) {
// Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is
// STATE_PREPARING, let prepareNextBuffer handle the error.)
if ((res == NO_INIT || res == DEAD_OBJECT) && mState == STATE_CONFIGURED) {
mState = STATE_ABANDONED;
}
}
bool Camera3OutputStream::shouldLogError(status_t res, StreamState state) {
if (res == OK) {
return false;
}
if ((res == DEAD_OBJECT || res == NO_INIT) && state == STATE_ABANDONED) {
return false;
}
return true;
}
void Camera3OutputStream::onCachedBufferQueued() {
Mutex::Autolock l(mLock);
mCachedOutputBufferCount--;
// Signal whoever is waiting for the buffer to be returned to the buffer
// queue.
mOutputBufferReturnedSignal.signal();
}
status_t Camera3OutputStream::disconnectLocked() {
status_t res;
if ((res = Camera3IOStreamBase::disconnectLocked()) != OK) {
return res;