Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1e05a85f authored by Treehugger Robot's avatar Treehugger Robot Committed by Gerrit Code Review
Browse files

Merge changes from topic "c2allocatorbuf"

* changes:
  codec2: C2Store: Integrate C2DmaBufAllocator as a possible allocator
  codec2: Add C2AllocatorBuf to implement a libdmabufheap BufferAllocator
parents d5e35f0c a7002cb8
Loading
Loading
Loading
Loading
+29 −0
Original line number Diff line number Diff line
@@ -254,6 +254,8 @@ enum C2ParamIndexKind : C2Param::type_index_t {
    kParamIndexTunneledMode, // struct
    kParamIndexTunnelHandle, // int32[]
    kParamIndexTunnelSystemTime, // int64

    kParamIndexStoreDmaBufUsage,  // store, struct
};

}
@@ -2040,6 +2042,33 @@ struct C2StoreIonUsageStruct {
typedef C2GlobalParam<C2Info, C2StoreIonUsageStruct, kParamIndexStoreIonUsage>
        C2StoreIonUsageInfo;

/**
 * This structure describes the preferred DMA-Buf allocation parameters for a given memory usage.
 */
struct C2StoreDmaBufUsageStruct {
    // Zero-fill the fixed-size prefix; safe only while this struct remains
    // trivially copyable (no vtable, no non-trivial members).
    inline C2StoreDmaBufUsageStruct() { memset(this, 0, sizeof(*this)); }

    // flexCount is the size of the flexible heapName[] array allocated past
    // the end of this struct (see AllocUnique/AllocShared).
    inline C2StoreDmaBufUsageStruct(size_t flexCount, uint64_t usage_, uint32_t capacity_)
        : usage(usage_), capacity(capacity_), allocFlags(0) {
        memset(heapName, 0, flexCount);
    }

    uint64_t usage;                         ///< C2MemoryUsage
    uint32_t capacity;                      ///< capacity
    int32_t allocFlags;                     ///< ion allocation flags
    char heapName[];                        ///< dmabuf heap name

    DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(StoreDmaBufUsage, heapName)
    C2FIELD(usage, "usage")
    C2FIELD(capacity, "capacity")
    C2FIELD(allocFlags, "alloc-flags")
    C2FIELD(heapName, "heap-name")
};

// store, private
typedef C2GlobalParam<C2Info, C2StoreDmaBufUsageStruct, kParamIndexStoreDmaBufUsage>
        C2StoreDmaBufUsageInfo;

/**
 * Flexible pixel format descriptors
 */
+21 −0
Original line number Diff line number Diff line
@@ -122,6 +122,18 @@ private:
                })
                .withSetter(SetIonUsage)
                .build());

            addParameter(
                DefineParam(mDmaBufUsageInfo, "dmabuf-usage")
                .withDefault(new C2StoreDmaBufUsageInfo())
                .withFields({
                    C2F(mDmaBufUsageInfo, usage).flags({C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE}),
                    C2F(mDmaBufUsageInfo, capacity).inRange(0, UINT32_MAX, 1024),
                    C2F(mDmaBufUsageInfo, heapName).any(),
                    C2F(mDmaBufUsageInfo, allocFlags).flags({}),
                })
                .withSetter(SetDmaBufUsage)
                .build());
        }

        virtual ~Interface() = default;
@@ -135,7 +147,16 @@ private:
            return C2R::Ok();
        }

        // Maps a requested usage/capacity to a dmabuf heap name and alloc
        // flags. Default implementation always selects the "system" heap.
        static C2R SetDmaBufUsage(bool /* mayBlock */, C2P<C2StoreDmaBufUsageInfo> &me) {
            // Vendor's TODO: put appropriate mapping logic
            size_t nameCapacity = me.v.flexCount();
            if (nameCapacity > 0) {
                strncpy(me.set().m.heapName, "system", nameCapacity);
                // strncpy() does not NUL-terminate on truncation; force it so
                // readers of heapName never run off the flex buffer.
                me.set().m.heapName[nameCapacity - 1] = '\0';
            }
            me.set().m.allocFlags = 0;
            return C2R::Ok();
        }


        std::shared_ptr<C2StoreIonUsageInfo> mIonUsageInfo;
        std::shared_ptr<C2StoreDmaBufUsageInfo> mDmaBufUsageInfo;
    };
    std::shared_ptr<C2ReflectorHelper> mReflectorHelper;
    Interface mInterface;
+2 −0
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@ cc_library_shared {
        "C2AllocatorGralloc.cpp",
        "C2Buffer.cpp",
        "C2Config.cpp",
        "C2DmaBufAllocator.cpp",
        "C2PlatformStorePluginLoader.cpp",
        "C2Store.cpp",
        "platform/C2BqBuffer.cpp",
@@ -64,6 +65,7 @@ cc_library_shared {
        "libhardware",
        "libhidlbase",
        "libion",
        "libdmabufheap",
        "libfmq",
        "liblog",
        "libnativewindow",
+401 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2DmaBufAllocator"
#include <BufferAllocator/BufferAllocator.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2DmaBufAllocator.h>
#include <C2ErrnoUtils.h>
#include <linux/ion.h>
#include <sys/mman.h>
#include <unistd.h>  // getpagesize, size_t, close, dup
#include <utils/Log.h>

#include <list>

#ifdef __ANDROID_APEX__
#include <android-base/properties.h>
#endif

namespace android {

namespace {
constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
}

/* =========================== BUFFER HANDLE =========================== */
/**
 * Buffer handle
 *
 * Stores dmabuf fd & metadata
 *
 * This handle will not capture mapped fd-s as updating that would require a
 * global mutex.
 */

struct C2HandleBuf : public C2Handle {
    // Packs the dmabuf fd, the 64-bit size (split into two 32-bit ints), and
    // a magic marker behind a standard C2Handle/native_handle header.
    C2HandleBuf(int bufferFd, size_t size)
        : C2Handle(cHeader),
          mFds{bufferFd},
          mInts{int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic} {}

    // True iff |o| carries cHeader and this handle type's magic value.
    static bool IsValid(const C2Handle* const o);

    int bufferFd() const { return mFds.mBuffer; }
    // Reassembles the 64-bit size from the two stored 32-bit halves.
    size_t size() const {
        return size_t(unsigned(mInts.mSizeLo)) | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
    }

   protected:
    struct {
        int mBuffer;  // dmabuf fd
    } mFds;
    struct {
        int mSizeLo;  // low 32-bits of size
        int mSizeHi;  // high 32-bits of size
        int mMagic;
    } mInts;

   private:
    typedef C2HandleBuf _type;
    enum {
        kMagic = '\xc2io\x00',  // multi-char literal tag, checked by IsValid()
        numFds = sizeof(mFds) / sizeof(int),
        numInts = sizeof(mInts) / sizeof(int),
        version = sizeof(C2Handle)
    };
    // constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
    const static C2Handle cHeader;
};

// Out-of-line definition of the canonical header every valid C2HandleBuf
// shares; IsValid() memcmp()s candidate handles against this.
const C2Handle C2HandleBuf::cHeader = {
        C2HandleBuf::version, C2HandleBuf::numFds, C2HandleBuf::numInts, {}};

// static
bool C2HandleBuf::IsValid(const C2Handle* const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleBuf* other = static_cast<const C2HandleBuf*>(o);
    return other->mInts.mMagic == kMagic;
}

/* =========================== DMABUF ALLOCATION =========================== */
class C2DmaBufAllocation : public C2LinearAllocation {
   public:
    /* Interface methods */
    virtual c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
                            void** addr /* nonnull */) override;
    virtual c2_status_t unmap(void* addr, size_t size, C2Fence* fenceFd) override;
    virtual ~C2DmaBufAllocation() override;
    virtual const C2Handle* handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation>& other) const override;

    // internal methods
    // Allocates a fresh buffer of |size| from |heap_name| via libdmabufheap.
    C2DmaBufAllocation(BufferAllocator& alloc, size_t size, C2String heap_name, unsigned flags,
                       C2Allocator::id_t id);
    // Wraps an already-allocated dmabuf fd (used for prior allocations).
    C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id);

    // C2_OK iff construction succeeded; checked by the allocator before use.
    c2_status_t status() const;

   protected:
    // mmap()s [mapOffset, mapOffset + mapSize) of the dmabuf into *base and
    // reports the user address *addr = *base + alignmentBytes.
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
                                    int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        // NOTE(review): errno is logged even on success, where its value is
        // stale/meaningless.
        ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t*)*base + alignmentBytes;
        }
        return err;
    }

    C2Allocator::id_t mId;
    C2HandleBuf mHandle;
    c2_status_t mInit;  // construction status, returned by status()
    struct Mapping {
        void* addr;             // page-aligned base returned by mmap
        size_t alignmentBytes;  // offset of the user address within the mapping
        size_t size;            // total mapped size (user size + alignmentBytes)
    };
    std::list<Mapping> mMappings;

    // TODO: we could make this encapsulate shared_ptr and copiable
    C2_DO_NOT_COPY(C2DmaBufAllocation);
};

c2_status_t C2DmaBufAllocation::map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
                                    void** addr) {
    (void)fence;  // TODO: wait for fence
    *addr = nullptr;
    // Re-mapping an already mapped buffer is tolerated (logged only, see TODO).
    if (!mMappings.empty()) {
        ALOGV("multiple map");
        // TODO: technically we should return DUPLICATE here, but our block views
        // don't actually unmap, so we end up remapping the buffer multiple times.
        //
        // return C2_DUPLICATE;
    }
    if (size == 0) {
        return C2_BAD_VALUE;
    }

    // Translate the requested C2 usage into mmap protection flags.
    int prot = PROT_NONE;
    int flags = MAP_SHARED;
    if (usage.expected & C2MemoryUsage::CPU_READ) {
        prot |= PROT_READ;
    }
    if (usage.expected & C2MemoryUsage::CPU_WRITE) {
        prot |= PROT_WRITE;
    }

    // mmap requires a page-aligned file offset: map from the enclosing page
    // boundary and remember the intra-page delta so unmap() can recover the
    // real base address and size.
    size_t alignmentBytes = offset % PAGE_SIZE;
    size_t mapOffset = offset - alignmentBytes;
    size_t mapSize = size + alignmentBytes;
    Mapping map = {nullptr, alignmentBytes, mapSize};

    c2_status_t err =
            mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
    if (map.addr) {
        mMappings.push_back(map);
    }
    return err;
}

c2_status_t C2DmaBufAllocation::unmap(void* addr, size_t size, C2Fence* fence) {
    if (mMappings.empty()) {
        ALOGD("tried to unmap unmapped buffer");
        return C2_NOT_FOUND;
    }
    // Find the mapping whose user-visible address and size match. The stored
    // base is page-aligned; the user address sits alignmentBytes past it
    // (see map()).
    for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
        if (addr != (uint8_t*)it->addr + it->alignmentBytes ||
            size + it->alignmentBytes != it->size) {
            continue;
        }
        int err = munmap(it->addr, it->size);
        if (err != 0) {
            ALOGD("munmap failed");
            return c2_map_errno<EINVAL>(errno);
        }
        if (fence) {
            *fence = C2Fence();  // not using fences
        }
        (void)mMappings.erase(it);
        ALOGV("successfully unmapped: %d", mHandle.bufferFd());
        return C2_OK;
    }
    ALOGD("unmap failed to find specified map");
    return C2_BAD_VALUE;
}

// Returns C2_OK iff the allocation (or fd wrap) succeeded at construction.
c2_status_t C2DmaBufAllocation::status() const {
    return mInit;
}

// Id of the allocator that produced this allocation (set at construction).
C2Allocator::id_t C2DmaBufAllocation::getAllocatorId() const {
    return mId;
}

bool C2DmaBufAllocation::equals(const std::shared_ptr<C2LinearAllocation>& other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2DmaBufAllocation> otherAsBuf =
            std::static_pointer_cast<C2DmaBufAllocation>(other);
    return mHandle.bufferFd() == otherAsBuf->mHandle.bufferFd();
}

// Exposes the underlying handle (fd + size); ownership stays with this object.
const C2Handle* C2DmaBufAllocation::handle() const {
    return &mHandle;
}

C2DmaBufAllocation::~C2DmaBufAllocation() {
    // Tear down any mappings the client failed to unmap before releasing the fd.
    if (!mMappings.empty()) {
        ALOGD("Dangling mappings!");
        for (const Mapping& map : mMappings) {
            int err = munmap(map.addr, map.size);
            if (err) ALOGD("munmap failed");
        }
    }
    // Only close the fd when construction succeeded; on failure the handle
    // holds an invalid fd (-1 or a negative error value).
    if (mInit == C2_OK) {
        native_handle_close(&mHandle);
    }
}

C2DmaBufAllocation::C2DmaBufAllocation(BufferAllocator& alloc, size_t size, C2String heap_name,
                                       unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size), mHandle(-1, 0) {
    int bufferFd = -1;
    int ret = 0;

    // NOTE(review): assumes BufferAllocator::Alloc() returns a negative errno
    // value on failure (which c2_map_errno then translates) — confirm against
    // libdmabufheap.
    bufferFd = alloc.Alloc(heap_name, size, flags);
    if (bufferFd < 0) ret = bufferFd;

    mHandle = C2HandleBuf(bufferFd, size);
    mId = id;
    mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
}

C2DmaBufAllocation::C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size), mHandle(-1, 0) {
    // Takes over |shareFd| without dup()-ing it; the destructor will close it.
    // Always succeeds (mInit maps errno 0 -> C2_OK).
    mHandle = C2HandleBuf(shareFd, size);
    mId = id;
    mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(0));
}

/* =========================== DMABUF ALLOCATOR =========================== */
// Publishes the default traits (CPU read/write usage bounds) for this
// linear dmabuf allocator.
C2DmaBufAllocator::C2DmaBufAllocator(id_t id) : mInit(C2_OK) {
    const C2MemoryUsage minUsage = {0, 0};
    const C2MemoryUsage maxUsage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
    mTraits = std::make_shared<Traits>(
            Traits{"android.allocator.dmabuf", id, LINEAR, minUsage, maxUsage});
}

C2Allocator::id_t C2DmaBufAllocator::getId() const {
    // mTraits may be republished concurrently by setUsageMapper().
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2DmaBufAllocator::getName() const {
    // mTraits may be republished concurrently by setUsageMapper().
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2DmaBufAllocator::getTraits() const {
    // Returns a snapshot; setUsageMapper() swaps in a new Traits object
    // rather than mutating the current one.
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

void C2DmaBufAllocator::setUsageMapper(const UsageMapperFn& mapper __unused, uint64_t minUsage,
                                       uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    // Replacing the mapper invalidates every cached usage->heap mapping.
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    // Republish traits with the new usage bounds (new object, old snapshots
    // handed out by getTraits() stay valid).
    Traits traits = {mTraits->name, mTraits->id, LINEAR, C2MemoryUsage(minUsage),
                     C2MemoryUsage(maxUsage)};
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}

// Hash for (usage, capacity) cache keys. A plain XOR of the two hashes
// collides whenever usage == capacity (identity-like integer hashes cancel
// out); mix with the standard golden-ratio hash-combine instead.
std::size_t C2DmaBufAllocator::MapperKeyHash::operator()(const MapperKey& k) const {
    std::size_t h = std::hash<uint64_t>{}(k.first);
    return h ^ (std::hash<size_t>{}(k.second) + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2));
}

// Resolves (usage, capacity) -> (heap name, alloc flags) through the
// installed UsageMapperFn, memoized in an LRU cache keyed on the pair.
// Returns the mapper's status; C2_NO_INIT means "no mapper installed,
// built-in defaults used" and is not a hard failure (see newLinearAllocation).
c2_status_t C2DmaBufAllocator::mapUsage(C2MemoryUsage usage, size_t capacity, C2String* heap_name,
                                        unsigned* flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // align capacity (assumes mBlockSize is a power of two — TODO confirm)
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, heap_name, flags);
        } else {
            // No system-uncached yet, so disabled for now
            if (0 && !(usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)))
                *heap_name = "system-uncached";
            else
                *heap_name = "system";
            *flags = 0;
            res = C2_NO_INIT;
        }
        // add usage to cache; new entries enter at the front (MRU end)
        MapperValue value = std::make_tuple(*heap_name, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // Evict the least-recently-used entry, which lives at the BACK of
            // the list. (Erasing front().first here — as the original code
            // did — would drop the just-inserted key from the cache while
            // pop_back() destroys a different node, leaving the popped key's
            // cache entry holding a dangling list iterator.)
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue& value = entry->second->second;
        std::tie(*heap_name, *flags, res) = value;
    }
    return res;
}

c2_status_t C2DmaBufAllocator::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation>* allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    C2String heap_name;
    unsigned flags = 0;
    // C2_NO_INIT from mapUsage() means "no mapper installed, defaults were
    // used" and is deliberately not treated as a failure.
    c2_status_t ret = mapUsage(usage, capacity, &heap_name, &flags);
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
            mBufferAllocator, capacity, heap_name, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}

c2_status_t C2DmaBufAllocator::priorLinearAllocation(
        const C2Handle* handle, std::shared_ptr<C2LinearAllocation>* allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleBuf::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleBuf* h = static_cast<const C2HandleBuf*>(handle);
    std::shared_ptr<C2DmaBufAllocation> alloc =
            std::make_shared<C2DmaBufAllocation>(h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        // On success the new allocation owns the fd, so only the handle
        // container is deleted here (native_handle_delete, not _close).
        native_handle_delete(
                const_cast<native_handle_t*>(reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

// static
// True iff |o| has the C2HandleBuf layout produced by this allocator.
bool C2DmaBufAllocator::CheckHandle(const C2Handle* const o) {
    return C2HandleBuf::IsValid(o);
}

}  // namespace android
+119 −4
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <C2AllocatorBlob.h>
#include <C2AllocatorGralloc.h>
#include <C2AllocatorIon.h>
#include <C2DmaBufAllocator.h>
#include <C2BufferPriv.h>
#include <C2BqBufferPriv.h>
#include <C2Component.h>
@@ -82,6 +83,7 @@ private:

    /// returns a shared-singleton ion allocator
    std::shared_ptr<C2Allocator> fetchIonAllocator();
    std::shared_ptr<C2Allocator> fetchDmaBufAllocator();

    /// returns a shared-singleton gralloc allocator
    std::shared_ptr<C2Allocator> fetchGrallocAllocator();
@@ -99,6 +101,20 @@ private:
C2PlatformAllocatorStoreImpl::C2PlatformAllocatorStoreImpl() {
}

// Decides once per process whether the legacy ION allocator should be used:
// /dev/ion existing means ION, otherwise DMABUF heaps. The probe result is
// cached (-1 = not yet probed).
static bool using_ion(void) {
    static int cached_result = -1;

    if (cached_result == -1) {
        struct stat buffer;
        if (stat("/dev/ion", &buffer) == 0) {
            cached_result = 1;
            ALOGD("Using ION\n");
        } else {
            cached_result = 0;
            ALOGD("Using DMABUF Heaps\n");
        }
    }
    return (cached_result == 1);
}

c2_status_t C2PlatformAllocatorStoreImpl::fetchAllocator(
        id_t id, std::shared_ptr<C2Allocator> *const allocator) {
    allocator->reset();
@@ -107,8 +123,11 @@ c2_status_t C2PlatformAllocatorStoreImpl::fetchAllocator(
    }
    switch (id) {
    // TODO: should we implement a generic registry for all, and use that?
    case C2PlatformAllocatorStore::ION:
    case C2PlatformAllocatorStore::ION: /* also ::DMABUFHEAP */
        if (using_ion())
            *allocator = fetchIonAllocator();
        else
            *allocator = fetchDmaBufAllocator();
        break;

    case C2PlatformAllocatorStore::GRALLOC:
@@ -142,7 +161,9 @@ c2_status_t C2PlatformAllocatorStoreImpl::fetchAllocator(
namespace {

std::mutex gIonAllocatorMutex;
std::mutex gDmaBufAllocatorMutex;
std::weak_ptr<C2AllocatorIon> gIonAllocator;
std::weak_ptr<C2DmaBufAllocator> gDmaBufAllocator;

void UseComponentStoreForIonAllocator(
        const std::shared_ptr<C2AllocatorIon> allocator,
@@ -197,6 +218,65 @@ void UseComponentStoreForIonAllocator(
    allocator->setUsageMapper(mapper, minUsage, maxUsage, blockSize);
}

// Wires |allocator|'s usage->(heap, flags) mapping to |store|'s
// C2StoreDmaBufUsageInfo parameter, and derives the min/max usage bounds and
// block size from the store's supported values.
void UseComponentStoreForDmaBufAllocator(const std::shared_ptr<C2DmaBufAllocator> allocator,
                                         std::shared_ptr<C2ComponentStore> store) {
    C2DmaBufAllocator::UsageMapperFn mapper;
    const size_t maxHeapNameLen = 128;
    uint64_t minUsage = 0;
    uint64_t maxUsage = C2MemoryUsage(C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE).expected;
    size_t blockSize = getpagesize();

    // query min and max usage as well as block size via supported values
    std::unique_ptr<C2StoreDmaBufUsageInfo> usageInfo;
    usageInfo = C2StoreDmaBufUsageInfo::AllocUnique(maxHeapNameLen);

    std::vector<C2FieldSupportedValuesQuery> query = {
            C2FieldSupportedValuesQuery::Possible(C2ParamField::Make(*usageInfo, usageInfo->m.usage)),
            C2FieldSupportedValuesQuery::Possible(
                    C2ParamField::Make(*usageInfo, usageInfo->m.capacity)),
    };
    c2_status_t res = store->querySupportedValues_sm(query);
    // A mapper is only installed when the store answered; otherwise the
    // allocator falls back to its built-in defaults (see mapUsage()).
    if (res == C2_OK) {
        if (query[0].status == C2_OK) {
            const C2FieldSupportedValues& fsv = query[0].values;
            if (fsv.type == C2FieldSupportedValues::FLAGS && !fsv.values.empty()) {
                // First FLAGS entry is the required minimum; OR-ing all
                // entries yields the maximal supported usage.
                minUsage = fsv.values[0].u64;
                maxUsage = 0;
                for (C2Value::Primitive v : fsv.values) {
                    maxUsage |= v.u64;
                }
            }
        }
        if (query[1].status == C2_OK) {
            const C2FieldSupportedValues& fsv = query[1].values;
            if (fsv.type == C2FieldSupportedValues::RANGE && fsv.range.step.u32 > 0) {
                blockSize = fsv.range.step.u32;
            }
        }

        // The mapper consults the store on every call; results are memoized
        // by the allocator's LRU cache in mapUsage(). maxHeapNameLen is a
        // const integral constant, so it is usable here without capture.
        mapper = [store](C2MemoryUsage usage, size_t capacity, C2String* heapName,
                         unsigned* flags) -> c2_status_t {
            if (capacity > UINT32_MAX) {
                return C2_BAD_VALUE;
            }

            std::unique_ptr<C2StoreDmaBufUsageInfo> usageInfo;
            usageInfo = C2StoreDmaBufUsageInfo::AllocUnique(maxHeapNameLen, usage.expected, capacity);
            std::vector<std::unique_ptr<C2SettingResult>> failures;  // TODO: remove

            c2_status_t res = store->config_sm({&*usageInfo}, &failures);
            if (res == C2_OK) {
                *heapName = C2String(usageInfo->m.heapName);
                *flags = usageInfo->m.allocFlags;
            }

            return res;
        };
    }

    allocator->setUsageMapper(mapper, minUsage, maxUsage, blockSize);
}

}

void C2PlatformAllocatorStoreImpl::setComponentStore(std::shared_ptr<C2ComponentStore> store) {
@@ -233,6 +313,22 @@ std::shared_ptr<C2Allocator> C2PlatformAllocatorStoreImpl::fetchIonAllocator() {
    return allocator;
}

// Returns the process-wide shared C2DmaBufAllocator, creating it (and wiring
// it to the current component store) on first use or after the previous
// instance expired.
std::shared_ptr<C2Allocator> C2PlatformAllocatorStoreImpl::fetchDmaBufAllocator() {
    std::lock_guard<std::mutex> lock(gDmaBufAllocatorMutex);
    std::shared_ptr<C2DmaBufAllocator> allocator = gDmaBufAllocator.lock();
    if (allocator == nullptr) {
        std::shared_ptr<C2ComponentStore> componentStore;
        {
            // Snapshot the store under its own lock to keep lock ordering simple.
            std::lock_guard<std::mutex> lock(_mComponentStoreReadLock);
            componentStore = _mComponentStore;
        }
        allocator = std::make_shared<C2DmaBufAllocator>(C2PlatformAllocatorStore::DMABUFHEAP);
        UseComponentStoreForDmaBufAllocator(allocator, componentStore);
        gDmaBufAllocator = allocator;
    }
    return allocator;
}

std::shared_ptr<C2Allocator> C2PlatformAllocatorStoreImpl::fetchBlobAllocator() {
    static std::mutex mutex;
    static std::weak_ptr<C2Allocator> blobAllocator;
@@ -347,7 +443,7 @@ public:
            allocatorId = GetPreferredLinearAllocatorId(GetCodec2PoolMask());
        }
        switch(allocatorId) {
            case C2PlatformAllocatorStore::ION:
            case C2PlatformAllocatorStore::ION: /* also ::DMABUFHEAP */
                res = allocatorStore->fetchAllocator(
                        C2PlatformAllocatorStore::ION, &allocator);
                if (res == C2_OK) {
@@ -645,6 +741,7 @@ private:

    struct Interface : public C2InterfaceHelper {
        std::shared_ptr<C2StoreIonUsageInfo> mIonUsageInfo;
        std::shared_ptr<C2StoreDmaBufUsageInfo> mDmaBufUsageInfo;

        Interface(std::shared_ptr<C2ReflectorHelper> reflector)
            : C2InterfaceHelper(reflector) {
@@ -680,7 +777,13 @@ private:
                    me.set().minAlignment = 0;
#endif
                    return C2R::Ok();
                }
                };

                // Default usage->heap mapping: always the "system" dmabuf heap.
                static C2R setDmaBufUsage(bool /* mayBlock */, C2P<C2StoreDmaBufUsageInfo> &me) {
                    size_t nameCapacity = me.v.flexCount();
                    if (nameCapacity > 0) {
                        strncpy(me.set().m.heapName, "system", nameCapacity);
                        // strncpy() does not NUL-terminate on truncation;
                        // force termination so heapName is always a C string.
                        me.set().m.heapName[nameCapacity - 1] = '\0';
                    }
                    me.set().m.allocFlags = 0;
                    return C2R::Ok();
                };
            };

            addParameter(
@@ -695,6 +798,18 @@ private:
                })
                .withSetter(Setter::setIonUsage)
                .build());

            addParameter(
                DefineParam(mDmaBufUsageInfo, "dmabuf-usage")
                .withDefault(C2StoreDmaBufUsageInfo::AllocShared(0))
                .withFields({
                    C2F(mDmaBufUsageInfo, m.usage).flags({C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE}),
                    C2F(mDmaBufUsageInfo, m.capacity).inRange(0, UINT32_MAX, 1024),
                    C2F(mDmaBufUsageInfo, m.allocFlags).flags({}),
                    C2F(mDmaBufUsageInfo, m.heapName).any(),
                })
                .withSetter(Setter::setDmaBufUsage)
                .build());
        }
    };

Loading