
Commit 3615b1b2 authored by Sandeep Patil, committed by android-build-merger

Merge changes from topic "ion-unit-tests" am: 6f4f7091 am: 329b861a

am: aafd80ed

Change-Id: I0bbf12431baa4c4dc9e3db897b354a3ac5b8e605
parents 43089f40 aafd80ed
libion/ion.c
+3 −0
@@ -152,6 +152,8 @@ int ion_alloc_fd(int fd, size_t len, size_t align, unsigned int heap_mask, unsig
     ion_user_handle_t handle;
     int ret;
 
+    if (!handle_fd) return -EINVAL;
+
     if (!ion_is_legacy(fd)) {
         struct ion_new_allocation_data data = {
             .len = len,
@@ -201,6 +203,7 @@ int ion_query_heap_cnt(int fd, int* cnt) {
     int ret;
     struct ion_heap_query query;
 
+    if (!cnt) return -EINVAL;
     memset(&query, 0, sizeof(query));
 
     ret = ion_ioctl(fd, ION_IOC_HEAP_QUERY, &query);
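The two guards above make the NULL-argument behavior observable to callers: both functions now fail fast with -EINVAL before touching the kernel. A minimal sketch of how that could be exercised, assuming a gtest harness and a device exposing /dev/ion; the test name and heap_mask value are illustrative, not part of this change:

#include <errno.h>

#include <gtest/gtest.h>

#include <ion/ion.h>

// Hypothetical test, not part of this commit: with the guards above, a NULL
// out-parameter returns -EINVAL immediately instead of faulting later.  The
// heap_mask value is irrelevant because the guard fires before any ioctl.
TEST(IonNullArgs, ReturnsEinval) {
    int ionfd = ion_open();
    ASSERT_GE(ionfd, 0);
    EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, 4096, 0, /*heap_mask=*/1, 0, /*handle_fd=*/nullptr));
    EXPECT_EQ(-EINVAL, ion_query_heap_cnt(ionfd, /*cnt=*/nullptr));
    ASSERT_EQ(0, ion_close(ionfd));
}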
libion/tests/Android.bp
+3 −6
@@ -18,18 +18,15 @@ cc_test {
     name: "ion-unit-tests",
     cflags: [
         "-g",
         "-Wall",
         "-Werror",
         "-Wno-missing-field-initializers",
     ],
     shared_libs: ["libion"],
     srcs: [
-        "ion_test_fixture.cpp",
         "allocate_test.cpp",
-        "formerly_valid_handle_test.cpp",
+        "exit_test.cpp",
+        "heap_query.cpp",
         "invalid_values_test.cpp",
+        "ion_test_fixture.cpp",
         "map_test.cpp",
-        "device_test.cpp",
-        "exit_test.cpp",
     ],
 }
libion/tests/allocate_test.cpp
+60 −57
@@ -14,92 +14,103 @@
  * limitations under the License.
  */
 
-#include <memory>
 #include <sys/mman.h>
+#include <memory>
 
 #include <gtest/gtest.h>
 
 #include <ion/ion.h>
 #include "ion_test_fixture.h"
 
-class Allocate : public IonAllHeapsTest {
-};
+class Allocate : public IonTest {};
 
-TEST_F(Allocate, Allocate)
-{
+TEST_F(Allocate, Allocate) {
     static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
-    for (unsigned int heapMask : m_allHeaps) {
+    for (const auto& heap : ion_heaps) {
         for (size_t size : allocationSizes) {
-            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
+            SCOPED_TRACE(::testing::Message()
+                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
             SCOPED_TRACE(::testing::Message() << "size " << size);
-            ion_user_handle_t handle = 0;
-            ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
-            ASSERT_TRUE(handle != 0);
-            ASSERT_EQ(0, ion_free(m_ionFd, handle));
+            int fd;
+            ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &fd));
+            ASSERT_TRUE(fd != 0);
+            ASSERT_EQ(close(fd), 0);  // free the buffer
         }
     }
 }
 
-TEST_F(Allocate, AllocateCached)
-{
+TEST_F(Allocate, AllocateCached) {
     static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
-    for (unsigned int heapMask : m_allHeaps) {
+    for (const auto& heap : ion_heaps) {
         for (size_t size : allocationSizes) {
-            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
+            SCOPED_TRACE(::testing::Message()
+                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
             SCOPED_TRACE(::testing::Message() << "size " << size);
-            ion_user_handle_t handle = 0;
-            ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED, &handle));
-            ASSERT_TRUE(handle != 0);
-            ASSERT_EQ(0, ion_free(m_ionFd, handle));
+            int fd;
+            ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), ION_FLAG_CACHED, &fd));
+            ASSERT_TRUE(fd != 0);
+            ASSERT_EQ(close(fd), 0);  // free the buffer
        }
     }
 }
 
-TEST_F(Allocate, AllocateCachedNeedsSync)
-{
+TEST_F(Allocate, AllocateCachedNeedsSync) {
     static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
-    for (unsigned int heapMask : m_allHeaps) {
+    for (const auto& heap : ion_heaps) {
        for (size_t size : allocationSizes) {
-            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
+            SCOPED_TRACE(::testing::Message()
+                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
             SCOPED_TRACE(::testing::Message() << "size " << size);
-            ion_user_handle_t handle = 0;
-            ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED_NEEDS_SYNC, &handle));
-            ASSERT_TRUE(handle != 0);
-            ASSERT_EQ(0, ion_free(m_ionFd, handle));
+            int fd;
+            ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
+                                      ION_FLAG_CACHED_NEEDS_SYNC, &fd));
+            ASSERT_TRUE(fd != 0);
+            ASSERT_EQ(close(fd), 0);  // free the buffer
        }
     }
 }
 
-TEST_F(Allocate, RepeatedAllocate)
-{
+TEST_F(Allocate, RepeatedAllocate) {
     static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
-    for (unsigned int heapMask : m_allHeaps) {
+    for (const auto& heap : ion_heaps) {
         for (size_t size : allocationSizes) {
-            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
+            SCOPED_TRACE(::testing::Message()
+                         << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
             SCOPED_TRACE(::testing::Message() << "size " << size);
-            ion_user_handle_t handle = 0;
+            int fd;
 
             for (unsigned int i = 0; i < 1024; i++) {
                 SCOPED_TRACE(::testing::Message() << "iteration " << i);
-                ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
-                ASSERT_TRUE(handle != 0);
-                ASSERT_EQ(0, ion_free(m_ionFd, handle));
+                ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &fd));
+                ASSERT_TRUE(fd != 0);
+                ASSERT_EQ(close(fd), 0);  // free the buffer
             }
         }
     }
 }
 
+TEST_F(Allocate, Large) {
+    for (const auto& heap : ion_heaps) {
+        SCOPED_TRACE(::testing::Message()
+                     << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
+        int fd;
+        ASSERT_EQ(-ENOMEM,
+                  ion_alloc_fd(ionfd, 3UL * 1024 * 1024 * 1024, 0, (1 << heap.heap_id), 0, &fd));
+    }
+}
+
-TEST_F(Allocate, Zeroed)
-{
+// Make sure all heaps always return zeroed pages
+TEST_F(Allocate, Zeroed) {
     auto zeroes_ptr = std::make_unique<char[]>(4096);
 
-    for (unsigned int heapMask : m_allHeaps) {
-        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
+    for (const auto& heap : ion_heaps) {
+        SCOPED_TRACE(::testing::Message()
+                     << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
         int fds[16];
         for (unsigned int i = 0; i < 16; i++) {
             int map_fd = -1;
 
-            ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, 0, &map_fd));
+            ASSERT_EQ(0, ion_alloc_fd(ionfd, 4096, 0, (1 << heap.heap_id), 0, &map_fd));
             ASSERT_GE(map_fd, 0);
 
             void* ptr = NULL;
@@ -116,10 +127,10 @@ TEST_F(Allocate, Zeroed)
             ASSERT_EQ(0, close(fds[i]));
         }
 
-        int newIonFd = ion_open();
+        int new_ionfd = ion_open();
         int map_fd = -1;
 
-        ASSERT_EQ(0, ion_alloc_fd(newIonFd, 4096, 0, heapMask, 0, &map_fd));
+        ASSERT_EQ(0, ion_alloc_fd(new_ionfd, 4096, 0, (1 << heap.heap_id), 0, &map_fd));
        ASSERT_GE(map_fd, 0);
 
         void* ptr = NULL;
@@ -130,14 +141,6 @@ TEST_F(Allocate, Zeroed)
 
         ASSERT_EQ(0, munmap(ptr, 4096));
         ASSERT_EQ(0, close(map_fd));
-    }
-}
-
-TEST_F(Allocate, Large)
-{
-    for (unsigned int heapMask : m_allHeaps) {
-            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
-        ion_user_handle_t handle = 0;
-        ASSERT_EQ(-ENOMEM, ion_alloc(m_ionFd, 3UL*1024*1024*1024, 0, heapMask, 0, &handle));
+        ASSERT_EQ(0, ion_close(new_ionfd));
     }
 }
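The rewrite above replaces the legacy handle-based calls (ion_alloc/ion_free on an ion_user_handle_t) with the dma-buf fd flow: ion_alloc_fd() hands back a file descriptor, and closing the last descriptor frees the buffer. A minimal standalone sketch of that flow, assuming <ion/ion.h> is available and heap_id was obtained from a heap query; the helper name is illustrative, not part of this commit:

#include <sys/mman.h>
#include <unistd.h>

#include <ion/ion.h>

// Illustrative helper: allocate one buffer from the given heap, map it, use
// it, then release it.  With the fd-based API there is no ion_user_handle_t
// to track; the buffer lives as long as a dma-buf fd or a mapping of it does.
int alloc_map_free(unsigned int heap_id, size_t len) {
    int ionfd = ion_open();
    if (ionfd < 0) return -1;

    int buf_fd = -1;
    if (ion_alloc_fd(ionfd, len, 0, 1u << heap_id, 0, &buf_fd) != 0) {
        ion_close(ionfd);
        return -1;
    }

    void* ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
    if (ptr != MAP_FAILED) {
        /* ... use the buffer ... */
        munmap(ptr, len);
    }

    close(buf_fd);     // dropping the last reference frees the buffer
    ion_close(ionfd);  // the buffer does not depend on the /dev/ion fd
    return 0;
}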

libion/tests/device_test.cpp

deleted 100644 → 0
+0 −546
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>
#include <memory>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <linux/ion_test.h>

#include <gtest/gtest.h>

#include <ion/ion.h>

#include "ion_test_fixture.h"

#define ALIGN(x,y) (((x) + ((y) - 1)) & ~((y) - 1))

class Device : public IonAllHeapsTest {
 public:
    virtual void SetUp();
    virtual void TearDown();
    int m_deviceFd;
    void readDMA(int fd, void *buf, size_t size);
    void writeDMA(int fd, void *buf, size_t size);
    void readKernel(int fd, void *buf, size_t size);
    void writeKernel(int fd, void *buf, size_t size);
    void blowCache();
    void dirtyCache(void *ptr, size_t size);
};

void Device::SetUp()
{
    IonAllHeapsTest::SetUp();
    m_deviceFd = open("/dev/ion-test", O_RDONLY);
    ASSERT_GE(m_deviceFd, 0);
}

void Device::TearDown()
{
    ASSERT_EQ(0, close(m_deviceFd));
    IonAllHeapsTest::TearDown();
}

void Device::readDMA(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 0,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_DMA_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

void Device::writeDMA(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 1,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_DMA_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

void Device::readKernel(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 0,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_KERNEL_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

void Device::writeKernel(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 1,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_KERNEL_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

void Device::blowCache()
{
    const size_t bigger_than_cache = 8*1024*1024;
    void *buf1 = malloc(bigger_than_cache);
    void *buf2 = malloc(bigger_than_cache);
    memset(buf1, 0xaa, bigger_than_cache);
    memcpy(buf2, buf1, bigger_than_cache);
    free(buf1);
    free(buf2);
}

void Device::dirtyCache(void *ptr, size_t size)
{
    /* try to dirty cache lines */
    for (size_t i = size-1; i > 0; i--) {
        ((volatile char *)ptr)[i];
        ((char *)ptr)[i] = i;
    }
}

TEST_F(Device, KernelReadCached)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char*)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char*)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, KernelWriteCached)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAReadCached)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAWriteCached)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, KernelReadCachedNeedsSync)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char*)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char*)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, KernelWriteCachedNeedsSync)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAReadCachedNeedsSync)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ion_sync_fd(m_ionFd, map_fd);

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAWriteCachedNeedsSync)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        ion_sync_fd(m_ionFd, map_fd);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, KernelRead)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char*)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char*)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, KernelWrite)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMARead)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAWrite)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, IsCached)
{
    auto buf_ptr = std::make_unique<char[]>(4096);
    void *buf = buf_ptr.get();

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != NULL);

        dirtyCache(ptr, 4096);

        readDMA(map_fd, buf, 4096);

        bool same = true;
        for (int i = 4096-16; i >= 0; i -= 16)
            if (((char *)buf)[i] != i)
                same = false;
        ASSERT_FALSE(same);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}
+150 −150
File changed. Preview size limit exceeded, changes collapsed.