
Commit 42281d20 authored by Andy King, committed by Greg Kroah-Hartman

VMCI: Remove dependency on BLOCK I/O



No need to bring in device-mapper.h, and along with it a dependency on BLOCK I/O,
just to use dm_div_up().  Just use the existing DIV_ROUND_UP() instead.

Reported-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Andy King <acking@vmware.com>
Signed-off-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bad7d9df
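
For context on why this is a drop-in change: DIV_ROUND_UP() is the kernel's generic ceiling-division macro (at the time of this patch defined in include/linux/kernel.h, which this file already includes), whereas dm_div_up() performs the same rounded-up division but lives in device-mapper.h and drags the block layer in with it. A minimal sketch of the semantics, where the helper name and the byte count in the comment are purely illustrative:

/* The generic kernel macro being substituted in is essentially:
 *   #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
 * i.e. integer division of n by d, rounded up to the next whole unit.
 */

/* Hypothetical helper: how many PAGE_SIZE pages are needed for "size" bytes. */
static inline u64 bytes_to_pages(u64 size)
{
	return DIV_ROUND_UP(size, PAGE_SIZE);	/* e.g. 5000 bytes with 4 KiB pages -> 2 */
}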
drivers/misc/vmw_vmci/vmci_queue_pair.c  +16 −12
@@ -13,12 +13,16 @@
  * for more details.
  */
 
-#include <linux/device-mapper.h>
 #include <linux/vmw_vmci_defs.h>
 #include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/wait.h>
 
@@ -247,8 +251,8 @@ static struct qp_list qp_guest_endpoints = {
 
 #define INVALID_VMCI_GUEST_MEM_ID  0
 #define QPE_NUM_PAGES(_QPE) ((u32) \
-			     (dm_div_up(_QPE.produce_size, PAGE_SIZE) +	 \
-			      dm_div_up(_QPE.consume_size, PAGE_SIZE) + 2))
+			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
+			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
 
 
 /*
@@ -260,7 +264,7 @@ static void qp_free_queue(void *q, u64 size)
 	struct vmci_queue *queue = q;
 
 	if (queue) {
-		u64 i = dm_div_up(size, PAGE_SIZE);
+		u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
 
 		if (queue->kernel_if->mapped) {
 			vunmap(queue->kernel_if->va);
@@ -289,7 +293,7 @@ static void *qp_alloc_queue(u64 size, u32 flags)
 	u64 i;
 	struct vmci_queue *queue;
 	struct vmci_queue_header *q_header;
-	const u64 num_data_pages = dm_div_up(size, PAGE_SIZE);
+	const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
 	const uint queue_size =
 	    PAGE_SIZE +
 	    sizeof(*queue) + sizeof(*(queue->kernel_if)) +
@@ -611,7 +615,7 @@ static int qp_memcpy_from_queue_iov(void *dest,
 static struct vmci_queue *qp_host_alloc_queue(u64 size)
 {
 	struct vmci_queue *queue;
-	const size_t num_pages = dm_div_up(size, PAGE_SIZE) + 1;
+	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
 	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
 	const size_t queue_page_size =
 	    num_pages * sizeof(*queue->kernel_if->page);
@@ -963,8 +967,8 @@ qp_guest_endpoint_create(struct vmci_handle handle,
 	int result;
 	struct qp_guest_endpoint *entry;
 	/* One page each for the queue headers. */
-	const u64 num_ppns = dm_div_up(produce_size, PAGE_SIZE) +
-	    dm_div_up(consume_size, PAGE_SIZE) + 2;
+	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
+	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
 
 	if (vmci_handle_is_invalid(handle)) {
 		u32 context_id = vmci_get_context_id();
@@ -1175,9 +1179,9 @@ static int qp_alloc_guest_work(struct vmci_handle *handle,
 			       u32 priv_flags)
 {
 	const u64 num_produce_pages =
-	    dm_div_up(produce_size, PAGE_SIZE) + 1;
+	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
 	const u64 num_consume_pages =
-	    dm_div_up(consume_size, PAGE_SIZE) + 1;
+	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
 	void *my_produce_q = NULL;
 	void *my_consume_q = NULL;
 	int result;
@@ -1456,7 +1460,7 @@ static int qp_broker_create(struct vmci_handle handle,
 		entry->state = VMCIQPB_CREATED_MEM;
 		entry->produce_q->q_header = entry->local_mem;
 		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
-		    (dm_div_up(entry->qp.produce_size, PAGE_SIZE) + 1);
+		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
 		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
 	} else if (page_store) {
 		/*
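
As a side note on the arithmetic the patch touches: each queue pair reserves one page per queue for its header on top of the data pages, which is where the "+ 2" in QPE_NUM_PAGES()/num_ppns (and the "+ 1" in the single-queue host paths) comes from. A standalone sketch of that accounting, using hypothetical queue sizes and a userspace re-definition of the macro purely for illustration:

/* Standalone sketch of the page accounting above; queue sizes are
 * hypothetical and the macro mirrors the kernel's DIV_ROUND_UP().
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE          4096ULL                      /* assume 4 KiB pages */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))      /* ceiling division */

int main(void)
{
	uint64_t produce_size = 256 * 1024;	/* hypothetical 256 KiB produce queue */
	uint64_t consume_size = 64 * 1024;	/* hypothetical 64 KiB consume queue */

	/* Data pages for each queue, rounded up to whole pages. */
	uint64_t produce_pages = DIV_ROUND_UP(produce_size, PAGE_SIZE);
	uint64_t consume_pages = DIV_ROUND_UP(consume_size, PAGE_SIZE);

	/* "+ 2": one extra page per queue for its queue header,
	 * matching QPE_NUM_PAGES()/num_ppns in the patch. */
	uint64_t total_ppns = produce_pages + consume_pages + 2;

	printf("produce: %llu pages, consume: %llu pages, total PPNs: %llu\n",
	       (unsigned long long)produce_pages,
	       (unsigned long long)consume_pages,
	       (unsigned long long)total_ppns);
	return 0;
}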