Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e8e3c3d6 authored by Cong Wang, committed by Cong Wang
Browse files

fs: remove the second argument of k[un]map_atomic()

parent c6daa7ff
Loading
Loading
Loading
Loading
+15 −15
Original line number Diff line number Diff line
@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring = kmap_atomic(info->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
@@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx)
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);
	kunmap_atomic(ring);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic().  Release the pointer with put_aio_ring_event();
 */
/* Number of io_events that fit in one full page. */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
/* Events that fit in page 0, after the struct aio_ring header. */
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
/* Slot-index bias: how many event slots the ring header displaces in page 0. */
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

/*
 * Map the ring page holding event slot (nr) with kmap_atomic() and
 * return a pointer to that io_event.  The caller must release the
 * mapping with put_aio_ring_event().  pos is biased by
 * AIO_EVENTS_OFFSET because page 0 starts with the ring header.
 */
#define aio_ring_event(info, nr) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

/*
 * Release the atomic mapping backing an event pointer obtained from
 * aio_ring_event().  Masking with PAGE_MASK recovers the page-aligned
 * address that kmap_atomic() originally returned.
 */
#define put_aio_ring_event(event) do {		\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
} while(0)

static void ctx_rcu_free(struct rcu_head *head)
@@ -1019,10 +1019,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
	ring = kmap_atomic(info->ring_pages[0]);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	event = aio_ring_event(info, tail);
	if (++tail >= info->nr)
		tail = 0;

@@ -1043,8 +1043,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);
	put_aio_ring_event(event);
	kunmap_atomic(ring);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

@@ -1089,7 +1089,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring = kmap_atomic(info->ring_pages[0]);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);
@@ -1101,18 +1101,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		struct io_event *evp = aio_ring_event(info, head);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
		put_aio_ring_event(evp);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	kunmap_atomic(ring);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
+5 −5
Original line number Diff line number Diff line
@@ -357,7 +357,7 @@ static void bio_integrity_generate(struct bio *bio)
	bix.sector_size = bi->sector_size;

	bio_for_each_segment(bv, bio, i) {
		void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
		void *kaddr = kmap_atomic(bv->bv_page);
		bix.data_buf = kaddr + bv->bv_offset;
		bix.data_size = bv->bv_len;
		bix.prot_buf = prot_buf;
@@ -371,7 +371,7 @@ static void bio_integrity_generate(struct bio *bio)
		total += sectors * bi->tuple_size;
		BUG_ON(total > bio->bi_integrity->bip_size);

		kunmap_atomic(kaddr, KM_USER0);
		kunmap_atomic(kaddr);
	}
}

@@ -498,7 +498,7 @@ static int bio_integrity_verify(struct bio *bio)
	bix.sector_size = bi->sector_size;

	bio_for_each_segment(bv, bio, i) {
		void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
		void *kaddr = kmap_atomic(bv->bv_page);
		bix.data_buf = kaddr + bv->bv_offset;
		bix.data_size = bv->bv_len;
		bix.prot_buf = prot_buf;
@@ -507,7 +507,7 @@ static int bio_integrity_verify(struct bio *bio)
		ret = bi->verify_fn(&bix);

		if (ret) {
			kunmap_atomic(kaddr, KM_USER0);
			kunmap_atomic(kaddr);
			return ret;
		}

@@ -517,7 +517,7 @@ static int bio_integrity_verify(struct bio *bio)
		total += sectors * bi->tuple_size;
		BUG_ON(total > bio->bi_integrity->bip_size);

		kunmap_atomic(kaddr, KM_USER0);
		kunmap_atomic(kaddr);
	}

	return ret;
+2 −2
Original line number Diff line number Diff line
@@ -1339,13 +1339,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		kaddr = kmap_atomic(page);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		kunmap_atomic(kaddr);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
+2 −2
Original line number Diff line number Diff line
@@ -3371,9 +3371,9 @@ retry:
	if (err)
		goto fail;

	kaddr = kmap_atomic(page, KM_USER0);
	kaddr = kmap_atomic(page);
	memcpy(kaddr, symname, len-1);
	kunmap_atomic(kaddr, KM_USER0);
	kunmap_atomic(kaddr);

	err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
							page, fsdata);
+4 −4
Original line number Diff line number Diff line
@@ -230,7 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
{
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page, KM_USER0);
		return kmap_atomic(buf->page);
	}

	return kmap(buf->page);
@@ -251,7 +251,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data, KM_USER0);
		kunmap_atomic(map_data);
	} else
		kunmap(buf->page);
}
@@ -565,14 +565,14 @@ redo1:
			iov_fault_in_pages_read(iov, chars);
redo2:
			if (atomic)
				src = kmap_atomic(page, KM_USER0);
				src = kmap_atomic(page);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, iov, chars,
							atomic);
			if (atomic)
				kunmap_atomic(src, KM_USER0);
				kunmap_atomic(src);
			else
				kunmap(page);

Loading