Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e0a8144b authored by John L. Hammond's avatar John L. Hammond Committed by Greg Kroah-Hartman
Browse files

staging/lustre/llite: use vui prefix for struct vvp_io members



Rename members of struct vvp_io so that they start with vui_ rather than
cui_.  Rename several instances of struct vvp_io * from cio to vio.

Signed-off-by: John L. Hammond <john.hammond@intel.com>
Reviewed-on: http://review.whamcloud.com/13363
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5971


Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 10cdef73
Loading
Loading
Loading
Loading
+10 −10
Original line number Diff line number Diff line
@@ -1135,18 +1135,18 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
	ll_io_init(io, file, iot == CIT_WRITE);

	if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
		struct vvp_io *cio = vvp_env_io(env);
		struct vvp_io *vio = vvp_env_io(env);
		int write_mutex_locked = 0;

		cio->cui_fd  = LUSTRE_FPRIVATE(file);
		cio->cui_io_subtype = args->via_io_subtype;
		vio->vui_fd  = LUSTRE_FPRIVATE(file);
		vio->vui_io_subtype = args->via_io_subtype;

		switch (cio->cui_io_subtype) {
		switch (vio->vui_io_subtype) {
		case IO_NORMAL:
			cio->cui_iter = args->u.normal.via_iter;
			cio->cui_iocb = args->u.normal.via_iocb;
			vio->vui_iter = args->u.normal.via_iter;
			vio->vui_iocb = args->u.normal.via_iocb;
			if ((iot == CIT_WRITE) &&
			    !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
			    !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
				if (mutex_lock_interruptible(&lli->
							       lli_write_mutex)) {
					result = -ERESTARTSYS;
@@ -1157,11 +1157,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
			down_read(&lli->lli_trunc_sem);
			break;
		case IO_SPLICE:
			cio->u.splice.cui_pipe = args->u.splice.via_pipe;
			cio->u.splice.cui_flags = args->u.splice.via_flags;
			vio->u.splice.vui_pipe = args->u.splice.via_pipe;
			vio->u.splice.vui_flags = args->u.splice.via_flags;
			break;
		default:
			CERROR("Unknown IO type - %u\n", cio->cui_io_subtype);
			CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
			LBUG();
		}
		result = cl_io_loop(env, io);
+17 −17
Original line number Diff line number Diff line
@@ -210,19 +210,19 @@ int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end)
{
	struct vvp_io *cio = vvp_env_io(env);
	struct cl_lock_descr   *descr = &cio->cui_link.cill_descr;
	struct vvp_io          *vio   = vvp_env_io(env);
	struct cl_lock_descr   *descr = &vio->vui_link.cill_descr;
	struct cl_object       *obj   = io->ci_obj;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);

	memset(&cio->cui_link, 0, sizeof(cio->cui_link));
	memset(&vio->vui_link, 0, sizeof(vio->vui_link));

	if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
	if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
		descr->cld_mode = CLM_GROUP;
		descr->cld_gid  = cio->cui_fd->fd_grouplock.cg_gid;
		descr->cld_gid  = vio->vui_fd->fd_grouplock.cg_gid;
	} else {
		descr->cld_mode  = mode;
	}
@@ -231,19 +231,19 @@ int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
	descr->cld_end   = end;
	descr->cld_enq_flags = enqflags;

	cl_io_lock_add(env, io, &cio->cui_link);
	cl_io_lock_add(env, io, &vio->vui_link);
	return 0;
}

void vvp_io_update_iov(const struct lu_env *env,
		       struct vvp_io *cio, struct cl_io *io)
		       struct vvp_io *vio, struct cl_io *io)
{
	size_t size = io->u.ci_rw.crw_count;

	if (!cl_is_normalio(env, io) || !cio->cui_iter)
	if (!cl_is_normalio(env, io) || !vio->vui_iter)
		return;

	iov_iter_truncate(cio->cui_iter, size);
	iov_iter_truncate(vio->vui_iter, size);
}

int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
@@ -266,7 +266,7 @@ void vvp_io_advance(const struct lu_env *env,
		    const struct cl_io_slice *ios,
		    size_t nob)
{
	struct vvp_io    *cio = cl2vvp_io(env, ios);
	struct vvp_io    *vio = cl2vvp_io(env, ios);
	struct cl_io     *io  = ios->cis_io;
	struct cl_object *obj = ios->cis_io->ci_obj;

@@ -275,7 +275,7 @@ void vvp_io_advance(const struct lu_env *env,
	if (!cl_is_normalio(env, io))
		return;

	iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count  -= nob);
	iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count  -= nob);
}

/**
@@ -461,13 +461,13 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)

again:
	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
		struct vvp_io *cio = vvp_env_io(env);
		struct vvp_io *vio = vvp_env_io(env);

		if (attr->ia_valid & ATTR_FILE)
			/* populate the file descriptor for ftruncate to honor
			 * group lock - see LU-787
			 */
			cio->cui_fd = LUSTRE_FPRIVATE(attr->ia_file);
			vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file);

		result = cl_io_loop(env, io);
	} else {
@@ -496,12 +496,12 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
struct vvp_io *cl2vvp_io(const struct lu_env *env,
			 const struct cl_io_slice *slice)
{
	struct vvp_io *cio;
	struct vvp_io *vio;

	cio = container_of(slice, struct vvp_io, cui_cl);
	LASSERT(cio == vvp_env_io(env));
	vio = container_of(slice, struct vvp_io, vui_cl);
	LASSERT(vio == vvp_env_io(env));

	return cio;
	return vio;
}

struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
+3 −3
Original line number Diff line number Diff line
@@ -146,14 +146,14 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct vvp_io *cio = vvp_env_io(env);
		struct vvp_io *vio = vvp_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(cio->cui_cl.cis_io == io);
		LASSERT(vio->vui_cl.cis_io == io);

		/* mmap lock must be MANDATORY it has to cache pages. */
		io->ci_lockreq = CILR_MANDATORY;
		cio->cui_fd = fd;
		vio->vui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
+12 −12
Original line number Diff line number Diff line
@@ -90,7 +90,7 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
	struct lu_env    *env;
	struct cl_io     *io;
	struct cl_object *clob;
	struct vvp_io    *cio;
	struct vvp_io    *vio;

	int refcheck;
	int result = 0;
@@ -108,8 +108,8 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
	lcc->lcc_refcheck = refcheck;
	lcc->lcc_cookie = current;

	cio = vvp_env_io(env);
	io = cio->cui_cl.cis_io;
	vio = vvp_env_io(env);
	io = vio->vui_cl.cis_io;
	lcc->lcc_io = io;
	if (!io) {
		struct inode *inode = file_inode(file);
@@ -125,7 +125,7 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
		struct cl_page   *page;

		LASSERT(io->ci_state == CIS_IO_GOING);
		LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
		LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
		page = cl_page_find(env, clob, vmpage->index, vmpage,
				    CPT_CACHEABLE);
		if (!IS_ERR(page)) {
@@ -553,10 +553,10 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
	spin_lock(&ras->ras_lock);

	/* Enlarge the RA window to encompass the full read */
	if (vio->cui_ra_valid &&
	if (vio->vui_ra_valid &&
	    ras->ras_window_start + ras->ras_window_len <
	    vio->cui_ra_start + vio->cui_ra_count) {
		ras->ras_window_len = vio->cui_ra_start + vio->cui_ra_count -
	    vio->vui_ra_start + vio->vui_ra_count) {
		ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
				      ras->ras_window_start;
	}

@@ -615,15 +615,15 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
	CDEBUG(D_READA, DFID ": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
	       PFID(lu_object_fid(&clob->co_lu)),
	       ria->ria_start, ria->ria_end,
	       vio->cui_ra_valid ? vio->cui_ra_start : 0,
	       vio->cui_ra_valid ? vio->cui_ra_count : 0,
	       vio->vui_ra_valid ? vio->vui_ra_start : 0,
	       vio->vui_ra_valid ? vio->vui_ra_count : 0,
	       hit);

	/* at least to extend the readahead window to cover current read */
	if (!hit && vio->cui_ra_valid &&
	    vio->cui_ra_start + vio->cui_ra_count > ria->ria_start) {
	if (!hit && vio->vui_ra_valid &&
	    vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) {
		/* to the end of current read window. */
		mlen = vio->cui_ra_start + vio->cui_ra_count - ria->ria_start;
		mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start;
		/* trim to RPC boundary */
		start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
		mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start);
+10 −10
Original line number Diff line number Diff line
@@ -376,7 +376,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,

	env = cl_env_get(&refcheck);
	LASSERT(!IS_ERR(env));
	io = vvp_env_io(env)->cui_cl.cis_io;
	io = vvp_env_io(env)->vui_cl.cis_io;
	LASSERT(io);

	/* 0. Need locking between buffered and direct access. and race with
@@ -439,10 +439,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
		inode_unlock(inode);

	if (tot_bytes > 0) {
		struct vvp_io *cio = vvp_env_io(env);
		struct vvp_io *vio = vvp_env_io(env);

		/* no commit async for direct IO */
		cio->u.write.cui_written += tot_bytes;
		vio->u.write.vui_written += tot_bytes;
	}

	cl_env_put(env, &refcheck);
@@ -513,8 +513,8 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
	/* To avoid deadlock, try to lock page first. */
	vmpage = grab_cache_page_nowait(mapping, index);
	if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
		struct vvp_io *cio = vvp_env_io(env);
		struct cl_page_list *plist = &cio->u.write.cui_queue;
		struct vvp_io *vio = vvp_env_io(env);
		struct cl_page_list *plist = &vio->u.write.vui_queue;

		/* if the page is already in dirty cache, we have to commit
		 * the pages right now; otherwise, it may cause deadlock
@@ -595,7 +595,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
	struct ll_cl_context *lcc = fsdata;
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *cio;
	struct vvp_io *vio;
	struct cl_page *page;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	bool unplug = false;
@@ -606,21 +606,21 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
	env  = lcc->lcc_env;
	page = lcc->lcc_page;
	io   = lcc->lcc_io;
	cio  = vvp_env_io(env);
	vio  = vvp_env_io(env);

	LASSERT(cl_page_is_owned(page, io));
	if (copied > 0) {
		struct cl_page_list *plist = &cio->u.write.cui_queue;
		struct cl_page_list *plist = &vio->u.write.vui_queue;

		lcc->lcc_page = NULL; /* page will be queued */

		/* Add it into write queue */
		cl_page_list_add(plist, page);
		if (plist->pl_nr == 1) /* first page */
			cio->u.write.cui_from = from;
			vio->u.write.vui_from = from;
		else
			LASSERT(from == 0);
		cio->u.write.cui_to = from + copied;
		vio->u.write.vui_to = from + copied;

		/* We may have one full RPC, commit it soon */
		if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
Loading