
Commit d5751469 authored by Pavel Shilovsky, committed by Steve French

CIFS: Do not kmalloc under the flocks spinlock



Reorganize the code so that the memory is already allocated before the
spinlock'ed loop.

Cc: stable@vger.kernel.org
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Pavel Shilovsky <piastry@etersoft.ru>
Signed-off-by: Steve French <sfrench@us.ibm.com>
parent b0f8ef20
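What the patch fixes: lock_flocks() takes a spinlock, yet the old loop allocated each lock node with kmalloc() (via cifs_lock_init()) while that spinlock was held, and a sleeping allocation is not allowed under a spinlock. The new code counts the POSIX locks under the spinlock, drops it, preallocates one lock_to_push node per lock, and then retakes the spinlock only to copy lock data into the preallocated nodes. The following is a minimal kernel-style sketch of that "count, preallocate, then fill" pattern; it is not code from this commit, and the names demo_item, demo_snapshot and the lock/src/out parameters are invented for illustration. Only the kernel primitives (spinlock, kmalloc, list_head) are real.

/*
 * Illustrative sketch only: count entries under a spinlock, allocate
 * outside of it, then retake the lock and merely copy data.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head llist;
	int value;
};

static int demo_snapshot(spinlock_t *lock, struct list_head *src,
			 struct list_head *out)
{
	struct demo_item *it, *tmp;
	struct list_head *el, *pos;
	unsigned int count = 0, i;

	/* Pass 1: only count the entries under the spinlock. */
	spin_lock(lock);
	list_for_each(pos, src)
		count++;
	spin_unlock(lock);

	/* Allocate everything up front, where GFP_KERNEL may sleep safely. */
	INIT_LIST_HEAD(out);
	for (i = 0; i < count; i++) {
		it = kmalloc(sizeof(*it), GFP_KERNEL);
		if (!it)
			goto err_free;
		list_add_tail(&it->llist, out);
	}

	/* Pass 2: retake the spinlock and only copy data; no allocation. */
	el = out->next;
	spin_lock(lock);
	list_for_each(pos, src) {
		if (el == out)	/* fewer preallocated nodes than entries */
			break;
		it = list_entry(el, struct demo_item, llist);
		it->value = list_entry(pos, struct demo_item, llist)->value;
		el = el->next;
	}
	spin_unlock(lock);
	return 0;

err_free:
	list_for_each_entry_safe(it, tmp, out, llist) {
		list_del(&it->llist);
		kfree(it);
	}
	return -ENOMEM;
}

In the real patch this two-pass scheme is safe because, as the added comment in the diff notes, cinode->lock_mutex is held across both passes, so no locks can be added to the inode between the counting pass and the fill pass.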
+56 −13
@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 	for (lockp = &inode->i_flock; *lockp != NULL; \
 	     lockp = &(*lockp)->fl_next)
 
+struct lock_to_push {
+	struct list_head llist;
+	__u64 offset;
+	__u64 length;
+	__u32 pid;
+	__u16 netfid;
+	__u8 type;
+};
+
 static int
 cifs_push_posix_locks(struct cifsFileInfo *cfile)
 {
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	struct file_lock *flock, **before;
-	struct cifsLockInfo *lck, *tmp;
+	unsigned int count = 0, i = 0;
 	int rc = 0, xid, type;
+	struct list_head locks_to_send, *el;
+	struct lock_to_push *lck, *tmp;
 	__u64 length;
-	struct list_head locks_to_send;
 
 	xid = GetXid();
 
@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 		return rc;
 	}
 
+	lock_flocks();
+	cifs_for_each_lock(cfile->dentry->d_inode, before) {
+		if ((*before)->fl_flags & FL_POSIX)
+			count++;
+	}
+	unlock_flocks();
+
 	INIT_LIST_HEAD(&locks_to_send);
 
+	/*
+	 * Allocating count locks is enough because no locks can be added to
+	 * the list while we are holding cinode->lock_mutex that protects
+	 * locking operations of this inode.
+	 */
+	for (; i < count; i++) {
+		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
+		if (!lck) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		list_add_tail(&lck->llist, &locks_to_send);
+	}
+
+	i = 0;
+	el = locks_to_send.next;
 	lock_flocks();
 	cifs_for_each_lock(cfile->dentry->d_inode, before) {
+		if (el == &locks_to_send) {
+			/* something is really wrong */
+			cERROR(1, "Can't push all brlocks!");
+			break;
+		}
 		flock = *before;
+		if ((flock->fl_flags & FL_POSIX) == 0)
+			continue;
 		length = 1 + flock->fl_end - flock->fl_start;
 		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
 			type = CIFS_RDLCK;
 		else
 			type = CIFS_WRLCK;
-
-		lck = cifs_lock_init(flock->fl_start, length, type,
-				     cfile->netfid);
-		if (!lck) {
-			rc = -ENOMEM;
-			goto send_locks;
-		}
+		lck = list_entry(el, struct lock_to_push, llist);
 		lck->pid = flock->fl_pid;
-
-		list_add_tail(&lck->llist, &locks_to_send);
+		lck->netfid = cfile->netfid;
+		lck->length = length;
+		lck->type = type;
+		lck->offset = flock->fl_start;
+		i++;
+		el = el->next;
 	}
-
-send_locks:
 	unlock_flocks();
 
 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
@@ -979,11 +1015,18 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 		kfree(lck);
 	}
 
+out:
 	cinode->can_cache_brlcks = false;
 	mutex_unlock(&cinode->lock_mutex);
 
 	FreeXid(xid);
 	return rc;
+err_out:
+	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+		list_del(&lck->llist);
+		kfree(lck);
+	}
+	goto out;
 }
 
 static int