
Commit a9866ba4 authored by Linus Torvalds
Pull CIFS fixes from Steve French.

* git://git.samba.org/sfrench/cifs-2.6:
  cifs: always update the inode cache with the results from a FIND_*
  cifs: when CONFIG_HIGHMEM is set, serialize the read/write kmaps
  cifs: on CONFIG_HIGHMEM machines, limit the rsize/wsize to the kmap space
  Initialise mid_q_entry before putting it on the pending queue
parents 331ae496 cd60042c
+29 −1
@@ -86,7 +86,31 @@ static struct {
 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
 #endif /* CIFS_POSIX */

-/* Forward declarations */
+#ifdef CONFIG_HIGHMEM
+/*
+ * On arches that have high memory, kmap address space is limited. By
+ * serializing the kmap operations on those arches, we ensure that we don't
+ * end up with a bunch of threads in writeback with partially mapped page
+ * arrays, stuck waiting for kmap to come back. That situation prevents
+ * progress and can deadlock.
+ */
+static DEFINE_MUTEX(cifs_kmap_mutex);
+
+static inline void
+cifs_kmap_lock(void)
+{
+	mutex_lock(&cifs_kmap_mutex);
+}
+
+static inline void
+cifs_kmap_unlock(void)
+{
+	mutex_unlock(&cifs_kmap_mutex);
+}
+#else /* !CONFIG_HIGHMEM */
+#define cifs_kmap_lock() do { ; } while(0)
+#define cifs_kmap_unlock() do { ; } while(0)
+#endif /* CONFIG_HIGHMEM */

 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
@@ -1503,7 +1527,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 	}

 	/* marshal up the page array */
+	cifs_kmap_lock();
 	len = rdata->marshal_iov(rdata, data_len);
+	cifs_kmap_unlock();
 	data_len -= len;

 	/* issue the read if we have any iovecs left to fill */
@@ -2069,7 +2095,9 @@ cifs_async_writev(struct cifs_writedata *wdata)
 	 * and set the iov_len properly for each one. It may also set
 	 * wdata->bytes too.
 	 */
+	cifs_kmap_lock();
 	wdata->marshal_iov(iov, wdata);
+	cifs_kmap_unlock();

 	cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
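Note: the snippet below is an illustrative sketch, not part of this commit, of the kind of page-array marshalling that the new cifs_kmap_lock()/cifs_kmap_unlock() calls bracket. On CONFIG_HIGHMEM kernels, kmap() draws from a small, shared pool of pkmap slots; taking one mutex across the whole marshal step means a thread maps its entire page array (or waits before mapping anything), so several writers can no longer each hold a partially mapped array while waiting on the rest of the pool. The demo_* names are hypothetical.

#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/uio.h>

/* Hypothetical helper: map a whole page array under one mutex so that at
 * most one thread at a time is consuming pkmap slots for a request. */
static DEFINE_MUTEX(demo_kmap_mutex);

static void demo_marshal_pages(struct page **pages, unsigned int npages,
			       struct kvec *iov)
{
	unsigned int i;

	mutex_lock(&demo_kmap_mutex);
	for (i = 0; i < npages; i++) {
		iov[i].iov_base = kmap(pages[i]);	/* may sleep for a pkmap slot */
		iov[i].iov_len = PAGE_SIZE;
	}
	mutex_unlock(&demo_kmap_mutex);
}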

+18 −0
@@ -3445,6 +3445,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
 #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)

+/*
+ * On hosts with high memory, we can't currently support wsize/rsize that are
+ * larger than we can kmap at once. Cap the rsize/wsize at
+ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
+ * larger than that anyway.
+ */
+#ifdef CONFIG_HIGHMEM
+#define CIFS_KMAP_SIZE_LIMIT	(LAST_PKMAP * PAGE_CACHE_SIZE)
+#else /* CONFIG_HIGHMEM */
+#define CIFS_KMAP_SIZE_LIMIT	(1<<24)
+#endif /* CONFIG_HIGHMEM */
+
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 {
@@ -3475,6 +3487,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 		wsize = min_t(unsigned int, wsize,
 				server->maxBuf - sizeof(WRITE_REQ) + 4);

+	/* limit to the amount that we can kmap at once */
+	wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
+
 	/* hard limit of CIFS_MAX_WSIZE */
 	wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);

@@ -3516,6 +3531,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 	if (!(server->capabilities & CAP_LARGE_READ_X))
 		rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);

+	/* limit to the amount that we can kmap at once */
+	rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
+
 	/* hard limit of CIFS_MAX_RSIZE */
 	rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
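For a rough sense of scale (the exact values depend on the architecture and config, so treat these numbers as an illustrative assumption rather than part of the patch): on 32-bit x86 without PAE, LAST_PKMAP is 1024 and PAGE_CACHE_SIZE is 4096 bytes, so CIFS_KMAP_SIZE_LIMIT = 1024 * 4096 = 4 MiB (with PAE, LAST_PKMAP drops to 512, giving 2 MiB). Without CONFIG_HIGHMEM the fallback of 1 << 24 is 16 MiB, which in practice leaves the CIFS_MAX_WSIZE/CIFS_MAX_RSIZE clamps applied just below as the effective ceiling.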

+5 −2
@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,

 	dentry = d_lookup(parent, name);
 	if (dentry) {
-		/* FIXME: check for inode number changes? */
-		if (dentry->d_inode != NULL)
+		inode = dentry->d_inode;
+		/* update inode in place if i_ino didn't change */
+		if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+			cifs_fattr_to_inode(inode, fattr);
 			return dentry;
+		}
 		d_drop(dentry);
 		dput(dentry);
 	}
+14 −12
@@ -365,18 +365,16 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
 	if (mid == NULL)
 		return -ENOMEM;

-	/* put it on the pending_mid_q */
-	spin_lock(&GlobalMid_Lock);
-	list_add_tail(&mid->qhead, &server->pending_mid_q);
-	spin_unlock(&GlobalMid_Lock);
-
 	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
-	if (rc)
-		delete_mid(mid);
-	*ret_mid = mid;
-	return rc;
+	if (rc) {
+		DeleteMidQEntry(mid);
+		return rc;
+	}
+
+	*ret_mid = mid;
+	return 0;
 }

 /*
  * Send a SMB request and set the callback function in the mid to handle
  * the result. Caller is responsible for dealing with timeouts.
@@ -407,17 +405,21 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
 	mid->callback_data = cbdata;
 	mid->mid_state = MID_REQUEST_SUBMITTED;

+	/* put it on the pending_mid_q */
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&mid->qhead, &server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
+
+
 	cifs_in_send_inc(server);
 	rc = smb_sendv(server, iov, nvec);
 	cifs_in_send_dec(server);
 	cifs_save_when_sent(mid);
 	mutex_unlock(&server->srv_mutex);

-	if (rc)
-		goto out_err;
+	if (rc == 0)
+		return 0;

-	return rc;
-out_err:
 	delete_mid(mid);
 	add_credits(server, 1);
 	wake_up(&server->request_q);