Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 68058e75 authored by Steve French
Browse files

[CIFS] Reduce CIFS tcp congestion timeout (it was too long) and backoff
ever longer amounts (up to 15 seconds).  This improves performance
especially when using large wsize.

Signed-off-by: Steve French <sfrench@us.ibm.com>
parent 131afd0b
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -114,7 +114,7 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
				atomic_read(&ses->server->inFlight));

#ifdef CONFIG_CIFS_STATS2
			buf += sprintf(buf, "\tIn Send: %d In MaxReq Wait: %d",
			buf += sprintf(buf, " In Send: %d In MaxReq Wait: %d",
				atomic_read(&ses->server->inSend), 
				atomic_read(&ses->server->num_waiters));
#endif
+13 −3
Original line number Diff line number Diff line
@@ -405,6 +405,16 @@ static struct quotactl_ops cifs_quotactl_ops = {
};
#endif

/* Placeholder ->umount_begin callback: only logs; real teardown not done yet. */
static void cifs_umount_begin(struct super_block * sblock)
{
	/* BB FIXME - finish BB */
	cERROR(1,("kill all tasks now - umount begin not implemented yet"));
}
	

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
@@ -422,7 +432,7 @@ struct super_operations cifs_super_ops = {
   unless later we add lazy close of inodes or unless the kernel forgets to call
   us with the same number of releases (closes) as opens */
	.show_options = cifs_show_options,
/*    .umount_begin   = cifs_umount_begin, *//* consider adding in the future */
/*	.umount_begin   = cifs_umount_begin, */ /* BB finish in the future */
	.remount_fs = cifs_remount,
};

@@ -790,9 +800,7 @@ static int cifs_oplock_thread(void * dummyarg)
	do {
		if(try_to_freeze()) 
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		
		schedule_timeout(1*HZ);  
		spin_lock(&GlobalMid_Lock);
		if(list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
@@ -841,6 +849,8 @@ static int cifs_oplock_thread(void * dummyarg)
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while(!signal_pending(current));
	oplockThread = NULL;
+7 −6
Original line number Diff line number Diff line
@@ -157,14 +157,14 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
		/* smaller timeout here than send2 since smaller size */
		/* Although it may not be required, this also is smaller 
		   oplock break time */  
			if(i > 30) {
			if(i > 12) {
				cERROR(1,
				   ("sends on sock %p stuck for 15 seconds",
				   ("sends on sock %p stuck for 7 seconds",
				    ssocket));
				rc = -EAGAIN;
				break;
			}
			msleep(500);
			msleep(1 << i);
			continue;
		}
		if (rc < 0) 
@@ -224,14 +224,14 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
				    n_vec - first_vec, total_len);
		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
			i++;
			if(i > 40) {
			if(i >= 14) {
				cERROR(1,
				   ("sends on sock %p stuck for 20 seconds",
				   ("sends on sock %p stuck for 15 seconds",
				    ssocket));
				rc = -EAGAIN;
				break;
			}
			msleep(500);
			msleep(1 << i);
			continue;
		}
		if (rc < 0) 
@@ -249,6 +249,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
			continue;
		}
		total_len -= rc;
		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {