Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2f90a91 authored by Linus Torvalds
Browse files

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [AF_RXRPC]: Return the number of bytes buffered in rxrpc_send_data()
  [IPVS]: Fix state variable on failure to start ipvs threads
  [XFRM]: Fix MTU calculation for non-ESP SAs
parents 0864a4e2 19e6454c
Loading
Loading
Loading
Loading
+39 −2
Original line number Diff line number Diff line
@@ -67,6 +67,11 @@ struct ip_vs_sync_conn_options {
	struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
};

/*
 * Startup data handed to the IPVS sync kernel thread. Allocated with
 * kmalloc() in start_sync_thread() and freed by sync_thread() on exit.
 */
struct ip_vs_sync_thread_data {
	struct completion *startup;	/* completion living on the creator's
					 * stack (DECLARE_COMPLETION_ONSTACK);
					 * NULLed after complete() since the
					 * creator's frame may unwind */
	int state;			/* IP_VS_STATE_MASTER or
					 * IP_VS_STATE_BACKUP */
};

#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
#define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
#define FULL_CONN_SIZE  \
@@ -751,6 +756,7 @@ static int sync_thread(void *startup)
	mm_segment_t oldmm;
	int state;
	const char *name;
	struct ip_vs_sync_thread_data *tinfo = startup;

	/* increase the module use count */
	ip_vs_use_count_inc();
@@ -789,7 +795,14 @@ static int sync_thread(void *startup)
	add_wait_queue(&sync_wait, &wait);

	set_sync_pid(state, current->pid);
	complete((struct completion *)startup);
	complete(tinfo->startup);

	/*
	 * once we call the completion queue above, we should
	 * null out that reference, since its allocated on the
	 * stack of the creating kernel thread
	 */
	tinfo->startup = NULL;

	/* processing master/backup loop here */
	if (state == IP_VS_STATE_MASTER)
@@ -801,6 +814,14 @@ static int sync_thread(void *startup)
	remove_wait_queue(&sync_wait, &wait);

	/* thread exits */

	/*
	 * If we weren't explicitly stopped, then we
	 * exited in error, and should undo our state
	 */
	if ((!stop_master_sync) && (!stop_backup_sync))
		ip_vs_sync_state -= tinfo->state;

	set_sync_pid(state, 0);
	IP_VS_INFO("sync thread stopped!\n");

@@ -812,6 +833,11 @@ static int sync_thread(void *startup)
	set_stop_sync(state, 0);
	wake_up(&stop_sync_wait);

	/*
	 * we need to free the structure that was allocated
	 * for us in start_sync_thread
	 */
	kfree(tinfo);
	return 0;
}

@@ -838,11 +864,19 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
{
	DECLARE_COMPLETION_ONSTACK(startup);
	pid_t pid;
	struct ip_vs_sync_thread_data *tinfo;

	if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
	    (state == IP_VS_STATE_BACKUP && sync_backup_pid))
		return -EEXIST;

	/*
	 * Note that tinfo will be freed in sync_thread on exit
	 */
	tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL);
	if (!tinfo)
		return -ENOMEM;

	IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
	IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
		  sizeof(struct ip_vs_sync_conn));
@@ -858,8 +892,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
		ip_vs_backup_syncid = syncid;
	}

	tinfo->state = state;
	tinfo->startup = &startup;

  repeat:
	if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
	if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) {
		IP_VS_ERR("could not create fork_sync_thread due to %d... "
			  "retrying.\n", pid);
		msleep_interruptible(1000);
+4 −1
Original line number Diff line number Diff line
@@ -640,6 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
			goto efault;
		sp->remain -= copy;
		skb->mark += copy;
		copied += copy;

		len -= copy;
		segment -= copy;
@@ -709,6 +710,8 @@ static int rxrpc_send_data(struct kiocb *iocb,

	} while (segment > 0);

success:
	ret = copied;
out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
@@ -725,7 +728,7 @@ static int rxrpc_send_data(struct kiocb *iocb,

maybe_error:
	if (copied)
		ret = copied;
		goto success;
	goto out;

efault:
+1 −1
Original line number Diff line number Diff line
@@ -1729,7 +1729,7 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu)
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		res = mtu;
		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
}