Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2d175d43 authored by Linus Torvalds
Browse files
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  [TIPC]: Add tipc_config.h to include/linux/Kbuild.
  [WAN]: lmc_ioctl: don't return with locks held
  [SUNRPC]: fix rpc debugging
  [TCP]: Saner thash_entries default with much memory.
  [SUNRPC] rpc_rdma: we need to cast u64 to unsigned long long for printing
  [IPv4] SNMP: Refer correct memory location to display ICMP out-going statistics
  [NET]: Fix error reporting in sys_socketpair().
  [NETFILTER]: nf_ct_alloc_hashtable(): use __GFP_NOWARN
  [NET]: Fix race between poll_napi() and net_rx_action()
  [TCP] MD5: Remove some more unnecessary casting.
  [TCP] vegas: Fix a bug in disabling slow start by gamma parameter.
  [IPVS]: use proper timeout instead of fixed value
  [IPV6] NDISC: Fix setting base_reachable_time_ms variable.
parents c2db6376 502ef38d
Loading
Loading
Loading
Loading
+34 −21
Original line number Diff line number Diff line
@@ -143,7 +143,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
         */
    case LMCIOCGINFO: /*fold01*/
	if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
            return -EFAULT;
		ret = -EFAULT;
	else
		ret = 0;
        break;

@@ -159,8 +160,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            break;
        }

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
            return -EFAULT;
	if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
		ret = -EFAULT;
		break;
	}

        sc->lmc_media->set_status (sc, &ctl);

@@ -190,8 +193,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
		break;
	    }

	    if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t)))
                return -EFAULT;
	    if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) {
		ret = -EFAULT;
		break;
	    }

            
	    if (new_type == old_type)
@@ -229,8 +234,9 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
                         sizeof (struct lmc_xinfo)))
            return -EFAULT;
					sizeof(struct lmc_xinfo))) {
		ret = -EFAULT;
	else
		ret = 0;

        break;
@@ -262,8 +268,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/

        if (copy_to_user(ifr->ifr_data, &sc->stats,
                         sizeof (struct lmc_statistics)))
            return -EFAULT;

		ret = -EFAULT;
	else
		ret = 0;
        break;

@@ -292,8 +298,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
            break;
        }

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
            return -EFAULT;
	if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
		ret = -EFAULT;
		break;
	}
        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;
        ret = 0;
@@ -318,12 +326,15 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/

#ifdef DEBUG
    case LMCIOCDUMPEVENTLOG:
        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof (u32)))
            return -EFAULT;
	if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
		ret = -EFAULT;
		break;
	}
        if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf)))
            return -EFAULT;

		ret = -EFAULT;
	else
		ret = 0;

        break;
#endif /* end ifdef _DBG_EVENTLOG */
    case LMCIOCT1CONTROL: /*fold01*/
@@ -346,8 +357,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
             */
            netif_stop_queue(dev);

            if (copy_from_user(&xc, ifr->ifr_data, sizeof (struct lmc_xilinx_control)))
                return -EFAULT;
	if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
		ret = -EFAULT;
		break;
	}
            switch(xc.command){
            case lmc_xilinx_reset: /*fold02*/
                {
+1 −0
Original line number Diff line number Diff line
@@ -149,6 +149,7 @@ header-y += ticable.h
header-y += times.h
header-y += tiocl.h
header-y += tipc.h
header-y += tipc_config.h
header-y += toshiba.h
header-y += ultrasound.h
header-y += un.h
+9 −1
Original line number Diff line number Diff line
@@ -2172,6 +2172,14 @@ static void net_rx_action(struct softirq_action *h)

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state))
			work = n->poll(n, weight);

		WARN_ON_ONCE(work > weight);
+28 −9
Original line number Diff line number Diff line
@@ -116,24 +116,43 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	struct napi_struct *napi;
	int budget = 16;
	int work;

	/* net_rx_action's ->poll() invocations and our's are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
		    napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);

			napi->poll(napi, budget);
	work = napi->poll(napi, budget);

	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

/* Walk every NAPI instance of the netpoll device and poll each one,
 * sharing a single fixed budget across the whole list.
 */
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	struct napi_struct *napi;
	int budget = 16;	/* total work budget shared by all instances */

	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
		/* Only poll instances we can claim without blocking:
		 * skip ones this CPU already owns, and use spin_trylock
		 * so a contended poll_lock is skipped rather than waited on.
		 */
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			/* Budget exhausted -- stop polling further instances. */
			if (!budget)
				break;
		}
	}
}
+3 −2
Original line number Diff line number Diff line
@@ -72,7 +72,6 @@ struct ip_vs_sync_thread_data {
	int state;
};

#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
#define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
#define FULL_CONN_SIZE  \
(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
@@ -284,6 +283,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
	struct ip_vs_sync_conn *s;
	struct ip_vs_sync_conn_options *opt;
	struct ip_vs_conn *cp;
	struct ip_vs_protocol *pp;
	char *p;
	int i;

@@ -342,7 +342,8 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
			p += SIMPLE_CONN_SIZE;

		atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
		cp->timeout = IP_VS_SYNC_CONN_TIMEOUT;
		pp = ip_vs_proto_get(s->protocol);
		cp->timeout = pp->timeout_table[cp->state];
		ip_vs_conn_put(cp);

		if (p > buffer+buflen) {
Loading