Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5309199d authored by Sivan Reinstein, committed by Matt Wagantall
Browse files

net: adding API to get the current NAPI context



Adding a new API - get_current_napi_context(), which returns the
current napi_struct on the current running CPU.
This change allows network drivers to receive the current
running napi_struct, to use when calling APIs that require it.

Adding a new field to struct softnet_data: current_napi.
The field current_napi will hold the currently running napi_struct.

Adding a call to napi_gro_flush() from process_backlog() when there
are no more packets to process, since process_backlog() doesn't
call napi_complete() API.

CRs-fixed: 784626
Change-Id: I05bde8bb00c4ec6440fc1db29e741746b759d1b7
Signed-off-by: Sivan Reinstein <sivanr@codeaurora.org>
parent 8b10d3f8
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -2319,6 +2319,7 @@ struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct napi_struct	*current_napi;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

@@ -2836,6 +2837,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
extern struct napi_struct *get_current_napi_context(void);

static inline void napi_free_frags(struct napi_struct *napi)
{
+21 −0
Original line number Diff line number Diff line
@@ -4323,6 +4323,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	static int state_changed;

#ifdef CONFIG_RPS
	/* Check if we have pending ipi, its better to send them now,
@@ -4344,7 +4345,14 @@ static int process_backlog(struct napi_struct *napi, int quota)
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				if (state_changed) {
					local_irq_enable();
					napi_gro_flush(napi, false);
					local_irq_disable();
					state_changed = 0;
				}
				local_irq_enable();
				sd->current_napi = NULL;
				return work;
			}
		}
@@ -4361,6 +4369,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
			 */
			list_del(&napi->poll_list);
			napi->state = 0;
			state_changed = 1;
			rps_unlock(sd);

			break;
@@ -4371,6 +4380,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
		rps_unlock(sd);
	}
	local_irq_enable();
	sd->current_napi = NULL;

	return work;
}
@@ -4393,11 +4403,14 @@ EXPORT_SYMBOL(__napi_schedule);

/*
 * __napi_complete - mark a NAPI poll as finished on this CPU
 * @n: the napi_struct whose poll has completed
 *
 * Removes @n from the per-CPU poll list, clears this CPU's
 * current_napi pointer (set by net_rx_action() before invoking the
 * poll callback), and releases NAPI_STATE_SCHED so @n can be
 * scheduled again.
 *
 * Preconditions (BUG_ON-enforced): NAPI_STATE_SCHED must be set and
 * the GRO list must already be empty.
 */
void __napi_complete(struct napi_struct *n)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	/*
	 * Clear current_napi BEFORE the barrier: smp_mb__before_atomic()
	 * orders the preceding accesses against the atomic clear_bit()
	 * that must immediately follow it.  Placing a plain store between
	 * the barrier and the atomic op (as the original patch did) left
	 * that store unordered with respect to releasing
	 * NAPI_STATE_SCHED.
	 */
	sd->current_napi = NULL;
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);
@@ -4505,6 +4518,13 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);

/*
 * get_current_napi_context - return the NAPI instance polling on this CPU
 *
 * Returns the napi_struct currently being polled on the local CPU, or
 * NULL when no poll is in progress (current_napi is set by
 * net_rx_action() around the ->poll() call and cleared again in
 * __napi_complete()/process_backlog()).
 *
 * NOTE(review): the per-CPU read is done without disabling preemption;
 * this presumably is only meaningful when called from the NAPI poll
 * path (softirq context) on the same CPU — confirm with callers.
 */
struct napi_struct *get_current_napi_context(void)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	return sd->current_napi;
}
EXPORT_SYMBOL(get_current_napi_context);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4546,6 +4566,7 @@ static void net_rx_action(struct softirq_action *h)
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			sd->current_napi = n;
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}