Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ace4cede authored by David S. Miller
Browse files

Merge branch 's390-qeth-next'



Julian Wiedmann says:

====================
s390/qeth: updates 2019-08-23

please apply one more round of qeth patches. These implement support for
a bunch of TX-related features - namely TX NAPI, BQL and xmit_more.

Note that this includes two qdio patches which lay the necessary
groundwork, and have been acked by Vasily.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents fbbdbc64 9549d70a
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@
#define QDIO_MAX_QUEUES_PER_IRQ		4
#define QDIO_MAX_BUFFERS_PER_Q		128
#define QDIO_MAX_BUFFERS_MASK		(QDIO_MAX_BUFFERS_PER_Q - 1)
#define QDIO_BUFNR(num)			((num) & QDIO_MAX_BUFFERS_MASK)
#define QDIO_MAX_ELEMENTS_PER_BUFFER	16
#define QDIO_SBAL_SIZE			256

@@ -359,7 +360,7 @@ struct qdio_initialize {
	qdio_handler_t *output_handler;
	void (**queue_start_poll_array) (struct ccw_device *, int,
					  unsigned long);
	int scan_threshold;
	unsigned int scan_threshold;
	unsigned long int_parm;
	struct qdio_buffer **input_sbal_addr_array;
	struct qdio_buffer **output_sbal_addr_array;
@@ -416,6 +417,9 @@ extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
extern int qdio_start_irq(struct ccw_device *, int);
extern int qdio_stop_irq(struct ccw_device *, int);
extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
			      bool is_input, unsigned int *bufnr,
			      unsigned int *error);
extern int qdio_shutdown(struct ccw_device *, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
+1 −2
Original line number Diff line number Diff line
@@ -206,8 +206,6 @@ struct qdio_output_q {
	struct qdio_outbuf_state *sbal_state;
	/* timer to check for more outbound work */
	struct timer_list timer;
	/* used SBALs before tasklet schedule */
	int scan_threshold;
};

/*
@@ -295,6 +293,7 @@ struct qdio_irq {
	struct qdio_ssqd_desc ssqd_desc;
	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);

	unsigned int scan_threshold;	/* used SBALs before tasklet schedule */
	int perf_stat_enabled;

	struct qdr *qdr;
+51 −24
Original line number Diff line number Diff line
@@ -647,8 +647,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
		if (q->u.out.use_cq)
			qdio_handle_aobs(q, start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
@@ -774,8 +772,11 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)

	count = get_outbound_buffer_frontier(q, start);

	if (count)
	if (count) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		if (q->u.out.use_cq)
			qdio_handle_aobs(q, start, count);
	}

	return count;
}
@@ -879,7 +880,7 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(irq))
	if (!pci_out_supported(irq) || !irq->scan_threshold)
		return;

	for_each_output_queue(irq, out, i)
@@ -972,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
		}
	}

	if (!pci_out_supported(irq_ptr))
	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
		return;

	for_each_output_queue(irq_ptr, q, i) {
@@ -1527,6 +1528,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
	unsigned char state = 0;
	int used, rc = 0;

@@ -1565,8 +1567,12 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
		rc = qdio_kick_outbound_q(q, 0);
	}

	/* Let drivers implement their own completion scanning: */
	if (!scan_threshold)
		return rc;

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
	if (used >= scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
@@ -1655,6 +1661,44 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
}
EXPORT_SYMBOL(qdio_start_irq);

/* Scan one queue for buffers that completed since the last inspection.
 * On success, *bufnr receives the index of the first completed buffer and
 * *error the queue's accumulated error state; the scan position and error
 * state are then reset for the next call.
 * Returns the number of completed buffers, or 0 if nothing moved.
 */
static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
				unsigned int *error)
{
	unsigned int start = q->first_to_check;
	int count;

	if (q->is_input_q)
		count = qdio_inbound_q_moved(q, start);
	else
		count = qdio_outbound_q_moved(q, start);

	if (!count)
		return 0;

	*bufnr = start;
	*error = q->qdio_error;

	/* Advance past the reported buffers and clear the error state. */
	q->first_to_check = add_buf(start, count);
	q->qdio_error = 0;

	return count;
}

/**
 * qdio_inspect_queue - check a queue for completed buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: queue number
 * @is_input: select an input (true) or output (false) queue
 * @bufnr: on success, set to the first completed buffer's index
 * @error: on success, set to the queue's accumulated error state
 *
 * Returns the number of completed buffers, 0 if none, or -ENODEV when
 * no qdio data is set up for the device.
 */
int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
		       unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	if (!irq_ptr)
		return -ENODEV;

	if (is_input)
		q = irq_ptr->input_qs[nr];
	else
		q = irq_ptr->output_qs[nr];

	/* Sync the queue state first if the HW requires explicit SIGA sync. */
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL_GPL(qdio_inspect_queue);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
@@ -1672,13 +1716,10 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int start;
	int count;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	start = q->first_to_check;

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
@@ -1689,25 +1730,11 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,

	qdio_check_outbound_pci_queues(irq_ptr);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return 0;

	start = add_buf(start, count);
	q->first_to_check = start;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	*bufnr = q->first_to_kick;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = add_buf(q->first_to_kick, count);
	q->qdio_error = 0;

	return count;
	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

+1 −1
Original line number Diff line number Diff line
@@ -248,7 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

		q->is_input_q = 0;
		q->u.out.scan_threshold = qdio_init->scan_threshold;
		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

@@ -474,6 +473,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
	irq_ptr->nr_input_qs = init_data->no_input_qs;
	irq_ptr->nr_output_qs = init_data->no_output_qs;
	irq_ptr->cdev = init_data->cdev;
	irq_ptr->scan_threshold = init_data->scan_threshold;
	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
	setup_queues(irq_ptr, init_data);

+52 −0
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

@@ -30,6 +31,7 @@
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
#include <net/sch_generic.h>
#include <net/tcp.h>

#include <asm/debug.h>
@@ -376,6 +378,28 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
#define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/

static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1,
				     struct qeth_hdr_layer2 *h2)
{
	return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) &&
	       h1->vlan_id == h2->vlan_id;
}

static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1,
					 struct qeth_hdr_layer3 *h2)
{
	return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) &&
	       h1->vlan_id == h2->vlan_id;
}

static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
					 struct qeth_hdr_layer3 *h2)
{
	return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
	       ipv6_addr_equal(&h1->next_hop.ipv6_addr,
			       &h2->next_hop.ipv6_addr);
}

enum qeth_qdio_info_states {
	QETH_QDIO_UNINITIALIZED,
	QETH_QDIO_ALLOCATED,
@@ -424,6 +448,7 @@ struct qeth_qdio_out_buffer {
	struct qdio_buffer *buffer;
	atomic_t state;
	int next_element_to_fill;
	unsigned int bytes;
	struct sk_buff_head skb_list;
	int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];

@@ -473,6 +498,8 @@ struct qeth_out_q_stats {
	u64 tso_bytes;
	u64 packing_mode_switch;
	u64 stopped;
	u64 completion_yield;
	u64 completion_timer;

	/* rtnl_link_stats64 */
	u64 tx_packets;
@@ -481,6 +508,8 @@ struct qeth_out_q_stats {
	u64 tx_dropped;
};

#define QETH_TX_TIMER_USECS		500

struct qeth_qdio_out_q {
	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
@@ -499,13 +528,36 @@ struct qeth_qdio_out_q {
	atomic_t used_buffers;
	/* indicates whether PCI flag must be set (or if one is outstanding) */
	atomic_t set_pci_flags_count;
	struct napi_struct napi;
	struct timer_list timer;
	struct qeth_hdr *prev_hdr;
	u8 bulk_start;
};

#define qeth_for_each_output_queue(card, q, i)		\
	for (i = 0; i < card->qdio.no_out_queues &&	\
		    (q = card->qdio.out_qs[i]); i++)

#define	qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)

/* Arm the queue's completion timer, unless it is already pending. */
static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
{
	if (!timer_pending(&queue->timer))
		mod_timer(&queue->timer,
			  jiffies + usecs_to_jiffies(QETH_TX_TIMER_USECS));
}

/* True when every SBAL of the output queue is in use. */
static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
{
	int used = atomic_read(&queue->used_buffers);

	return used >= QDIO_MAX_BUFFERS_PER_Q;
}

/* True when the output queue has no SBALs in use. */
static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
{
	return !atomic_read(&queue->used_buffers);
}

struct qeth_qdio_info {
	atomic_t state;
	/* input */
Loading