
Commit d36deae7 authored by Jan Glauber, committed by David S. Miller

qdio: extend API to allow polling



Extend the qdio API to allow polling in the upper-layer driver. This
is needed by qeth to use NAPI.

To use the new interface the upper-layer driver must specify the
queue_start_poll() callback. This callback is used to signal the
upper-layer driver that it has the initiative and must process the
inbound queue by calling qdio_get_next_buffers(). If the upper-layer
driver wants to stop polling, it calls qdio_start_irq(), which returns
the initiative to qdio.

Since adapter interrupts cannot be completely stopped, qdio implements a
software bit, QDIO_QUEUE_IRQS_DISABLED, to safely disable interrupt
processing for an input queue.

The old interface is preserved and will be used as is by zfcp.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e508be17
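
Before the diff, a minimal usage sketch from the driver side (editor's illustration, not part of the commit and not qeth's actual code). It shows the intended flow: the queue_start_poll() callback hands the work to NAPI, the poll routine drains the queue with qdio_get_next_buffers(), and qdio_start_irq() returns the initiative to qdio. The names my_card, my_queue_start_poll and my_poll are hypothetical, and input queue 0 is assumed.

/*
 * Editor's sketch: how an upper-layer driver could consume the new
 * polling API together with NAPI. All my_* names are invented.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <asm/qdio.h>

struct my_card {
	struct ccw_device *cdev;
	struct napi_struct napi;
};

/*
 * queue_start_poll callback: qdio has disabled interrupt processing for
 * this input queue and hands the initiative to the driver. Defer the
 * actual work to NAPI.
 */
static void my_queue_start_poll(struct ccw_device *cdev, int queue,
				unsigned long card_ptr)
{
	struct my_card *card = (struct my_card *)card_ptr;

	napi_schedule(&card->napi);
}

/* NAPI poll routine: drain input queue 0, then hand the initiative back. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_card *card = container_of(napi, struct my_card, napi);
	int work_done = 0;
	int bufnr, error, count;

	while (work_done < budget) {
		count = qdio_get_next_buffers(card->cdev, 0, &bufnr, &error);
		if (count <= 0) {
			/*
			 * count < 0: QDIO error (a real driver would recover);
			 * count == 0: no new buffers. Stop polling and
			 * re-enable qdio interrupts for the queue. If
			 * qdio_start_irq() reports that new data arrived in
			 * the meantime, keep the initiative and poll again.
			 */
			napi_complete(napi);
			if (qdio_start_irq(card->cdev, 0))
				napi_schedule(&card->napi);
			break;
		}
		/* ... process 'count' inbound buffers starting at 'bufnr',
		 *     honouring 'error' ... */
		work_done += count;
	}
	return min(work_done, budget);
}
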
+8 −5
@@ -360,6 +360,7 @@ struct qdio_initialize {
	unsigned int no_output_qs;
	qdio_handler_t *input_handler;
	qdio_handler_t *output_handler;
	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
	unsigned long int_parm;
	void **input_sbal_addr_array;
	void **output_sbal_addr_array;
@@ -377,11 +378,13 @@ struct qdio_initialize {
extern int qdio_allocate(struct qdio_initialize *);
extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *);

extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
		   int q_nr, unsigned int bufnr, unsigned int count);
extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
		   unsigned int);
extern int qdio_start_irq(struct ccw_device *, int);
extern int qdio_stop_irq(struct ccw_device *, int);
extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
extern int qdio_shutdown(struct ccw_device *, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);

#endif /* __QDIO_H__ */
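
For completeness, a hypothetical registration sketch (again an editor's illustration, not taken from the commit) showing where the new queue_start_poll member of struct qdio_initialize is filled in. It reuses my_queue_start_poll() from the sketch above; the queue counts and error handling are illustrative only.

/*
 * Editor's sketch: registering the polling callback via the extended
 * struct qdio_initialize. Reuses the hypothetical my_queue_start_poll()
 * from the sketch above.
 */
#include <linux/string.h>
#include <asm/qdio.h>

static int my_setup_qdio(struct ccw_device *cdev, unsigned long card_ptr,
			 qdio_handler_t *input_handler,
			 qdio_handler_t *output_handler,
			 void **in_sbals, void **out_sbals)
{
	struct qdio_initialize init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cdev = cdev;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = input_handler;
	init_data.output_handler = output_handler;
	/* New with this patch: request upper-layer polling for input queues. */
	init_data.queue_start_poll = my_queue_start_poll;
	/* Handed back to the callback, e.g. a pointer to the driver's card. */
	init_data.int_parm = card_ptr;
	init_data.input_sbal_addr_array = in_sbals;
	init_data.output_sbal_addr_array = out_sbals;

	rc = qdio_allocate(&init_data);
	if (rc)
		return rc;
	rc = qdio_establish(&init_data);
	if (rc) {
		qdio_free(cdev);
		return rc;
	}
	return qdio_activate(cdev);
}
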
+29 −0
@@ -208,6 +208,7 @@ struct qdio_dev_perf_stat {
	unsigned int eqbs_partial;
	unsigned int sqbs;
	unsigned int sqbs_partial;
	unsigned int int_discarded;
} ____cacheline_aligned;

struct qdio_queue_perf_stat {
@@ -222,6 +223,10 @@ struct qdio_queue_perf_stat {
	unsigned int nr_sbal_total;
};

enum qdio_queue_irq_states {
	QDIO_QUEUE_IRQS_DISABLED,
};

struct qdio_input_q {
	/* input buffer acknowledgement flag */
	int polling;
@@ -231,6 +236,10 @@ struct qdio_input_q {
	int ack_count;
	/* last time of noticing incoming data */
	u64 timestamp;
	/* upper-layer polling flag */
	unsigned long queue_irq_state;
	/* callback to start upper-layer polling */
	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};

struct qdio_output_q {
@@ -399,6 +408,26 @@ static inline int multicast_outbound(struct qdio_q *q)
#define sub_buf(bufnr, dec) \
	((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)

#define queue_irqs_enabled(q)			\
	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
#define queue_irqs_disabled(q)			\
	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)

#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

extern struct indicator_t *q_indicators;

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

/* prototypes for thin interrupt */
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
+12 −21
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v)

	seq_printf(m, "DSCI: %d   nr_used: %d\n",
		   *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
	seq_printf(m, "ftc: %d  last_move: %d\n", q->first_to_check, q->last_move);
	seq_printf(m, "ftc: %d  last_move: %d\n",
		   q->first_to_check, q->last_move);
	if (q->is_input_q) {
		seq_printf(m, "polling: %d  ack start: %d  ack count: %d\n",
		   q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
			   q->u.in.polling, q->u.in.ack_start,
			   q->u.in.ack_count);
		seq_printf(m, "IRQs disabled: %u\n",
			   test_bit(QDIO_QUEUE_IRQS_DISABLED,
			   &q->u.in.queue_irq_state));
	}
	seq_printf(m, "SBAL states:\n");
	seq_printf(m, "|0      |8      |16     |24     |32     |40     |48     |56  63|\n");

@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v)
	return 0;
}

static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *off)
{
	struct seq_file *seq = file->private_data;
	struct qdio_q *q = seq->private;

	if (!q)
		return 0;
	if (q->is_input_q)
		xchg(q->irq_ptr->dsci, 1);
	local_bh_disable();
	tasklet_schedule(&q->tasklet);
	local_bh_enable();
	return count;
}

static int qstat_seq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, qstat_show,
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = {
	.owner	 = THIS_MODULE,
	.open	 = qstat_seq_open,
	.read	 = seq_read,
	.write	 = qstat_seq_write,
	.llseek  = seq_lseek,
	.release = single_release,
};
@@ -166,7 +156,8 @@ static char *qperf_names[] = {
	"QEBSM eqbs",
	"QEBSM eqbs partial",
	"QEBSM sqbs",
	"QEBSM sqbs partial"
	"QEBSM sqbs partial",
	"Discarded interrupts"
};

static int qperf_show(struct seq_file *m, void *v)
+136 −2
@@ -884,8 +884,19 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i)
	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;
@@ -1519,6 +1530,129 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
}
EXPORT_SYMBOL_GPL(do_QDIO);

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	if (!shared_ind(q->irq_ptr))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;
+1 −0
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

		q->is_input_q = 1;
		q->u.in.queue_start_poll = qdio_init->queue_start_poll;
		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
