Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6f2584f4 authored by Jack Steiner, committed by Linus Torvalds
Browse files

sgi-gru: add support to the GRU driver for message queue interrupts



Add support to the GRU driver for message queue interrupts.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ecdaf2b5
Loading
Loading
Loading
Loading
+58 −46
Original line number Diff line number Diff line
@@ -54,6 +54,8 @@
/* Blade percpu resources PERMANENTLY reserved for kernel use */
#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)
#define KERNEL_CTXNUM           15

/* GRU instruction attributes for all instructions */
@@ -94,7 +96,6 @@ struct message_header {
	char	fill;
};

#define QLINES(mq)	((mq) + offsetof(struct message_queue, qlines))
#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))

static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
@@ -250,7 +251,8 @@ static inline void restore_present2(void *p, int val)
 * Create a message queue.
 * 	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(void *p, unsigned int bytes)
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;
@@ -265,6 +267,12 @@ int gru_create_message_queue(void *p, unsigned int bytes)
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
@@ -277,8 +285,8 @@ EXPORT_SYMBOL_GPL(gru_create_message_queue);
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb,
				unsigned long mq, void *mesg)
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
@@ -289,7 +297,7 @@ static int send_noop_message(void *cb,
	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA);
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
@@ -313,7 +321,7 @@ static int send_noop_message(void *cb,
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mq + (gru_get_amo_value_head(cb) << 6);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
@@ -333,30 +341,20 @@ static int send_noop_message(void *cb,
/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb,
			   unsigned long mq, void *mesg, int lines)
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines, save;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	/*
	 * Fetch "qlines" from the queue header. Since the queue may be
	 * in memory that can't be accessed using socket addresses, use
	 * the GRU to access the data. Use DSR space from the message.
	 */
	save = *(int *)mesg;
	gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	qlines = *(int *)mesg;
	*(int *)mesg = save;
	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
@@ -365,7 +363,7 @@ static int send_message_queue_full(void *cb,
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA);
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
@@ -375,8 +373,8 @@ static int send_message_queue_full(void *cb,

	/* Got the lock. Send optional NOP if queue not full, */
	if (head != limit) {
		if (send_noop_message(cb, mq, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half),
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
@@ -387,14 +385,16 @@ static int send_message_queue_full(void *cb,
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA);
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If not successfully in swapping queue head, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
							IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
@@ -404,15 +404,25 @@ static int send_message_queue_full(void *cb,
	return MQE_UNEXPECTED_CB_ERR;
}

/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by hardware
 * but some error conditions require explicit delivery.
 *
 * A zero interrupt_vector in the descriptor means the queue was created
 * without interrupt support (see gru_create_message_queue()), so this is
 * a no-op in that case.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
	if (mqd->interrupt_vector)
		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
				mqd->interrupt_vector);
}


/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb,
				unsigned long mq,
				void *mesg,
				int lines)
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;
	unsigned long m;
@@ -429,7 +439,7 @@ static int send_message_failure(void *cb,
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mq, mesg, lines);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
@@ -437,12 +447,14 @@ static int send_message_failure(void *cb,
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		m = mq + (gru_get_amo_value_head(cb) << 6);
		m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
		gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) == CBS_IDLE)
		if (gru_wait(cb) == CBS_IDLE) {
			ret = MQE_OK;
		else
			send_message_queue_interrupt(mqd);
		} else {
			ret = MQE_UNEXPECTED_CB_ERR;
		}
		break;
	default:
		BUG();
@@ -452,12 +464,12 @@ static int send_message_failure(void *cb,

/*
 * Send a message to a message queue
 * 	cb	GRU control block to use to send message
 * 	mq	message queue
 * 	mqd	message queue descriptor
 * 	mesg	message. Must be vaddr within a GSEG
 * 	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
@@ -481,10 +493,10 @@ int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)

	do {
		ret = MQE_OK;
		gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA);
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mq, dsr, clines);
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

@@ -497,9 +509,9 @@ EXPORT_SYMBOL_GPL(gru_send_message_gpa);
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(void *rmq, void *mesg)
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = rmq;
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
@@ -529,16 +541,16 @@ EXPORT_SYMBOL_GPL(gru_free_message);
 * present. User must call next_message() to move to next message.
 * 	rmq	message queue
 */
void *gru_get_next_message(void *rmq)
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = rmq;
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	STAT(mesq_receive);
	while (present == MQS_NOOP) {
		gru_free_message(rmq, mhdr);
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}
@@ -576,7 +588,7 @@ int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA);
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
@@ -611,7 +623,7 @@ static int quicktest(struct gru_state *gru)

	if (word0 != word1 || word0 != MAGIC) {
		printk
		    ("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n",
		    ("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n",
		     gru->gs_gid, word1, MAGIC);
		BUG();		/* ZZZ should not be fatal */
	}
+24 −9
Original line number Diff line number Diff line
@@ -41,6 +41,15 @@
 * 	- gru_create_message_queue() needs interrupt vector info
 */

/*
 * Descriptor for a GRU message queue. Caches the queue's kernel virtual
 * address, its UV global physical address, its size, and the information
 * needed to deliver a cross-partition interrupt to the queue's owner.
 * Filled in by gru_create_message_queue(); an interrupt_vector of zero
 * means the queue does not use interrupts.
 */
struct gru_message_queue_desc {
	void		*mq;			/* message queue vaddress */
	unsigned long	mq_gpa;			/* global address of mq */
	int		qlines;			/* queue size in CL */
	int		interrupt_vector;	/* interrupt vector (0 = none) */
	int		interrupt_pnode;	/* pnode for interrupt */
	int		interrupt_apicid;	/* lapicid for interrupt */
};

/*
 * Initialize a user allocated chunk of memory to be used as
 * a message queue. The caller must ensure that the queue is
@@ -51,14 +60,19 @@
 * to manage the queue.
 *
 *  Input:
 * 	p	pointer to user allocated memory.
 * 	mqd	pointer to message queue descriptor
 * 	p	pointer to user allocated mesq memory.
 * 	bytes	size of message queue in bytes
 *      vector	interrupt vector (zero if no interrupts)
 *      nasid	nasid of blade where interrupt is delivered
 *      apicid	apicid of cpu for interrupt
 *
 *  Errors:
 *  	0	OK
 *  	>0	error
 */
extern int gru_create_message_queue(void *p, unsigned int bytes);
extern int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid);

/*
 * Send a message to a message queue.
@@ -68,7 +82,7 @@ extern int gru_create_message_queue(void *p, unsigned int bytes);
 *
 *
 *   Input:
 * 	xmq	message queue - must be a UV global physical address
 * 	mqd	pointer to message queue descriptor
 * 	mesg	pointer to message. Must be 64-bit aligned
 * 	bytes	size of message in bytes
 *
@@ -77,8 +91,8 @@ extern int gru_create_message_queue(void *p, unsigned int bytes);
 *     >0	Send failure - see error codes below
 *
 */
extern int gru_send_message_gpa(unsigned long mq_gpa, void *mesg,
						unsigned int bytes);
extern int gru_send_message_gpa(struct gru_message_queue_desc *mqd,
			void *mesg, unsigned int bytes);

/* Status values for gru_send_message() */
#define MQE_OK			0	/* message sent successfully */
@@ -94,10 +108,11 @@ extern int gru_send_message_gpa(unsigned long mq_gpa, void *mesg,
 * API extensions may allow for out-of-order freeing.
 *
 *   Input
 * 	mq	message queue
 * 	mqd	pointer to message queue descriptor
 * 	mesq	message being freed
 */
extern void gru_free_message(void *mq, void *mesq);
extern void gru_free_message(struct gru_message_queue_desc *mqd,
			     void *mesq);

/*
 * Get next message from message queue. Returns pointer to
@@ -106,13 +121,13 @@ extern void gru_free_message(void *mq, void *mesq);
 * in order to move the queue pointers to next message.
 *
 *   Input
 * 	mq	message queue
 * 	mqd	pointer to message queue descriptor
 *
 *   Output:
 *	p	pointer to message
 *	NULL	no message available
 */
extern void *gru_get_next_message(void *mq);
extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);


/*
+22 −11
Original line number Diff line number Diff line
@@ -92,7 +92,9 @@ struct xpc_rsvd_page {
	u8 pad1[3];		/* align to next u64 in 1st 64-byte cacheline */
	union {
		unsigned long vars_pa;	/* phys address of struct xpc_vars */
		unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */
		unsigned long activate_gru_mq_desc_gpa; /* phys addr of */
							/* activate mq's */
							/* gru mq descriptor */
	} sn;
	unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
	u64 pad2[10];		/* align to last u64 in 2nd 64-byte cacheline */
@@ -189,7 +191,9 @@ struct xpc_gru_mq_uv {
	int irq;		/* irq raised when message is received in mq */
	int mmr_blade;		/* blade where watchlist was allocated from */
	unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
	unsigned long mmr_value; /* value of irq mmr located on mmr_blade */
	int watchlist_num;	/* number of watchlist allocated by BIOS */
	void *gru_mq_desc;	/* opaque structure used by the GRU driver */
};

/*
@@ -197,6 +201,7 @@ struct xpc_gru_mq_uv {
 * heartbeat, partition active state, and channel state. This is UV only.
 */
struct xpc_activate_mq_msghdr_uv {
	unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */
	short partid;		/* sender's partid */
	u8 act_state;		/* sender's act_state at time msg sent */
	u8 type;		/* message's type */
@@ -232,7 +237,7 @@ struct xpc_activate_mq_msg_heartbeat_req_uv {
struct xpc_activate_mq_msg_activate_req_uv {
	struct xpc_activate_mq_msghdr_uv hdr;
	unsigned long rp_gpa;
	unsigned long activate_mq_gpa;
	unsigned long activate_gru_mq_desc_gpa;
};

struct xpc_activate_mq_msg_deactivate_req_uv {
@@ -263,7 +268,7 @@ struct xpc_activate_mq_msg_chctl_openreply_uv {
	short ch_number;
	short remote_nentries;	/* ??? Is this needed? What is? */
	short local_nentries;	/* ??? Is this needed? What is? */
	unsigned long local_notify_mq_gpa;
	unsigned long notify_gru_mq_desc_gpa;
};

/*
@@ -510,8 +515,8 @@ struct xpc_channel_sn2 {
};

struct xpc_channel_uv {
	unsigned long remote_notify_mq_gpa;	/* gru phys address of remote */
						/* partition's notify mq */
	void *cached_notify_gru_mq_desc; /* remote partition's notify mq's */
					 /* gru mq descriptor */

	struct xpc_send_msg_slot_uv *send_msg_slots;
	void *recv_msg_slots;	/* each slot will hold a xpc_notify_mq_msg_uv */
@@ -682,8 +687,12 @@ struct xpc_partition_sn2 {
};

struct xpc_partition_uv {
	unsigned long remote_activate_mq_gpa;	/* gru phys address of remote */
						/* partition's activate mq */
	unsigned long	activate_gru_mq_desc_gpa;	/* phys addr of partition's */
						/* activate mq's gru mq */
						/* descriptor */
	void *cached_activate_gru_mq_desc; /* cached copy of partition's */
					   /* activate mq's gru mq descriptor */
	struct mutex cached_activate_gru_mq_desc_mutex;
	spinlock_t flags_lock;	/* protect updating of flags */
	unsigned int flags;	/* general flags */
	u8 remote_act_state;	/* remote partition's act_state */
@@ -696,6 +705,7 @@ struct xpc_partition_uv {

#define XPC_P_HEARTBEAT_OFFLINE_UV		0x00000001
#define XPC_P_ENGAGED_UV			0x00000002
#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV	0x00000004

/* struct xpc_partition_uv act_state change requests */

@@ -804,6 +814,7 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int);
extern int (*xpc_setup_partitions_sn) (void);
extern void (*xpc_teardown_partitions_sn) (void);
extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *,
							 unsigned long *,
							 size_t *);
@@ -846,7 +857,7 @@ extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *,
					   unsigned long *);
extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *);

extern void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *,
extern enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *,
						      unsigned long);

extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *,
+7 −1
Original line number Diff line number Diff line
@@ -183,6 +183,7 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;
	enum xp_retval ret;

	spin_lock_irqsave(&ch->lock, irq_flags);

@@ -399,8 +400,13 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);
		if (ret != xpSuccess) {
			XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		ch->flags |= XPC_C_ROPENREPLY;
		xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
+4 −2
Original line number Diff line number Diff line
@@ -171,6 +171,7 @@ static struct notifier_block xpc_die_notifier = {
};

int (*xpc_setup_partitions_sn) (void);
void (*xpc_teardown_partitions_sn) (void);
enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
						  unsigned long *rp_pa,
						  size_t *len);
@@ -217,7 +218,7 @@ void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
				  unsigned long *irq_flags);

void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
					       unsigned long msgqueue_pa);

enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
@@ -998,6 +999,7 @@ xpc_setup_partitions(void)
static void
xpc_teardown_partitions(void)
{
	xpc_teardown_partitions_sn();
	kfree(xpc_partitions);
}

Loading