
Commit 33ba3c77 authored by Dean Nelson, committed by Linus Torvalds

sgi-xp: isolate xpc_vars structure to sn2 only



Isolate the xpc_vars structure of XPC's reserved page to sn2 only.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e17d416b
drivers/misc/sgi-xp/xpc.h: +97 −432
@@ -159,10 +159,10 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
  * reflected by incrementing either the major or minor version numbers
  * of struct xpc_vars.
  */
-struct xpc_vars {
+struct xpc_vars_sn2 {
 	u8 version;
 	u64 heartbeat;
-	u64 heartbeating_to_mask;
+	DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
 	u64 heartbeat_offline;	/* if 0, heartbeat should be changing */
 	int act_nasid;
 	int act_phys_cpuid;
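The heartbeating_to_mask change above is the core of this hunk: a raw u64 caps XPC at 64 partitions, while DECLARE_BITMAP sizes the field from the partition limit. A minimal sketch of the difference, using a hypothetical limit and names (not code from this commit):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_MAX_PARTITIONS	256	/* hypothetical limit beyond a u64's 64 bits */

struct example_vars {
	DECLARE_BITMAP(heartbeating_to_mask, EXAMPLE_MAX_PARTITIONS);
};

static inline int
example_hb_allowed(short partid, struct example_vars *vars)
{
	/* test_bit() replaces the old (mask & (1UL << partid)) != 0 test */
	return test_bit(partid, vars->heartbeating_to_mask);
}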
@@ -176,43 +176,20 @@ struct xpc_vars {
 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
 			(_version >= _XPC_VERSION(3, 1))
 
-static inline int
-xpc_hb_allowed(short partid, struct xpc_vars *vars)
-{
-	return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
-}
-
-static inline void
-xpc_allow_hb(short partid, struct xpc_vars *vars)
-{
-	u64 old_mask, new_mask;
-
-	do {
-		old_mask = vars->heartbeating_to_mask;
-		new_mask = (old_mask | (1UL << partid));
-	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-		 old_mask);
-}
-
-static inline void
-xpc_disallow_hb(short partid, struct xpc_vars *vars)
-{
-	u64 old_mask, new_mask;
-
-	do {
-		old_mask = vars->heartbeating_to_mask;
-		new_mask = (old_mask & ~(1UL << partid));
-	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-		 old_mask);
-}
-
 /*
- * The AMOs page consists of a number of AMO variables which are divided into
- * four groups, The first two groups are used to identify an IRQ's sender.
- * These two groups consist of 64 and 128 AMO variables respectively. The last
- * two groups, consisting of just one AMO variable each, are used to identify
- * the remote partitions that are currently engaged (from the viewpoint of
- * the XPC running on the remote partition).
+ * The following pertains to ia64-sn2 only.
+ *
+ * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
+ * pages are located in the lowest granule. The lowest granule uses 4k pages
+ * for cached references and an alternate TLB handler to never provide a
+ * cacheable mapping for the entire region. This will prevent speculative
+ * reading of cached copies of our lines from being issued which will cause
+ * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
+ * AMO variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
+ * NOTIFY IRQs, 128 AMO variables (based on XP_NASID_MASK_WORDS) to identify
+ * the senders of ACTIVATE IRQs, and 2 AMO variables to identify which remote
+ * partitions (i.e., XPCs) consider themselves currently engaged with the
+ * local XPC.
  */
 #define XPC_NOTIFY_IRQ_AMOS	0
 #define XPC_ACTIVATE_IRQ_AMOS	(XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2)
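The deleted xpc_allow_hb()/xpc_disallow_hb() helpers above updated the shared u64 mask with a cmpxchg() retry loop, the standard lock-free read-modify-write idiom. A generic sketch of that idiom (names are illustrative, not kernel API):

static inline void
example_atomic_set_mask_bit(u64 *mask, short partid)
{
	u64 old_mask, new_mask;

	do {
		old_mask = *mask;			/* snapshot the current value */
		new_mask = old_mask | (1UL << partid);	/* compute the update */
		/* retry if another CPU modified *mask since the snapshot */
	} while (cmpxchg(mask, old_mask, new_mask) != old_mask);
}

Once the mask becomes a bitmap, set_bit() and clear_bit() provide the same per-word atomicity without an explicit loop, which is why the replacement inlines added later in this diff need no cmpxchg().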
@@ -259,11 +236,11 @@ struct xpc_vars_part_sn2 {
 /* the reserved page sizes and offsets */
 
 #define XPC_RP_HEADER_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
-#define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars))
+#define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2))
 
 #define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE))
 #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp)	((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+#define XPC_RP_VARS(_rp)	((struct xpc_vars_sn2 *)(XPC_RP_MACH_NASIDS(_rp) + \
 				    xp_nasid_mask_words))
 
 /*
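The XPC_RP_* macros above lay the reserved page out as a header, then the partition's nasid mask, then the machine's nasid mask, then the variables structure. Expanded into a hypothetical helper for readability (a sketch only; the macros themselves are the real interface):

static inline struct xpc_vars_sn2 *
example_rp_vars(struct xpc_rsvd_page *rp)
{
	/* reserved page layout: header | part nasids | mach nasids | vars */
	u64 *part_nasids = (u64 *)((u8 *)rp + XPC_RP_HEADER_SIZE);
	u64 *mach_nasids = part_nasids + xp_nasid_mask_words;

	return (struct xpc_vars_sn2 *)(mach_nasids + xp_nasid_mask_words);
}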
@@ -344,6 +321,7 @@ struct xpc_notify {
  * allocated at the time a partition becomes active. The array contains one
  * of these structures for each potential channel connection to that partition.
  *
+>>> sn2 only!!!
  * Each of these structures manages two message queues (circular buffers).
  * They are allocated at the time a channel connection is made. One of
  * these message queues (local_msgqueue) holds the locally created messages
@@ -622,6 +600,9 @@ extern struct device *xpc_part;
 extern struct device *xpc_chan;
 extern int xpc_disengage_request_timelimit;
 extern int xpc_disengage_request_timedout;
+extern atomic_t xpc_act_IRQ_rcvd;
+extern wait_queue_head_t xpc_act_IRQ_wq;
+extern void *xpc_heartbeating_to_mask;
 extern irqreturn_t xpc_notify_IRQ_handler(int, void *);
 extern void xpc_dropped_IPI_check(struct xpc_partition *);
 extern void xpc_activate_partition(struct xpc_partition *);
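The xpc_act_IRQ_rcvd/xpc_act_IRQ_wq pair exported above suggests the common count-and-wake IRQ pattern: the handler bumps an atomic counter and wakes a waiter on the queue. A generic sketch of that pattern (illustrative names; not the actual xpc_main.c code):

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/wait.h>

static atomic_t example_act_IRQ_rcvd = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(example_act_IRQ_wq);

static irqreturn_t
example_act_IRQ_handler(int irq, void *dev_id)
{
	atomic_inc(&example_act_IRQ_rcvd);		/* record one more activate IRQ */
	wake_up_interruptible(&example_act_IRQ_wq);	/* kick the sleeping consumer */
	return IRQ_HANDLED;
}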
@@ -629,15 +610,48 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
 extern void xpc_create_kthreads(struct xpc_channel *, int, int);
 extern void xpc_disconnect_wait(int);
 extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *);
+extern void (*xpc_heartbeat_init) (void);
+extern void (*xpc_heartbeat_exit) (void);
+extern void (*xpc_increment_heartbeat) (void);
+extern void (*xpc_offline_heartbeat) (void);
+extern void (*xpc_online_heartbeat) (void);
+extern void (*xpc_check_remote_hb) (void);
 extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *);
 extern u64 (*xpc_get_IPI_flags) (struct xpc_partition *);
 extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *);
+extern void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *, u64,
+						  int);
+extern void (*xpc_process_act_IRQ_rcvd) (int);
 extern enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *);
 extern void (*xpc_teardown_infrastructure) (struct xpc_partition *);
+extern void (*xpc_mark_partition_engaged) (struct xpc_partition *);
+extern void (*xpc_mark_partition_disengaged) (struct xpc_partition *);
+extern void (*xpc_request_partition_disengage) (struct xpc_partition *);
+extern void (*xpc_cancel_partition_disengage_request) (struct xpc_partition *);
+extern u64 (*xpc_partition_engaged) (u64);
+extern u64 (*xpc_partition_disengage_requested) (u64);;
+extern void (*xpc_clear_partition_engaged) (u64);
+extern void (*xpc_clear_partition_disengage_request) (u64);
+
+extern void (*xpc_IPI_send_local_activate) (int);
+extern void (*xpc_IPI_send_activated) (struct xpc_partition *);
+extern void (*xpc_IPI_send_local_reactivate) (int);
+extern void (*xpc_IPI_send_disengage) (struct xpc_partition *);
+
+extern void (*xpc_IPI_send_closerequest) (struct xpc_channel *,
+					  unsigned long *);
+extern void (*xpc_IPI_send_closereply) (struct xpc_channel *, unsigned long *);
+extern void (*xpc_IPI_send_openrequest) (struct xpc_channel *, unsigned long *);
+extern void (*xpc_IPI_send_openreply) (struct xpc_channel *, unsigned long *);
+
+extern enum xp_retval (*xpc_allocate_msg) (struct xpc_channel *, u32,
+					   struct xpc_msg **);
+extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, struct xpc_msg *,
+				       u8, xpc_notify_func, void *);
+extern void (*xpc_received_msg) (struct xpc_channel *, struct xpc_msg *);
 
 /* found in xpc_sn2.c */
 extern void xpc_init_sn2(void);
-extern struct xpc_vars *xpc_vars;		/*>>> eliminate from here */
 
 /* found in xpc_uv.c */
 extern void xpc_init_uv(void);
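The long list of function pointers added above is the series' mechanism for separating sn2 from uv: platform-neutral code calls through the pointers, and xpc_init_sn2() or xpc_init_uv() fills them in at load time. A minimal sketch of that init-time dispatch pattern, with hypothetical names standing in for the real xpc_sn2.c implementations:

void (*example_increment_heartbeat) (void);

static void
example_increment_heartbeat_sn2(void)
{
	/* sn2-specific heartbeat update would live here */
}

void
example_init_sn2(void)
{
	/* each platform's init routine aims the ops at its own code */
	example_increment_heartbeat = example_increment_heartbeat_sn2;
}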
@@ -646,6 +660,7 @@ extern void xpc_init_uv(void);
 extern int xpc_exiting;
 extern int xp_nasid_mask_words;
 extern struct xpc_rsvd_page *xpc_rsvd_page;
+extern u64 *xpc_mach_nasids;
 extern struct xpc_partition *xpc_partitions;
 extern char *xpc_remote_copy_buffer;
 extern void *xpc_remote_copy_buffer_base;
@@ -658,7 +673,8 @@ extern int xpc_partition_disengaged(struct xpc_partition *);
 extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
 extern void xpc_mark_partition_inactive(struct xpc_partition *);
 extern void xpc_discovery(void);
-extern void xpc_check_remote_hb(void);
+extern enum xp_retval xpc_get_remote_rp(int, u64 *, struct xpc_rsvd_page *,
+					u64 *);
 extern void xpc_deactivate_partition(const int, struct xpc_partition *,
 				     enum xp_retval);
 extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
@@ -667,6 +683,7 @@ extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
 extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **);
 extern void xpc_initiate_connect(int);
 extern void xpc_initiate_disconnect(int);
+extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *);
 extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **);
 extern enum xp_retval xpc_initiate_send(short, int, void *);
 extern enum xp_retval xpc_initiate_send_notify(short, int, void *,
@@ -680,6 +697,40 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *,
 extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
 extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
 
+static inline int
+xpc_hb_allowed(short partid, void *heartbeating_to_mask)
+{
+	return test_bit(partid, heartbeating_to_mask);
+}
+
+static inline int
+xpc_any_hbs_allowed(void)
+{
+	DBUG_ON(xpc_heartbeating_to_mask == NULL);
+	return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions);
+}
+
+static inline void
+xpc_allow_hb(short partid)
+{
+	DBUG_ON(xpc_heartbeating_to_mask == NULL);
+	set_bit(partid, xpc_heartbeating_to_mask);
+}
+
+static inline void
+xpc_disallow_hb(short partid)
+{
+	DBUG_ON(xpc_heartbeating_to_mask == NULL);
+	clear_bit(partid, xpc_heartbeating_to_mask);
+}
+
+static inline void
+xpc_disallow_all_hbs(void)
+{
+	DBUG_ON(xpc_heartbeating_to_mask == NULL);
+	bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions);
+}
+
 static inline void
 xpc_wakeup_channel_mgr(struct xpc_partition *part)
 {
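The replacement heartbeat inlines added in this hunk operate on the bitmap behind xpc_heartbeating_to_mask with the kernel's atomic bit ops. A usage sketch (hypothetical caller; assumes the platform init code has already pointed xpc_heartbeating_to_mask at a live bitmap):

static void
example_start_heartbeating(short partid)
{
	xpc_allow_hb(partid);		/* atomically set the partition's bit */

	if (xpc_hb_allowed(partid, xpc_heartbeating_to_mask))
		pr_debug("now heartbeating to partition %d\n", partid);
}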
@@ -749,297 +800,7 @@ xpc_part_ref(struct xpc_partition *part)
 	}
 
 /*
- * This next set of inlines are used to keep track of when a partition is
- * potentially engaged in accessing memory belonging to another partition.
- */
-
-static inline void
-xpc_mark_partition_engaged(struct xpc_partition *part)
-{
-	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
-				   (XPC_ENGAGED_PARTITIONS_AMO *
-				    sizeof(AMO_t)));
-
-	local_irq_save(irq_flags);
-
-	/* set bit corresponding to our partid in remote partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
-			 (1UL << sn_partition_id));
-	/*
-	 * We must always use the nofault function regardless of whether we
-	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IPIs and AMOs to it until the heartbeat times out.
-	 */
-	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
-							       variable),
-						     xp_nofault_PIOR_target));
-
-	local_irq_restore(irq_flags);
-}
-
-static inline void
-xpc_mark_partition_disengaged(struct xpc_partition *part)
-{
-	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
-				   (XPC_ENGAGED_PARTITIONS_AMO *
-				    sizeof(AMO_t)));
-
-	local_irq_save(irq_flags);
-
-	/* clear bit corresponding to our partid in remote partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
-			 ~(1UL << sn_partition_id));
-	/*
-	 * We must always use the nofault function regardless of whether we
-	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IPIs and AMOs to it until the heartbeat times out.
-	 */
-	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
-							       variable),
-						     xp_nofault_PIOR_target));
-
-	local_irq_restore(irq_flags);
-}
-
-static inline void
-xpc_request_partition_disengage(struct xpc_partition *part)
-{
-	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
-				   (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
-	local_irq_save(irq_flags);
-
-	/* set bit corresponding to our partid in remote partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
-			 (1UL << sn_partition_id));
-	/*
-	 * We must always use the nofault function regardless of whether we
-	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IPIs and AMOs to it until the heartbeat times out.
-	 */
-	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
-							       variable),
-						     xp_nofault_PIOR_target));
-
-	local_irq_restore(irq_flags);
-}
-
-static inline void
-xpc_cancel_partition_disengage_request(struct xpc_partition *part)
-{
-	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
-				   (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
-	local_irq_save(irq_flags);
-
-	/* clear bit corresponding to our partid in remote partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
-			 ~(1UL << sn_partition_id));
-	/*
-	 * We must always use the nofault function regardless of whether we
-	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IPIs and AMOs to it until the heartbeat times out.
-	 */
-	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
-							       variable),
-						     xp_nofault_PIOR_target));
-
-	local_irq_restore(irq_flags);
-}
-
-static inline u64
-xpc_partition_engaged(u64 partid_mask)
-{
-	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
-
-	/* return our partition's AMO variable ANDed with partid_mask */
-	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
-		partid_mask);
-}
-
-static inline u64
-xpc_partition_disengage_requested(u64 partid_mask)
-{
-	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
-
-	/* return our partition's AMO variable ANDed with partid_mask */
-	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
-		partid_mask);
-}
-
-static inline void
-xpc_clear_partition_engaged(u64 partid_mask)
-{
-	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
-
-	/* clear bit(s) based on partid_mask in our partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
-			 ~partid_mask);
-}
-
-static inline void
-xpc_clear_partition_disengage_request(u64 partid_mask)
-{
-	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
-
-	/* clear bit(s) based on partid_mask in our partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
-			 ~partid_mask);
-}
-
-/*
- * The following set of macros and inlines are used for the sending and
- * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
- * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
- * the other that is associated with channel activity (SGI_XPC_NOTIFY).
- */
-
-static inline u64
-xpc_IPI_receive(AMO_t *amo)
-{
-	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
-}
-
-static inline enum xp_retval
-xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
-{
-	int ret = 0;
-	unsigned long irq_flags;
-
-	local_irq_save(irq_flags);
-
-	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
-	sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
-
-	/*
-	 * We must always use the nofault function regardless of whether we
-	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IPIs and AMOs to it until the heartbeat times out.
-	 */
-	ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
-						     xp_nofault_PIOR_target));
-
-	local_irq_restore(irq_flags);
-
-	return ((ret == 0) ? xpSuccess : xpPioReadError);
-}
-
-/*
- * IPIs associated with SGI_XPC_ACTIVATE IRQ.
- */
-
-/*
- * Flag the appropriate AMO variable and send an IPI to the specified node.
- */
-static inline void
-xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
-		      int to_phys_cpuid)
-{
-	int w_index = XPC_NASID_W_INDEX(from_nasid);
-	int b_index = XPC_NASID_B_INDEX(from_nasid);
-	AMO_t *amos = (AMO_t *)__va(amos_page_pa +
-				    (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
-
-	(void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
-			   to_phys_cpuid, SGI_XPC_ACTIVATE);
-}
-
-static inline void
-xpc_IPI_send_activate(struct xpc_vars *vars)
-{
-	xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
-			      vars->act_nasid, vars->act_phys_cpuid);
-}
-
-static inline void
-xpc_IPI_send_activated(struct xpc_partition *part)
-{
-	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
-			      part->remote_act_nasid,
-			      part->remote_act_phys_cpuid);
-}
-
-static inline void
-xpc_IPI_send_reactivate(struct xpc_partition *part)
-{
-	xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
-			      xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
-}
-
-static inline void
-xpc_IPI_send_disengage(struct xpc_partition *part)
-{
-	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
-			      part->remote_act_nasid,
-			      part->remote_act_phys_cpuid);
-}
-
-/*
- * IPIs associated with SGI_XPC_NOTIFY IRQ.
- */
-
-/*
- * Send an IPI to the remote partition that is associated with the
- * specified channel.
- */
-#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
-		xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
-
-static inline void
-xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
-		    unsigned long *irq_flags)
-{
-	struct xpc_partition *part = &xpc_partitions[ch->partid];
-	enum xp_retval ret;
-
-	if (likely(part->act_state != XPC_P_DEACTIVATING)) {
-		ret = xpc_IPI_send(part->remote_IPI_amo_va,
-				   (u64)ipi_flag << (ch->number * 8),
-				   part->remote_IPI_nasid,
-				   part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
-		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
-			ipi_flag_string, ch->partid, ch->number, ret);
-		if (unlikely(ret != xpSuccess)) {
-			if (irq_flags != NULL)
-				spin_unlock_irqrestore(&ch->lock, *irq_flags);
-			XPC_DEACTIVATE_PARTITION(part, ret);
-			if (irq_flags != NULL)
-				spin_lock_irqsave(&ch->lock, *irq_flags);
-		}
-	}
-}
-
-/*
- * Make it look like the remote partition, which is associated with the
- * specified channel, sent us an IPI. This faked IPI will be handled
- * by xpc_dropped_IPI_check().
- */
-#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
-		xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
-
-static inline void
-xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
-			  char *ipi_flag_string)
-{
-	struct xpc_partition *part = &xpc_partitions[ch->partid];
-
-	FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
-			 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
-	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
-		ipi_flag_string, ch->partid, ch->number);
-}
-
-/*
- * The sending and receiving of IPIs includes the setting of an AMO variable
+ * The sending and receiving of IPIs includes the setting of an >>>AMO variable
  * to indicate the reason the IPI was sent. The 64-bit variable is divided
  * up into eight bytes, ordered from right to left. Byte zero pertains to
  * channel 0, byte one to channel 1, and so on. Each byte is described by
@@ -1052,107 +813,11 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
 #define	XPC_IPI_OPENREPLY	0x08
 #define	XPC_IPI_MSGREQUEST	0x10
 
-/* given an AMO variable and a channel#, get its associated IPI flags */
+/* given an >>>AMO variable and a channel#, get its associated IPI flags */
 #define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
 #define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))
 
 #define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
 #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010UL)
 
-static inline void
-xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
-{
-	struct xpc_openclose_args *args = ch->local_openclose_args;
-
-	args->reason = ch->reason;
-
-	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
-}
-
-static inline void
-xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
-{
-	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
-}
-
-static inline void
-xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
-{
-	struct xpc_openclose_args *args = ch->local_openclose_args;
-
-	args->msg_size = ch->msg_size;
-	args->local_nentries = ch->local_nentries;
-
-	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
-}
-
-static inline void
-xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
-{
-	struct xpc_openclose_args *args = ch->local_openclose_args;
-
-	args->remote_nentries = ch->remote_nentries;
-	args->local_nentries = ch->local_nentries;
-	args->local_msgqueue_pa = __pa(ch->local_msgqueue);
-
-	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
-}
-
-static inline void
-xpc_IPI_send_msgrequest(struct xpc_channel *ch)
-{
-	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
-}
-
-static inline void
-xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
-{
-	XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
-}
-
-/*
->>> this block comment needs to be moved and re-written.
- * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
- * pages are located in the lowest granule. The lowest granule uses 4k pages
- * for cached references and an alternate TLB handler to never provide a
- * cacheable mapping for the entire region. This will prevent speculative
- * reading of cached copies of our lines from being issued which will cause
- * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * AMO variables (based on xp_max_npartitions) for message notification and an
- * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
- * activation and 2 AMO variables for partition deactivation.
- */
-static inline AMO_t *
-xpc_IPI_init(int index)
-{
-	AMO_t *amo = xpc_vars->amos_page + index;
-
-	(void)xpc_IPI_receive(amo);	/* clear AMO variable */
-	return amo;
-}
-
-/*
- * Check to see if there is any channel activity to/from the specified
- * partition.
- */
-static inline void
-xpc_check_for_channel_activity(struct xpc_partition *part)
-{
-	u64 IPI_amo;
-	unsigned long irq_flags;
-
-	IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
-	if (IPI_amo == 0)
-		return;
-
-	spin_lock_irqsave(&part->IPI_lock, irq_flags);
-	part->local_IPI_amo |= IPI_amo;
-	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
-
-	dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
-		XPC_PARTID(part), IPI_amo);
-
-	xpc_wakeup_channel_mgr(part);
-}
-
 #endif /* _DRIVERS_MISC_SGIXP_XPC_H */
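A worked example of the byte-per-channel encoding that XPC_GET_IPI_FLAGS/XPC_SET_IPI_FLAGS implement (a sketch; it assumes XPC_IPI_OPENREQUEST is 0x04, consistent with the 0x08/0x10 values visible in the hunk above):

static void
example_ipi_flag_encoding(void)
{
	u64 amo = 0;

	/* set channel 3's OPENREQUEST flag: 0x04 shifted into byte 3 */
	XPC_SET_IPI_FLAGS(amo, 3, XPC_IPI_OPENREQUEST);
	/* amo == 0x0000000004000000UL */

	BUG_ON(XPC_GET_IPI_FLAGS(amo, 3) != XPC_IPI_OPENREQUEST);
	BUG_ON(!XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo));	/* open/close nibble set */
}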
Four more files changed in this commit (+2 −313, +101 −51, +6 −448, +1175 −6); their previews are collapsed in this view.