
Commit 2c2b94f9 authored by Dean Nelson, committed by Tony Luck

[IA64] run drivers/misc/sgi-xp through scripts/checkpatch.pl



Addressed issues raised by scripts/checkpatch.pl. Removed unnecessary curly
braces. Eliminated uses of volatile and the use of kernel_thread() and daemonize().

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 35190506
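
Most of the hunks below are the same checkpatch.pl fixes applied over and over: the CodingStyle rule that a single-statement block takes no braces, plus the script's dislike of assignments inside if-conditions, C99 //-style comments, and volatile-qualified shared data. Taking the first hunk below as the model, the brace transformation looks like this (shown standalone as a sketch, not as a complete function from the driver):

	/* before: checkpatch.pl warns that braces {} are not
	 * necessary for single-statement blocks */
	if (!in_interrupt()) {
		cond_resched();
	}

	/* after: the lone statement stands without braces */
	if (!in_interrupt())
		cond_resched();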
drivers/misc/sgi-xp/xp.h: +3 −3
@@ -79,9 +79,9 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)

	ret = bte_copy(src, pdst, len, mode, notification);
	if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
-		if (!in_interrupt()) {
+		if (!in_interrupt())
			cond_resched();
-		}

		ret = bte_copy(src, pdst, len, mode, notification);
	}

@@ -255,7 +255,7 @@ enum xpc_retval {
				/* 115: BTE end */
	xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,

-	xpcUnknownReason	/* 116: unknown reason -- must be last in list */
+	xpcUnknownReason	/* 116: unknown reason - must be last in enum */
};

/*
drivers/misc/sgi-xp/xp_main.c: +20 −21
@@ -23,15 +23,21 @@
#include "xp.h"

/*
- * Target of nofault PIO read.
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
+ * defined here.
 */
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
+
u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);

/*
 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
 * users of XPC.
 */
struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+EXPORT_SYMBOL_GPL(xpc_registrations);

/*
 * Initialize the XPC interface to indicate that XPC isn't loaded.
@@ -52,6 +58,7 @@ struct xpc_interface xpc_interface = {
	(void (*)(partid_t, int, void *))xpc_notloaded,
	(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
};
+EXPORT_SYMBOL_GPL(xpc_interface);

/*
 * XPC calls this when it (the XPC module) has been loaded.
@@ -74,6 +81,7 @@ xpc_set_interface(void (*connect) (int),
	xpc_interface.received = received;
	xpc_interface.partid_to_nasids = partid_to_nasids;
}
+EXPORT_SYMBOL_GPL(xpc_set_interface);

/*
 * XPC calls this when it (the XPC module) is being unloaded.
@@ -95,6 +103,7 @@ xpc_clear_interface(void)
	xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
	    xpc_notloaded;
}
+EXPORT_SYMBOL_GPL(xpc_clear_interface);

/*
 * Register for automatic establishment of a channel connection whenever
@@ -133,9 +142,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,

	registration = &xpc_registrations[ch_number];

-	if (mutex_lock_interruptible(&registration->mutex) != 0) {
+	if (mutex_lock_interruptible(&registration->mutex) != 0)
		return xpcInterrupted;
-	}

	/* if XPC_CHANNEL_REGISTERED(ch_number) */
	if (registration->func != NULL) {
@@ -157,6 +165,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,

	return xpcSuccess;
}
+EXPORT_SYMBOL_GPL(xpc_connect);

/*
 * Remove the registration for automatic connection of the specified channel
@@ -207,6 +216,7 @@ xpc_disconnect(int ch_number)

	return;
}
+EXPORT_SYMBOL_GPL(xpc_disconnect);

int __init
xp_init(void)
@@ -215,9 +225,8 @@ xp_init(void)
	u64 func_addr = *(u64 *)xp_nofault_PIOR;
	u64 err_func_addr = *(u64 *)xp_error_PIOR;

-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
		return -ENODEV;
-	}

	/*
	 * Register a nofault code region which performs a cross-partition
@@ -228,8 +237,9 @@ xp_init(void)
	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
	 * work around).
	 */
-	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
-					    err_func_addr, 1, 1)) != 0) {
+	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+				       1, 1);
+	if (ret != 0) {
		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
		       ret);
	}
@@ -237,16 +247,14 @@ xp_init(void)
	 * Setup the nofault PIO read target. (There is no special reason why
	 * SH_IPI_ACCESS was selected.)
	 */
-	if (is_shub2()) {
+	if (is_shub2())
		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
-	} else {
+	else
		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
-	}

	/* initialize the connection registration mutex */
-	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
		mutex_init(&xpc_registrations[ch_number].mutex);
-	}

	return 0;
}
@@ -269,12 +277,3 @@ module_exit(xp_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
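
Two things happen at once in the xp_main.c hunks above: each export moves from the block at the bottom of the file to sit directly beneath the symbol it exports, and the exports tighten from EXPORT_SYMBOL to EXPORT_SYMBOL_GPL, restricting them to GPL-compatible modules. The resulting shape, in miniature (xp_example_counter is an invented symbol, not part of the driver):

	u64 xp_example_counter;
	EXPORT_SYMBOL_GPL(xp_example_counter);	/* export next to definition */
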
drivers/misc/sgi-xp/xpc.h: +58 −66
@@ -110,7 +110,7 @@ struct xpc_rsvd_page {
	u8 partid;		/* SAL: partition ID */
	u8 version;
	u8 pad1[6];		/* align to next u64 in cacheline */
-	volatile u64 vars_pa;
+	u64 vars_pa;		/* physical address of struct xpc_vars */
	struct timespec stamp;	/* time when reserved page was setup by XPC */
	u64 pad2[9];		/* align to last u64 in cacheline */
	u64 nasids_size;	/* SAL: size of each nasid mask in bytes */
@@ -133,9 +133,10 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
{
	int ret;

-	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+	ret = stamp1->tv_sec - stamp2->tv_sec;
+	if (ret == 0)
		ret = stamp1->tv_nsec - stamp2->tv_nsec;
-	}
+
	return ret;
}

@@ -224,7 +225,7 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 * occupies half a cacheline.
 */
struct xpc_vars_part {
-	volatile u64 magic;
+	u64 magic;

	u64 openclose_args_pa;	/* physical address of open and close args */
	u64 GPs_pa;		/* physical address of Get/Put values */
@@ -255,10 +256,12 @@ struct xpc_vars_part {
#define XPC_RP_HEADER_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
#define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars))

-#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE))
#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp)	((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS_PART(_rp)	(struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
+#define XPC_RP_VARS(_rp)	((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+				    xp_nasid_mask_words))
+#define XPC_RP_VARS_PART(_rp)	((struct xpc_vars_part *) \
+				    ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))

/*
 * Functions registered by add_timer() or called by kernel_thread() only
@@ -277,8 +280,8 @@ struct xpc_vars_part {
 * Define a Get/Put value pair (pointers) used with a message queue.
 */
struct xpc_gp {
-	volatile s64 get;	/* Get value */
-	volatile s64 put;	/* Put value */
+	s64 get;		/* Get value */
+	s64 put;		/* Put value */
};

#define XPC_GP_SIZE \
@@ -315,7 +318,7 @@ struct xpc_openclose_args {
 * and consumed by the intended recipient.
 */
struct xpc_notify {
-	volatile u8 type;	/* type of notification */
+	u8 type;		/* type of notification */

	/* the following two fields are only used if type == XPC_N_CALL */
	xpc_notify_func func;	/* user's notify function */
@@ -458,16 +461,11 @@ struct xpc_channel {

	/* kthread management related fields */

-// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
-// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
-// >>> dependent on activity over the last interval of time
	atomic_t kthreads_assigned;	/* #of kthreads assigned to channel */
	u32 kthreads_assigned_limit;	/* limit on #of kthreads assigned */
	atomic_t kthreads_idle;	/* #of kthreads idle waiting for work */
	u32 kthreads_idle_limit;	/* limit on #of kthreads idle */
	atomic_t kthreads_active;	/* #of kthreads actively working */
-	// >>> following field is temporary
-	u32 kthreads_created;	/* total #of kthreads created */

	wait_queue_head_t idle_wq;	/* idle kthread wait queue */

@@ -533,7 +531,7 @@ struct xpc_partition {

	/* XPC infrastructure referencing and teardown control */

-	volatile u8 setup_state;	/* infrastructure setup state */
+	u8 setup_state;		/* infrastructure setup state */
	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
	atomic_t references;	/* #of references to infrastructure */

@@ -552,8 +550,8 @@ struct xpc_partition {
	void *local_GPs_base;	/* base address of kmalloc'd space */
	struct xpc_gp *local_GPs;	/* local Get/Put values */
	void *remote_GPs_base;	/* base address of kmalloc'd space */
-	struct xpc_gp *remote_GPs;	/* copy of remote partition's local Get/Put */
-					/* values */
+	struct xpc_gp *remote_GPs;	/* copy of remote partition's local */
+					/* Get/Put values */
	u64 remote_GPs_pa;	/* phys address of remote partition's local */
				/* Get/Put values */

@@ -678,10 +676,9 @@ extern void xpc_teardown_infrastructure(struct xpc_partition *);
static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
-	if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
+	if (atomic_inc_return(&part->channel_mgr_requests) == 1)
		wake_up(&part->channel_mgr_wq);
-	}
}

/*
 * These next two inlines are used to keep us from tearing down a channel's
@@ -699,10 +696,9 @@ xpc_msgqueue_deref(struct xpc_channel *ch)
	s32 refs = atomic_dec_return(&ch->references);

	DBUG_ON(refs < 0);
-	if (refs == 0) {
+	if (refs == 0)
		xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
-	}
}

#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
		xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
@@ -717,10 +713,9 @@ xpc_part_deref(struct xpc_partition *part)
	s32 refs = atomic_dec_return(&part->references);

	DBUG_ON(refs < 0);
-	if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
+	if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN)
		wake_up(&part->teardown_wq);
-	}
}

static inline int
xpc_part_ref(struct xpc_partition *part)
@@ -729,9 +724,9 @@ xpc_part_ref(struct xpc_partition *part)

	atomic_inc(&part->references);
	setup = (part->setup_state == XPC_P_SETUP);
-	if (!setup) {
+	if (!setup)
		xpc_part_deref(part);
-	}

	return setup;
}

@@ -1007,16 +1002,14 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
			ipi_flag_string, ch->partid, ch->number, ret);
		if (unlikely(ret != xpcSuccess)) {
-			if (irq_flags != NULL) {
+			if (irq_flags != NULL)
				spin_unlock_irqrestore(&ch->lock, *irq_flags);
-			}
			XPC_DEACTIVATE_PARTITION(part, ret);
-			if (irq_flags != NULL) {
+			if (irq_flags != NULL)
				spin_lock_irqsave(&ch->lock, *irq_flags);
-			}
	}
}
}

/*
 * Make it look like the remote partition, which is associated with the
@@ -1056,8 +1049,8 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
#define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))

-#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & __IA64_UL_CONST(0x1010101010101010))
+#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010UL)

static inline void
xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
@@ -1178,9 +1171,8 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
	unsigned long irq_flags;

	IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
-	if (IPI_amo == 0) {
+	if (IPI_amo == 0)
		return;
-	}

	spin_lock_irqsave(&part->IPI_lock, irq_flags);
	part->local_IPI_amo |= IPI_amo;
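
Besides shedding volatile, the xpc.h hunks rework the XPC_RP_* helper macros, and two of those changes are more than style: the old XPC_RP_VARS applied its cast before the addition, so the offset appears to have been scaled by sizeof(struct xpc_vars) rather than sizeof(u64), and the old XPC_RP_VARS_PART expanded XPC_RP_VARS(rp) with a hard-coded rp instead of its _rp parameter, compiling only because every caller passed a variable named rp. A toy example of the precedence hazard the added parentheses guard against (DOUBLE_BAD/DOUBLE_GOOD are hypothetical macros, not from this driver):

	#include <stdio.h>

	#define DOUBLE_BAD(x)	x * 2		/* argument left bare */
	#define DOUBLE_GOOD(x)	((x) * 2)	/* argument and expansion wrapped */

	int main(void)
	{
		printf("%d\n", DOUBLE_BAD(1 + 2));	/* 1 + 2 * 2 = 5 */
		printf("%d\n", DOUBLE_GOOD(1 + 2));	/* (1 + 2) * 2 = 6 */
		return 0;
	}
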
drivers/misc/sgi-xp/xpc_channel.c: +75 −117
@@ -33,19 +33,19 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cachline aligned memory by default */
	*base = kzalloc(size, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
		return NULL;
-	}
-	if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
+
+	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;
-	}

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
		return NULL;
-	}

	return (void *)L1_CACHE_ALIGN((u64)*base);
}

@@ -264,15 +264,13 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
	DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

-	if (part->act_state == XPC_P_DEACTIVATING) {
+	if (part->act_state == XPC_P_DEACTIVATING)
		return part->reason;
-	}

	bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
			      (BTE_NORMAL | BTE_WACQUIRE), NULL);
-	if (bte_ret == BTE_SUCCESS) {
+	if (bte_ret == BTE_SUCCESS)
		return xpcSuccess;
-	}

	dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
		XPC_PARTID(part), bte_ret);
@@ -359,18 +357,16 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
		part->remote_IPI_nasid = pulled_entry->IPI_nasid;
		part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;

-		if (part->nchannels > pulled_entry->nchannels) {
+		if (part->nchannels > pulled_entry->nchannels)
			part->nchannels = pulled_entry->nchannels;
-		}

		/* let the other side know that we've pulled their variables */

		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
	}

-	if (pulled_entry->magic == XPC_VP_MAGIC1) {
+	if (pulled_entry->magic == XPC_VP_MAGIC1)
		return xpcRetry;
-	}

	return xpcSuccess;
}
@@ -390,9 +386,10 @@ xpc_get_IPI_flags(struct xpc_partition *part)
	 */

	spin_lock_irqsave(&part->IPI_lock, irq_flags);
-	if ((IPI_amo = part->local_IPI_amo) != 0) {
+	IPI_amo = part->local_IPI_amo;
+	if (IPI_amo != 0)
		part->local_IPI_amo = 0;
-	}

	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

	if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
@@ -441,20 +438,14 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
	int nentries;
	size_t nbytes;

-	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
-	// >>> iterations of the for-loop, bail if set?
-
-	// >>> should we impose a minimum #of entries? like 4 or 8?
	for (nentries = ch->local_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->msg_size;
		ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
								   GFP_KERNEL,
-								   &ch->
-								   local_msgqueue_base);
-		if (ch->local_msgqueue == NULL) {
+						      &ch->local_msgqueue_base);
+		if (ch->local_msgqueue == NULL)
			continue;
-		}

		nbytes = nentries * sizeof(struct xpc_notify);
		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
@@ -493,20 +484,14 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)

	DBUG_ON(ch->remote_nentries <= 0);

-	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
-	// >>> iterations of the for-loop, bail if set?
-
-	// >>> should we impose a minimum #of entries? like 4 or 8?
	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->msg_size;
		ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
								    GFP_KERNEL,
-								    &ch->
-								    remote_msgqueue_base);
-		if (ch->remote_msgqueue == NULL) {
+						     &ch->remote_msgqueue_base);
+		if (ch->remote_msgqueue == NULL)
			continue;
-		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries) {
@@ -538,11 +523,12 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)

	DBUG_ON(ch->flags & XPC_C_SETUP);

-	if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
+	ret = xpc_allocate_local_msgqueue(ch);
+	if (ret != xpcSuccess)
		return ret;
-	}

-	if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
+	ret = xpc_allocate_remote_msgqueue(ch);
+	if (ret != xpcSuccess) {
		kfree(ch->local_msgqueue_base);
		ch->local_msgqueue = NULL;
		kfree(ch->notify_queue);
@@ -582,12 +568,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
		ret = xpc_allocate_msgqueues(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

-		if (ret != xpcSuccess) {
+		if (ret != xpcSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
-		}
-		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
+
+		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;
-		}

		DBUG_ON(!(ch->flags & XPC_C_SETUP));
		DBUG_ON(ch->local_msgqueue == NULL);
@@ -599,9 +584,8 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
		xpc_IPI_send_openreply(ch, irq_flags);
	}

-	if (!(ch->flags & XPC_C_ROPENREPLY)) {
+	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;
-	}

	DBUG_ON(ch->remote_msgqueue_pa == 0);

@@ -719,9 +703,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)

	DBUG_ON(!spin_is_locked(&ch->lock));

-	if (!(ch->flags & XPC_C_DISCONNECTING)) {
+	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;
-	}

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

@@ -736,27 +719,24 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)

	if (part->act_state == XPC_P_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
-		if (xpc_partition_engaged(1UL << ch->partid)) {
+		if (xpc_partition_engaged(1UL << ch->partid))
			return;
-		}

	} else {

		/* as long as the other side is up do the full protocol */

-		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
+		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;
-		}

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_IPI_send_closereply(ch, irq_flags);
		}

-		if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
+		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
-		}
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
@@ -817,7 +797,8 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,

again:

-	if ((ch->flags & XPC_C_DISCONNECTED) && (ch->flags & XPC_C_WDISCONNECT)) {
+	if ((ch->flags & XPC_C_DISCONNECTED) &&
+	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing IPI flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
@@ -890,11 +871,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
-			if (reason <= xpcSuccess || reason > xpcUnknownReason) {
+			if (reason <= xpcSuccess || reason > xpcUnknownReason)
				reason = xpcUnknownReason;
-			} else if (reason == xpcUnregistering) {
+			else if (reason == xpcUnregistering)
				reason = xpcOtherUnregistering;
-			}

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

@@ -1068,9 +1048,8 @@ xpc_connect_channel(struct xpc_channel *ch)
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

-	if (mutex_trylock(&registration->mutex) == 0) {
+	if (mutex_trylock(&registration->mutex) == 0)
		return xpcRetry;
-	}

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
@@ -1159,7 +1138,7 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
					 (get % ch->local_nentries) *
					 ch->msg_size);
		msg->flags = 0;
-	} while (++get < (volatile s64)ch->remote_GP.get);
+	} while (++get < ch->remote_GP.get);
}

/*
@@ -1177,7 +1156,7 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
					 (put % ch->remote_nentries) *
					 ch->msg_size);
		msg->flags = 0;
-	} while (++put < (volatile s64)ch->remote_GP.put);
+	} while (++put < ch->remote_GP.put);
}

static void
@@ -1244,10 +1223,9 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
		 * If anyone was waiting for message queue entries to become
		 * available, wake them up.
		 */
-		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
			wake_up(&ch->msg_allocate_wq);
-		}
	}

	/*
	 * Now check for newly sent messages by the other side. (The remote
@@ -1273,11 +1251,10 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
				"delivered=%d, partid=%d, channel=%d\n",
				nmsgs_sent, ch->partid, ch->number);

-			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
+			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
				xpc_activate_kthreads(ch, nmsgs_sent);
-			}
	}
	}

	xpc_msgqueue_deref(ch);
}
@@ -1310,9 +1287,8 @@ xpc_process_channel_activity(struct xpc_partition *part)

		IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);

-		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
+		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
			xpc_process_openclose_IPI(part, ch_number, IPI_flags);
-		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

@@ -1323,9 +1299,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
			continue;
		}

-		if (part->act_state == XPC_P_DEACTIVATING) {
+		if (part->act_state == XPC_P_DEACTIVATING)
			continue;
-		}

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
@@ -1345,11 +1320,10 @@ xpc_process_channel_activity(struct xpc_partition *part)
		 * from the other partition.
		 */

-		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
+		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
			xpc_process_msg_IPI(part, ch_number);
-		}
}
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
@@ -1560,9 +1534,9 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,

	DBUG_ON(!spin_is_locked(&ch->lock));

-	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
+	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;
-	}

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
@@ -1578,9 +1552,8 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,

	xpc_IPI_send_closerequest(ch, irq_flags);

-	if (channel_was_connected) {
+	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;
-	}

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

@@ -1595,9 +1568,8 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
	}

	/* wake those waiting to allocate an entry from the local msg queue */
-	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
-	}

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
@@ -1632,7 +1604,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
	enum xpc_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
-		DBUG_ON(ch->reason == xpcInterrupted);	// >>> Is this true?
+		DBUG_ON(ch->reason == xpcInterrupted);
		return ch->reason;
	}

@@ -1642,7 +1614,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
-		DBUG_ON(ch->reason == xpcInterrupted);	// >>> Is this true?
+		DBUG_ON(ch->reason == xpcInterrupted);
	} else if (ret == 0) {
		ret = xpcTimeout;
	} else {
@@ -1685,9 +1657,9 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,

	while (1) {

-		put = (volatile s64)ch->w_local_GP.put;
-		if (put - (volatile s64)ch->w_remote_GP.get <
-		    ch->local_nentries) {
+		put = ch->w_local_GP.put;
+		rmb();	/* guarantee that .put loads before .get */
+		if (put - ch->w_remote_GP.get < ch->local_nentries) {

			/* There are available message entries. We need to try
			 * to secure one for ourselves. We'll do this by trying
@@ -1711,9 +1683,8 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
		 * that will cause the IPI handler to fetch the latest
		 * GP values as if an IPI was sent by the other side.
		 */
-		if (ret == xpcTimeout) {
+		if (ret == xpcTimeout)
			xpc_IPI_send_local_msgrequest(ch);
-		}

		if (flags & XPC_NOWAIT) {
			xpc_msgqueue_deref(ch);
@@ -1772,10 +1743,9 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
		ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
		xpc_part_deref(part);

-		if (msg != NULL) {
+		if (msg != NULL)
			*payload = &msg->payload;
-		}
	}

	return ret;
}
@@ -1795,17 +1765,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
	while (1) {

		while (1) {
-			if (put == (volatile s64)ch->w_local_GP.put) {
+			if (put == ch->w_local_GP.put)
				break;
-			}

			msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
						 (put % ch->local_nentries) *
						 ch->msg_size);

-			if (!(msg->flags & XPC_M_READY)) {
+			if (!(msg->flags & XPC_M_READY))
				break;
-			}

			put++;
		}
@@ -1818,7 +1786,7 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
		if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
		    initial_put) {
			/* someone else beat us to it */
-			DBUG_ON((volatile s64)ch->local_GP->put < initial_put);
+			DBUG_ON(ch->local_GP->put < initial_put);
			break;
		}

@@ -1837,10 +1805,9 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
		initial_put = put;
	}

-	if (send_IPI) {
+	if (send_IPI)
		xpc_IPI_send_msgrequest(ch);
-	}
}

/*
 * Common code that does the actual sending of the message by advancing the
@@ -1880,7 +1847,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
		notify->key = key;
		notify->type = notify_type;

-		// >>> is a mb() needed here?
+		/* >>> is a mb() needed here? */

		if (ch->flags & XPC_C_DISCONNECTING) {
			/*
@@ -1913,9 +1880,8 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
	/* see if the message is next in line to be sent, if so send it */

	put = ch->local_GP->put;
-	if (put == msg_number) {
+	if (put == msg_number)
		xpc_send_msgs(ch, put);
-	}

	/* drop the reference grabbed in xpc_allocate_msg() */
	xpc_msgqueue_deref(ch);
@@ -2032,10 +1998,8 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)

		msg_index = ch->next_msg_to_pull % ch->remote_nentries;

-		DBUG_ON(ch->next_msg_to_pull >=
-			(volatile s64)ch->w_remote_GP.put);
-		nmsgs = (volatile s64)ch->w_remote_GP.put -
-		    ch->next_msg_to_pull;
+		DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
+		nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
		if (msg_index + nmsgs > ch->remote_nentries) {
			/* ignore the ones that wrap the msg queue for now */
			nmsgs = ch->remote_nentries - msg_index;
@@ -2046,9 +2010,9 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
		remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
						msg_offset);

-		if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
-						      nmsgs * ch->msg_size)) !=
-		    xpcSuccess) {
+		ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
+						 nmsgs * ch->msg_size);
+		if (ret != xpcSuccess) {

			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
				" msg %ld from partition %d, channel=%d, "
@@ -2061,8 +2025,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
			return NULL;
		}

-		mb();		/* >>> this may not be needed, we're not sure */
-
		ch->next_msg_to_pull += nmsgs;
	}

@@ -2085,14 +2047,13 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
	s64 get;

	do {
-		if ((volatile u32)ch->flags & XPC_C_DISCONNECTING) {
+		if (ch->flags & XPC_C_DISCONNECTING)
			break;
-		}

-		get = (volatile s64)ch->w_local_GP.get;
-		if (get == (volatile s64)ch->w_remote_GP.put) {
+		get = ch->w_local_GP.get;
+		rmb();	/* guarantee that .get loads before .put */
+		if (get == ch->w_remote_GP.put)
			break;
-		}

		/* There are messages waiting to be pulled and delivered.
		 * We need to try to secure one for ourselves. We'll do this
@@ -2132,7 +2093,8 @@ xpc_deliver_msg(struct xpc_channel *ch)
{
	struct xpc_msg *msg;

-	if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
+	msg = xpc_get_deliverable_msg(ch);
+	if (msg != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
@@ -2178,17 +2140,15 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
	while (1) {

		while (1) {
-			if (get == (volatile s64)ch->w_local_GP.get) {
+			if (get == ch->w_local_GP.get)
				break;
-			}

			msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
						 (get % ch->remote_nentries) *
						 ch->msg_size);

-			if (!(msg->flags & XPC_M_DONE)) {
+			if (!(msg->flags & XPC_M_DONE))
				break;
-			}

			msg_flags |= msg->flags;
			get++;
@@ -2202,7 +2162,7 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
		if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
		    initial_get) {
			/* someone else beat us to it */
-			DBUG_ON((volatile s64)ch->local_GP->get <= initial_get);
+			DBUG_ON(ch->local_GP->get <= initial_get);
			break;
		}

@@ -2221,10 +2181,9 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
		initial_get = get;
	}

-	if (send_IPI) {
+	if (send_IPI)
		xpc_IPI_send_msgrequest(ch);
-	}
}

/*
 * Acknowledge receipt of a delivered message.
@@ -2276,9 +2235,8 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
	 * been delivered.
	 */
	get = ch->local_GP->get;
-	if (get == msg_number) {
+	if (get == msg_number)
		xpc_acknowledge_msgs(ch, get, msg->flags);
-	}

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg()  */
	xpc_msgqueue_deref(ch);
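
Most of the volatile casts deleted from xpc_channel.c were only trying to defeat compiler caching; the two load pairs whose order genuinely matters now get an explicit rmb() instead (.put before .get when allocating a message slot, .get before .put when looking for a deliverable one). A loose userspace analogue of that pairing, with C11 acquire/release standing in for the kernel's wmb()/rmb() (the ring, its size, and the function names are invented for the sketch):

	#include <stdatomic.h>
	#include <stdio.h>

	static long ring[8];		/* message slots */
	static _Atomic long ring_put;	/* producer-advanced index */

	static void produce(long v)
	{
		long p = atomic_load_explicit(&ring_put, memory_order_relaxed);

		ring[p % 8] = v;
		/* release pairs with the consumer's acquire the way wmb()
		 * pairs with rmb(): the slot contents become visible no
		 * later than the index that covers them */
		atomic_store_explicit(&ring_put, p + 1, memory_order_release);
	}

	static long consume(long get)
	{
		/* acquire ~ rmb(): load the index before trusting the data */
		long p = atomic_load_explicit(&ring_put, memory_order_acquire);

		return (get == p) ? -1 : ring[get % 8];
	}

	int main(void)
	{
		produce(42);
		printf("%ld\n", consume(0));	/* prints 42 */
		return 0;
	}
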
+56 −111

File changed; preview size limit exceeded, so this diff is collapsed.
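
The kernel_thread() and daemonize() elimination named in the commit message lives in the collapsed portion (xpc_main.c is the likely home, since that is where XPC spawns its heartbeat and channel-management kthreads). The idiomatic replacement in this era is the kthread API, roughly along these lines (a sketch only; the xpc_example_* names are invented and the real conversion may differ):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static struct task_struct *xpc_example_task;

	/* old style: pid = kernel_thread(fn, NULL, 0), with fn calling
	 * daemonize("name") to detach itself; kthread_run() creates a
	 * properly named kernel thread and needs neither */
	static int xpc_example_thread(void *arg)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static int xpc_example_start(void)
	{
		xpc_example_task = kthread_run(xpc_example_thread, NULL,
					       "xpc_example");
		if (IS_ERR(xpc_example_task))
			return PTR_ERR(xpc_example_task);
		return 0;
	}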