Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c745a8a1 authored by Chris Metcalf
Browse files

arch/tile: Various cleanups.



This change rolls up random cleanups not representing any actual bugs.

- Remove a stale CONFIG_ value from the default tile_defconfig
- Remove unused tns_atomic_xxx() family of methods from <asm/atomic.h>
- Optimize get_order() using Tile's "clz" instruction
- Fix a bad hypervisor upcall name (not currently used in Linux anyway)
- Use __copy_in_user_inatomic() name for consistency, and export it
- Export some additional hypervisor driver I/O upcalls and some homecache calls
- Remove the obfuscating MEMCPY_TEST_WH64 support code
- Other stray comment cleanups, #if 0 removal, etc.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 1fcbe027
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -231,7 +231,6 @@ CONFIG_HARDWALL=y
CONFIG_MEMPROF=y
CONFIG_XGBE=y
CONFIG_NET_TILE=y
CONFIG_PSEUDO_NAPI=y
CONFIG_TILEPCI_ENDP=y
CONFIG_TILEPCI_HOST_SUBSET=m
CONFIG_TILE_IDE_GPIO=y
+0 −37
Original line number Diff line number Diff line
@@ -255,43 +255,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)


/*
 * Support "tns" atomic integers.  These are atomic integers that can
 * hold any value but "1".  They are more efficient than regular atomic
 * operations because the "lock" (aka acquire) step is a single "tns"
 * in the uncontended case, and the "unlock" (aka release) step is a
 * single "store" without an mf.  (However, note that on tilepro the
 * "tns" will evict the local cache line, so it's not all upside.)
 *
 * Note that you can ONLY observe the value stored in the pointer
 * using these operations; a direct read of the value may confusingly
 * return the special value "1".
 */

int __tns_atomic_acquire(atomic_t *);
void __tns_atomic_release(atomic_t *p, int v);

/*
 * Atomically set a "tns" atomic integer to @i.
 *
 * Per the protocol described in the block comment above: the acquire
 * step swaps the reserved value "1" into the word, and the release
 * step stores the new value, so these two calls must stay in exactly
 * this order.
 *
 * NOTE(review): @i must not be the reserved value "1" (see above) --
 * presumably callers guarantee this; there is no check here.
 */
static inline void tns_atomic_set(atomic_t *v, int i)
{
	__tns_atomic_acquire(v);
	__tns_atomic_release(v, i);
}

/*
 * Atomic compare-and-exchange on a "tns" atomic integer: store @n
 * only if the value observed at acquire time equals @o, otherwise
 * store back the observed value unchanged.  Returns the observed
 * value either way (the usual cmpxchg contract: success is indicated
 * by the return value equalling @o).
 */
static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
{
	int ret = __tns_atomic_acquire(v);
	__tns_atomic_release(v, (ret == o) ? n : ret);
	return ret;
}

/*
 * Atomic exchange on a "tns" atomic integer: unconditionally store
 * @n and return the value that was observed at acquire time.
 *
 * NOTE(review): as with tns_atomic_set(), @n presumably must not be
 * the reserved value "1" -- not checked here.
 */
static inline int tns_atomic_xchg(atomic_t *v, int n)
{
	int ret = __tns_atomic_acquire(v);
	__tns_atomic_release(v, n);
	return ret;
}

#endif /* !__ASSEMBLY__ */

/*
+5 −1
Original line number Diff line number Diff line
@@ -129,6 +129,11 @@ static inline u64 pmd_val(pmd_t pmd)

#endif

/*
 * Return the allocation order (power-of-two page count, as an
 * exponent) needed to hold @size bytes, computed with a single
 * count-leading-zeros instead of the generic shift loop: the order
 * is the index of the highest set bit in (size - 1) >> PAGE_SHIFT,
 * plus one -- i.e. BITS_PER_LONG minus the leading-zero count.
 *
 * NOTE(review): for size <= PAGE_SIZE the clz argument is 0, and
 * GCC documents __builtin_clzl(0) as undefined; this presumably
 * relies on Tile's "clz" instruction returning the full word width
 * for a zero input so the result is order 0 -- confirm.  size == 0
 * also underflows (size - 1), matching the generic get_order(),
 * which is likewise undefined for 0.
 */
static inline __attribute_const__ int get_order(unsigned long size)
{
	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

#endif /* !__ASSEMBLY__ */

#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
@@ -332,7 +337,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* __KERNEL__ */

+2 −2
Original line number Diff line number Diff line
@@ -389,14 +389,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_asm(
extern unsigned long __copy_in_user_inatomic(
	void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_sleep();
	return __copy_in_user_asm(to, from, n);
	return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
+4 −4
Original line number Diff line number Diff line
@@ -532,11 +532,11 @@ void hv_disable_intr(HV_IntrMask disab_mask);
 */
void hv_clear_intr(HV_IntrMask clear_mask);

/** Assert a set of device interrupts.
/** Raise a set of device interrupts.
 *
 * @param assert_mask Bitmap of interrupts to clear.
 * @param raise_mask Bitmap of interrupts to raise.
 */
void hv_assert_intr(HV_IntrMask assert_mask);
void hv_raise_intr(HV_IntrMask raise_mask);

/** Trigger a one-shot interrupt on some tile
 *
@@ -1712,7 +1712,7 @@ typedef struct
 * @param cache_control This argument allows you to specify a length of
 *        physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN).
 *        You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache.
 *        You can "or" in HV_FLUSH_EVICT_LI1 to flush the whole LII cache.
 *        You can "or" in HV_FLUSH_EVICT_L1I to flush the whole L1I cache.
 *        HV_FLUSH_ALL flushes all caches.
 * @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of
 *        tile indices to perform cache flush on.  The low bit of the first
Loading