
Commit b7f859dd authored by Michael Ellerman


Merge branch 'next-remove-ldst' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc into next
parents 428d4d65 0eebf9b5
+6 −6
@@ -42,12 +42,12 @@ struct dbdma_regs {
 * DBDMA command structure.  These fields are all little-endian!
 */
struct dbdma_cmd {
-	unsigned short req_count;	/* requested byte transfer count */
-	unsigned short command;	/* command word (has bit-fields) */
-	unsigned int   phy_addr;	/* physical data address */
-	unsigned int   cmd_dep;	/* command-dependent field */
-	unsigned short res_count;	/* residual count after completion */
-	unsigned short xfer_status;	/* transfer status */
+	__le16 req_count;	/* requested byte transfer count */
+	__le16 command;		/* command word (has bit-fields) */
+	__le32 phy_addr;	/* physical data address */
+	__le32 cmd_dep;		/* command-dependent field */
+	__le16 res_count;	/* residual count after completion */
+	__le16 xfer_status;	/* transfer status */
};

/* DBDMA command values in command field */
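With the descriptor fields now typed __le16/__le32, drivers are expected to convert explicitly when filling or reading a command, and sparse can flag any access that skips the conversion. A minimal hedged sketch of what a caller looks like after this change (init_dbdma_cmd() and dbdma_cmd_status() are hypothetical helpers, not part of this diff; the <asm/dbdma.h> include path is assumed):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/dbdma.h>

/* Hypothetical helper: build one descriptor in the hardware's
 * little-endian layout, regardless of host endianness. */
static void init_dbdma_cmd(struct dbdma_cmd *cp, u16 cmd, u32 paddr, u16 count)
{
	cp->req_count   = cpu_to_le16(count);	/* host order -> LE */
	cp->command     = cpu_to_le16(cmd);
	cp->phy_addr    = cpu_to_le32(paddr);
	cp->cmd_dep     = cpu_to_le32(0);
	cp->res_count   = cpu_to_le16(0);
	cp->xfer_status = cpu_to_le16(0);
}

/* Hypothetical helper: read the completion status back (LE -> host order). */
static u16 dbdma_cmd_status(const struct dbdma_cmd *cp)
{
	return le16_to_cpu(cp->xfer_status);
}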
+1 −1
@@ -585,7 +585,7 @@ struct kvm_vcpu_arch {
	pgd_t *pgdir;

	u8 io_gpr; /* GPR used as IO source/target */
-	u8 mmio_is_bigendian;
+	u8 mmio_host_swabbed;
	u8 mmio_sign_extend;
	u8 osi_needed;
	u8 osi_enabled;
+0 −26
@@ -9,30 +9,4 @@

#include <uapi/asm/swab.h>

-static __inline__ __u16 ld_le16(const volatile __u16 *addr)
-{
-	__u16 val;
-
-	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-
-static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
-{
-	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __u32 ld_le32(const volatile __u32 *addr)
-{
-	__u32 val;
-
-	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-
-static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
-{
-	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
#endif /* _ASM_POWERPC_SWAB_H */
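The helpers removed above always byte-reverse via the lhbrx/sthbrx/lwbrx/stwbrx instructions. Callers in this series move either to the conditional converters (le16_to_cpu()/cpu_to_le16(), as in the vga.h hunk below) or to the unconditional swab16()/swab32()/swab64() (as in the KVM MMIO hunks below). A brief hedged sketch of the two generic replacements, assuming only the standard <asm/byteorder.h> and <linux/swab.h> interfaces; the example_* names are hypothetical:

#include <linux/types.h>
#include <linux/swab.h>
#include <asm/byteorder.h>

/* le16_to_cpu()/cpu_to_le16() swap only on a big-endian host, so they fit
 * data that is defined to be little-endian in memory (__le16 storage). */
static inline u16 example_read_le16(const __le16 *p)
{
	return le16_to_cpu(*p);
}

static inline void example_write_le16(__le16 *p, u16 val)
{
	*p = cpu_to_le16(val);
}

/* swab16()/swab32()/swab64() byte-reverse unconditionally, which is what
 * the KVM MMIO paths want once they already know a swap is required. */
static inline u32 example_swap32(u32 v)
{
	return swab32(v);
}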
+2 −2
@@ -25,12 +25,12 @@

static inline void scr_writew(u16 val, volatile u16 *addr)
{
-	st_le16(addr, val);
+	*addr = cpu_to_le16(val);
}

static inline u16 scr_readw(volatile const u16 *addr)
{
-	return ld_le16(addr);
+	return le16_to_cpu(*addr);
}

#define VT_BUF_HAVE_MEMCPYW
+18 −20
@@ -720,7 +720,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
		return;
	}

-	if (vcpu->arch.mmio_is_bigendian) {
+	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
@@ -728,10 +728,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
-		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
-		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
-		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
+		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
+		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}
@@ -780,14 +780,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       int is_default_endian)
{
	int idx, ret;
-	int is_bigendian;
+	bool host_swabbed;

+	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
-		/* Default endianness is "little endian". */
-		is_bigendian = !is_default_endian;
+		host_swabbed = is_default_endian;
	} else {
-		/* Default endianness is "big endian". */
-		is_bigendian = is_default_endian;
+		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
@@ -800,7 +799,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
-	vcpu->arch.mmio_is_bigendian = is_bigendian;
+	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;
@@ -840,14 +839,13 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
	void *data = run->mmio.data;
	int idx, ret;
-	int is_bigendian;
+	bool host_swabbed;

+	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
-		/* Default endianness is "little endian". */
-		is_bigendian = !is_default_endian;
+		host_swabbed = is_default_endian;
	} else {
-		/* Default endianness is "big endian". */
-		is_bigendian = is_default_endian;
+		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
@@ -862,7 +860,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
-	if (is_bigendian) {
+	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
@@ -870,10 +868,10 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		case 1: *(u8  *)data = val; break;
		}
	} else {
-		/* Store LE value into 'data'. */
		switch (bytes) {
-		case 4: st_le32(data, val); break;
-		case 2: st_le16(data, val); break;
+		case 8: *(u64 *)data = swab64(val); break;
+		case 4: *(u32 *)data = swab32(val); break;
+		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}
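The new host_swabbed flag records whether the bytes staged in run->mmio.data are in the opposite order to the host, replacing the guest-centric mmio_is_bigendian. The if/else above (marked by the newly added "Pity C doesn't have a logical XOR operator" comment) reduces to a single comparison once both operands are plain booleans; a hedged sketch, not code from this commit:

#include <stdbool.h>

/* Equivalent to:
 *   if (need_byteswap)  host_swabbed = is_default_endian;
 *   else                host_swabbed = !is_default_endian;
 * with need_byteswap standing in for kvmppc_need_byteswap(vcpu) and
 * is_default_endian normalized to a bool. */
static bool compute_host_swabbed(bool need_byteswap, bool is_default_endian)
{
	return need_byteswap == is_default_endian;
}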