[V2,2/3] powerpc: Fix Unaligned Fixed Point Loads and Stores

Message ID 1383244738-5986-3-git-send-email-tommusta@gmail.com (mailing list archive)
State Superseded, archived

Commit Message

Tom Musta Oct. 31, 2013, 6:38 p.m. UTC
From: Tom Musta <tommusta@gmail.com>

This patch modifies the unaligned access routines of the sstep.c
module so that they properly reverse the bytes of storage operands
when running a little-endian kernel.

Signed-off-by: Tom Musta <tommusta@gmail.com>
---
 arch/powerpc/lib/sstep.c |   45 +++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 45 insertions(+), 0 deletions(-)
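
To see why the byte reversal is needed: the accumulation loop in
read_mem_unaligned() builds its result most-significant-byte-first, so on
a little-endian kernel the assembled value comes out byte-swapped. A
worked trace for a 2-byte load (illustration only; the values are
arbitrary):

	/* LE memory at ea holds the halfword 0x1122 as {0x22, 0x11} */
	x = 0;
	x = (x << 8) + 0x22;	/* after byte 0: x = 0x0022 */
	x = (x << 8) + 0x11;	/* after byte 1: x = 0x2211 (swapped) */
	*dest = byterev_2(x);	/* 0x1122, the value lhz would load */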

Comments

Benjamin Herrenschmidt Nov. 4, 2013, 2:34 a.m. UTC | #1
On Thu, 2013-10-31 at 13:38 -0500, Tom wrote:
> From: Tom Musta <tommusta@gmail.com>
> 
> This patch modifies the unaligned access routines of the sstep.c
> module so that they properly reverse the bytes of storage operands
> when running a little-endian kernel.

Does that patch differ from v1? (I already merged v1)

Cheers,
Ben.
 
Paul Mackerras Nov. 4, 2013, 2:43 a.m. UTC | #2
On Thu, Oct 31, 2013 at 01:38:57PM -0500, Tom wrote:
> From: Tom Musta <tommusta@gmail.com>
> 
> This patch modifies the unaligned access routines of the sstep.c
> module so that they properly reverse the bytes of storage operands
> when running a little-endian kernel.

This has rather a lot of #ifdefs inside function definitions, and for
little-endian it does the unaligned accesses one byte at a time.  You
could avoid all the #ifdefs if you define the combining function in an
endian-dependent way and make read_mem_unaligned look something like
this:

#ifdef __LITTLE_ENDIAN__
#define combine_pieces(x, b, c, nd)	((x) + ((b) << (8 * (nd))))
#else
#define combine_pieces(x, b, c, nd)	(((x) << (8 * (c))) + (b))
#endif

static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	int nd;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
	for (nd = 0; nd < nb; nd += c) {
		c = max_align(ea);
		if (c > nb - nd)
			c = max_align(nb - nd);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = combine_pieces(x, b, c, nd);
		ea += c;
	}
	*dest = x;
	return 0;
}

and do something analogous for write_mem_unaligned().

Paul.
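
For illustration, the analogous write side could look like the following.
This is a sketch only: extract_piece() is a hypothetical counterpart to
combine_pieces() above, not code from the thread, and it assumes that
write_mem_aligned() stores the low c bytes of its value argument (as it
does in sstep.c):

#ifdef __LITTLE_ENDIAN__
#define extract_piece(val, c, nd, nb)	((val) >> (8 * (nd)))
#else
#define extract_piece(val, c, nd, nb)	((val) >> (8 * ((nb) - (nd) - (c))))
#endif

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	int nd;
	unsigned long c;

	/* unaligned, do this in pieces */
	for (nd = 0; nd < nb; nd += c) {
		c = max_align(ea);
		if (c > nb - nd)
			c = max_align(nb - nd);
		/* low-order bytes go out first on LE, high-order first on BE */
		err = write_mem_aligned(extract_piece(val, c, nd, nb), ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}

On big-endian, extract_piece() reduces to the original val >> (8 * (nb - c))
once nd stands in for the decremented nb, so the existing behaviour is
unchanged.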

Patch

diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 7bfaa9d..c8743e1 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -212,11 +212,19 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
 {
 	int err;
 	unsigned long x, b, c;
+#ifdef __LITTLE_ENDIAN__
+	int len = nb; /* save a copy of the length for byte reversal */
+#endif
 
 	/* unaligned, do this in pieces */
 	x = 0;
 	for (; nb > 0; nb -= c) {
+#ifdef __LITTLE_ENDIAN__
+		c = 1;
+#endif
+#ifdef __BIG_ENDIAN__
 		c = max_align(ea);
+#endif
 		if (c > nb)
 			c = max_align(nb);
 		err = read_mem_aligned(&b, ea, c);
@@ -225,7 +233,24 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
 		x = (x << (8 * c)) + b;
 		ea += c;
 	}
+#ifdef __LITTLE_ENDIAN__
+	switch (len) {
+	case 2:
+		*dest = byterev_2(x);
+		break;
+	case 4:
+		*dest = byterev_4(x);
+		break;
+#ifdef __powerpc64__
+	case 8:
+		*dest = byterev_8(x);
+		break;
+#endif
+	}
+#endif
+#ifdef __BIG_ENDIAN__
 	*dest = x;
+#endif
 	return 0;
 }
 
@@ -273,9 +298,29 @@ static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
 	int err;
 	unsigned long c;
 
+#ifdef __LITTLE_ENDIAN__
+	switch (nb) {
+	case 2:
+		val = byterev_2(val);
+		break;
+	case 4:
+		val = byterev_4(val);
+		break;
+#ifdef __powerpc64__
+	case 8:
+		val = byterev_8(val);
+		break;
+#endif
+	}
+#endif
 	/* unaligned or little-endian, do this in pieces */
 	for (; nb > 0; nb -= c) {
+#ifdef __LITTLE_ENDIAN__
+		c = 1;
+#endif
+#ifdef __BIG_ENDIAN__
 		c = max_align(ea);
+#endif
 		if (c > nb)
 			c = max_align(nb);
 		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
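
For reference when reading these hunks: max_align() isolates the lowest
set bit of its argument, i.e. the largest power-of-two access size at
which the address is naturally aligned. Assuming the definition in the
sstep.c of this era:

static unsigned long max_align(unsigned long x)
{
	return x & -x;		/* isolates rightmost bit */
}

So max_align(0x1003) == 1 forces single-byte pieces, while
max_align(0x1004) == 4 permits a 4-byte piece.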