
[6/9] powerpc/bpf: Fix BPF_SUB when imm == 0x80000000

Message ID 1912a409447071f46ac6cc957ce8edea0e5232b7.1633104510.git.naveen.n.rao@linux.vnet.ibm.com
State Superseded
Series powerpc/bpf: Various fixes

Commit Message

Naveen N. Rao Oct. 1, 2021, 9:14 p.m. UTC
We aren't handling subtraction involving an immediate value of
0x80000000 properly. Fix the same.

Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/net/bpf_jit_comp64.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
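For reference, a minimal standalone C sketch of why an immediate of 0x80000000 is special here, assuming the two's-complement wrapping the kernel builds with: for BPF_ALU64, dst -= imm must subtract the sign-extended immediate, i.e. add 2^31, but the old code negated the 32-bit imm first, and -0x80000000 wraps back to 0x80000000, so the generated code added -2^31 instead.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t imm = (int32_t)0x80000000;	/* INT32_MIN, as in BPF_ALU64 | BPF_SUB | BPF_K */
	int64_t dst = 0;

	/* What the BPF instruction means: subtract the sign-extended immediate. */
	int64_t want = dst - (int64_t)imm;		/* 0 - (-2^31) = +2^31 */

	/* What the old JIT effectively did: negate imm, then add it.
	 * -INT32_MIN does not fit in 32 bits and wraps back to INT32_MIN. */
	int32_t neg = (int32_t)(-(int64_t)imm);
	int64_t got = dst + (int64_t)neg;		/* 0 + (-2^31) = -2^31 */

	printf("want = %lld, old JIT = %lld\n", (long long)want, (long long)got);
	return 0;
}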

Comments

Song Liu Oct. 1, 2021, 10:01 p.m. UTC | #1
On Fri, Oct 1, 2021 at 2:17 PM Naveen N. Rao
<naveen.n.rao@linux.vnet.ibm.com> wrote:
>
> We aren't handling subtraction involving an immediate value of
> 0x80000000 properly. Fix the same.
>
> Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>

Acked-by: Song Liu <songliubraving@fb.com>

> ---
>  arch/powerpc/net/bpf_jit_comp64.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index ffb7a2877a8469..4641a50e82d50d 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -333,15 +333,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
>                 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
>                 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
>                 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
> -                       if (BPF_OP(code) == BPF_SUB)
> -                               imm = -imm;
> -                       if (imm) {
> -                               if (imm >= -32768 && imm < 32768)
> -                                       EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
> -                               else {
> -                                       PPC_LI32(b2p[TMP_REG_1], imm);
> +                       if (imm > -32768 && imm < 32768) {
> +                               EMIT(PPC_RAW_ADDI(dst_reg, dst_reg,
> +                                       BPF_OP(code) == BPF_SUB ? IMM_L(-imm) : IMM_L(imm)));
> +                       } else {
> +                               PPC_LI32(b2p[TMP_REG_1], imm);
> +                               if (BPF_OP(code) == BPF_SUB)
> +                                       EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
> +                               else
>                                         EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
> -                               }
>                         }
>                         goto bpf_alu32_trunc;
>                 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
> --
> 2.33.0
>
Johan Almbladh Oct. 2, 2021, 5:33 p.m. UTC | #2
On Fri, Oct 1, 2021 at 11:15 PM Naveen N. Rao
<naveen.n.rao@linux.vnet.ibm.com> wrote:
>
> We aren't handling subtraction involving an immediate value of
> 0x80000000 properly. Fix the same.
>
> Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>

Acked-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Tested-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>

> ---
>  arch/powerpc/net/bpf_jit_comp64.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index ffb7a2877a8469..4641a50e82d50d 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -333,15 +333,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
>                 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
>                 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
>                 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
> -                       if (BPF_OP(code) == BPF_SUB)
> -                               imm = -imm;
> -                       if (imm) {
> -                               if (imm >= -32768 && imm < 32768)
> -                                       EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
> -                               else {
> -                                       PPC_LI32(b2p[TMP_REG_1], imm);
> +                       if (imm > -32768 && imm < 32768) {
> +                               EMIT(PPC_RAW_ADDI(dst_reg, dst_reg,
> +                                       BPF_OP(code) == BPF_SUB ? IMM_L(-imm) : IMM_L(imm)));
> +                       } else {
> +                               PPC_LI32(b2p[TMP_REG_1], imm);
> +                               if (BPF_OP(code) == BPF_SUB)
> +                                       EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
> +                               else
>                                         EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
> -                               }
>                         }
>                         goto bpf_alu32_trunc;
>                 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
> --
> 2.33.0
>
Christophe Leroy Oct. 3, 2021, 8:07 a.m. UTC | #3
On 01/10/2021 at 23:14, Naveen N. Rao wrote:
> We aren't handling subtraction involving an immediate value of
> 0x80000000 properly. Fix the same.
> 
> Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
> ---
>   arch/powerpc/net/bpf_jit_comp64.c | 16 ++++++++--------
>   1 file changed, 8 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index ffb7a2877a8469..4641a50e82d50d 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -333,15 +333,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
>   		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
>   		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
>   		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
> -			if (BPF_OP(code) == BPF_SUB)
> -				imm = -imm;
> -			if (imm) {
> -				if (imm >= -32768 && imm < 32768)
> -					EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
> -				else {
> -					PPC_LI32(b2p[TMP_REG_1], imm);
> +			if (imm > -32768 && imm < 32768) {
> +				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg,
> +					BPF_OP(code) == BPF_SUB ? IMM_L(-imm) : IMM_L(imm)));
> +			} else {
> +				PPC_LI32(b2p[TMP_REG_1], imm);
> +				if (BPF_OP(code) == BPF_SUB)
> +					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
> +				else
>   					EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
> -				}
>   			}
>   			goto bpf_alu32_trunc;

There is now so little code common to both BPF_ADD and BPF_SUB that you should make them different cases.

While at it, why not also use ADDIS if imm is 32 bits? That would be an ADDIS/ADDI instead of LIS/ORI/ADD.

>   		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
>
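To make the ADDIS/ADDI idea above concrete, here is a rough sketch of what the full-range BPF_ADD path could look like. It is only an illustration, assuming PPC_RAW_ADDIS() and the IMM_HA()/IMM_L() helpers behave like the existing JIT macros, with IMM_HA() bumping the high halfword to compensate for addi sign-extending the low halfword:

			/* Sketch only: dst += imm without the temporary register. */
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				/* addis adds the (adjusted) high halfword shifted left 16 bits... */
				EMIT(PPC_RAW_ADDIS(dst_reg, dst_reg, IMM_HA(imm)));
				/* ...and addi adds the sign-extended low halfword, if any. */
				if (IMM_L(imm))
					EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			}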
Naveen N. Rao Oct. 4, 2021, 6:18 p.m. UTC | #4
Christophe Leroy wrote:
> 
> 
> On 01/10/2021 at 23:14, Naveen N. Rao wrote:
>> We aren't handling subtraction involving an immediate value of
>> 0x80000000 properly. Fix the same.
>> 
>> Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
>> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
>> ---
>>   arch/powerpc/net/bpf_jit_comp64.c | 16 ++++++++--------
>>   1 file changed, 8 insertions(+), 8 deletions(-)
>> 
>> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
>> index ffb7a2877a8469..4641a50e82d50d 100644
>> --- a/arch/powerpc/net/bpf_jit_comp64.c
>> +++ b/arch/powerpc/net/bpf_jit_comp64.c
>> @@ -333,15 +333,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
>>   		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
>>   		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
>>   		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
>> -			if (BPF_OP(code) == BPF_SUB)
>> -				imm = -imm;
>> -			if (imm) {
>> -				if (imm >= -32768 && imm < 32768)
>> -					EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
>> -				else {
>> -					PPC_LI32(b2p[TMP_REG_1], imm);
>> +			if (imm > -32768 && imm < 32768) {
>> +				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg,
>> +					BPF_OP(code) == BPF_SUB ? IMM_L(-imm) : IMM_L(imm)));
>> +			} else {
>> +				PPC_LI32(b2p[TMP_REG_1], imm);
>> +				if (BPF_OP(code) == BPF_SUB)
>> +					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
>> +				else
>>   					EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
>> -				}
>>   			}
>>   			goto bpf_alu32_trunc;
> 
> There is now so little code common to both BPF_ADD and BPF_SUB that you should make them different cases.
> 
> While at it, why not also use ADDIS if imm is 32 bits? That would be an ADDIS/ADDI instead of LIS/ORI/ADD.

Sure. I wanted to limit the change for this fix. We can do a separate patch to optimize code generation for BPF_ADD.


- Naveen
Christophe Leroy Oct. 5, 2021, 5:40 a.m. UTC | #5
On 04/10/2021 at 20:18, Naveen N. Rao wrote:
> Christophe Leroy wrote:
>>
>>
>> On 01/10/2021 at 23:14, Naveen N. Rao wrote:
>>> We aren't handling subtraction involving an immediate value of
>>> 0x80000000 properly. Fix the same.
>>>
>>> Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
>>> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
>>> ---
>>>   arch/powerpc/net/bpf_jit_comp64.c | 16 ++++++++--------
>>>   1 file changed, 8 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
>>> index ffb7a2877a8469..4641a50e82d50d 100644
>>> --- a/arch/powerpc/net/bpf_jit_comp64.c
>>> +++ b/arch/powerpc/net/bpf_jit_comp64.c
>>> @@ -333,15 +333,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
>>>           case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
>>>           case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
>>>           case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
>>> -            if (BPF_OP(code) == BPF_SUB)
>>> -                imm = -imm;
>>> -            if (imm) {
>>> -                if (imm >= -32768 && imm < 32768)
>>> -                    EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
>>> -                else {
>>> -                    PPC_LI32(b2p[TMP_REG_1], imm);
>>> +            if (imm > -32768 && imm < 32768) {
>>> +                EMIT(PPC_RAW_ADDI(dst_reg, dst_reg,
>>> +                    BPF_OP(code) == BPF_SUB ? IMM_L(-imm) : IMM_L(imm)));
>>> +            } else {
>>> +                PPC_LI32(b2p[TMP_REG_1], imm);
>>> +                if (BPF_OP(code) == BPF_SUB)
>>> +                    EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
>>> +                else
>>>                       EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
>>> -                }
>>>               }
>>>               goto bpf_alu32_trunc;
>>
>> There is now so little code common to both BPF_ADD and BPF_SUB that you should make them different cases.
>>
>> While at it, why not also use ADDIS if imm is 32 bits? That would be an ADDIS/ADDI instead of LIS/ORI/ADD.
> 
> Sure. I wanted to limit the change for this fix. We can do a separate patch to optimize code generation for BPF_ADD.
> 

Sure, this second part was just a thought; I agree it should be another patch.

My main comment here is to split this into a separate case; I don't think it increases the change much, and IMO it is easier to read:

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index ffb7a2877a84..39226d88c558 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -330,11 +330,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
  			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
  			goto bpf_alu32_trunc;
  		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
-		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
  		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
-		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
-			if (BPF_OP(code) == BPF_SUB)
-				imm = -imm;
  			if (imm) {
  				if (imm >= -32768 && imm < 32768)
  					EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
@@ -344,6 +340,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
  				}
  			}
  			goto bpf_alu32_trunc;
+		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
+		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
+			if (imm) {
+				if (-imm >= -32768 && -imm < 32768) {
+					EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+				} else {
+					PPC_LI32(b2p[TMP_REG_1], imm);
+					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
+				}
+			}
+			goto bpf_alu32_trunc;
  		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
  		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
  			if (BPF_CLASS(code) == BPF_ALU)

Patch

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index ffb7a2877a8469..4641a50e82d50d 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -333,15 +333,15 @@  int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
-			if (BPF_OP(code) == BPF_SUB)
-				imm = -imm;
-			if (imm) {
-				if (imm >= -32768 && imm < 32768)
-					EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
-				else {
-					PPC_LI32(b2p[TMP_REG_1], imm);
+			if (imm > -32768 && imm < 32768) {
+				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg,
+					BPF_OP(code) == BPF_SUB ? IMM_L(-imm) : IMM_L(imm)));
+			} else {
+				PPC_LI32(b2p[TMP_REG_1], imm);
+				if (BPF_OP(code) == BPF_SUB)
+					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
+				else
 					EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
-				}
 			}
 			goto bpf_alu32_trunc;
 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */