Message ID | PAWPR08MB898248BE9DB331AC6DBEF4B183D79@PAWPR08MB8982.eurprd08.prod.outlook.com
State | New |
Series | AArch64: Improve SVE memcpy and memmove
The 02/03/2023 13:05, Wilco Dijkstra wrote:
> Improve SVE memcpy/memmove by copying 2 vectors if the size is small enough.
> This improves performance of random memcpy by ~9% on Neoverse V1, and
> memcpy/memmove of 33-64 bytes become ~16% faster.
>
> Passes regress, OK for commit?

This is ok to commit, thanks.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
diff --git a/sysdeps/aarch64/multiarch/memcpy_sve.S b/sysdeps/aarch64/multiarch/memcpy_sve.S
index f4dc214f60bf25e818eb6b8de2d4093ad0c886e1..d11be6a44301af4bfd7fa4900555b769dc58d34d 100644
--- a/sysdeps/aarch64/multiarch/memcpy_sve.S
+++ b/sysdeps/aarch64/multiarch/memcpy_sve.S
@@ -67,14 +67,15 @@ ENTRY (__memcpy_sve)
 
 	cmp	count, 128
 	b.hi	L(copy_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo	p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo	p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	/* Medium copies: 33..128 bytes.  */
@@ -102,14 +103,6 @@ L(copy96):
 	stp	C_q, D_q, [dstend, -32]
 	ret
 
-L(vlen128):
-	whilelo	p1.b, vlen, count
-	ld1b	z0.b, p0/z, [src, 0, mul vl]
-	ld1b	z1.b, p1/z, [src, 1, mul vl]
-	st1b	z0.b, p0, [dstin, 0, mul vl]
-	st1b	z1.b, p1, [dstin, 1, mul vl]
-	ret
-
 	.p2align 4
 	/* Copy more than 128 bytes.  */
 L(copy_long):
@@ -158,14 +151,15 @@ ENTRY (__memmove_sve)
 
 	cmp	count, 128
 	b.hi	L(move_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo	p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo	p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	.p2align 4
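
For readers less familiar with SVE: the patch replaces the fixed count <= 32 check and the tbnz test for 128-bit vectors with a single vector-length-agnostic comparison against 2*VL (cmp count, vlen, lsl 1), then copies up to two whole vectors with predicated loads and stores, so no further branching on the exact size is needed. Below is a minimal C sketch of the same idea using the Arm ACLE SVE intrinsics from arm_sve.h; the helper name copy_upto_two_vectors and the memcpy fallback are illustrative assumptions, not part of the patch, and it needs an SVE-enabled compile (e.g. -march=armv8-a+sve).

#include <arm_sve.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative sketch, not the glibc code: copy n bytes with two
   predicated load/store pairs when n fits in two SVE vectors.  */
static void
copy_upto_two_vectors (uint8_t *dst, const uint8_t *src, size_t n)
{
  uint64_t vl = svcntb ();                 /* cntb vlen: bytes per vector */
  if (n > 2 * vl)                          /* cmp count, vlen, lsl 1 */
    {
      memcpy (dst, src, n);                /* larger sizes take another path */
      return;
    }
  svbool_t p0 = svwhilelt_b8_u64 (0, n);   /* whilelo p0.b, xzr, count */
  svbool_t p1 = svwhilelt_b8_u64 (vl, n);  /* whilelo p1.b, vlen, count */
  svuint8_t z0 = svld1_u8 (p0, src);           /* ld1b z0.b, p0/z, [src, 0, mul vl] */
  svuint8_t z1 = svld1_vnum_u8 (p1, src, 1);   /* ld1b z1.b, p1/z, [src, 1, mul vl] */
  svst1_u8 (p0, dst, z0);                      /* st1b z0.b, p0, [dstin, 0, mul vl] */
  svst1_vnum_u8 (p1, dst, 1, z1);              /* st1b z1.b, p1, [dstin, 1, mul vl] */
}

Lanes at or beyond n are inactive in the predicates, so they are neither loaded nor stored; for n <= VL the second predicate is all-false and the second pair is a no-op. In the assembly sequence both loads execute before either store, which also makes it safe for overlapping buffers of up to two vectors, which is why __memmove_sve can use the identical code.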