@@ -27,6 +27,7 @@
#define dstin x0
#define src x1
#define count x2
+#define dstlen x3
#define dst x3
#define srcend x4
#define dstend x5
@@ -66,7 +67,20 @@
# define MEMCPY memcpy
#endif
-ENTRY_ALIGN (MEMMOVE, 6)
+#if defined SHARED && IS_IN (libc)
+#ifndef MEMCPY_CHK
+# define MEMCPY_CHK __memcpy_chk
+#endif
+
+ENTRY_ALIGN (MEMCPY_CHK, 6)
+ cmp dstlen, count
+ b.lo __chk_fail
+ b MEMCPY
+ nop
+END (MEMCPY_CHK)
+#endif
+
+ENTRY (MEMMOVE)
DELOUSE (0)
DELOUSE (1)
@@ -1,3 +1,7 @@
ifeq ($(subdir),string)
sysdep_routines += memcpy_generic memcpy_thunderx memcpy_falkor
endif
+
+ifeq ($(subdir),debug)
+sysdep_routines += memcpy_chk-nonshared
+endif
@@ -42,6 +42,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_thunderx)
	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_falkor)
	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_generic))
+#ifdef SHARED
+  IFUNC_IMPL (i, name, __memcpy_chk,
+	      IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1, __memcpy_chk_thunderx)
+	      IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1, __memcpy_chk_falkor)
+	      IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1, __memcpy_chk_generic))
+#endif
IFUNC_IMPL (i, name, memmove,
IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_thunderx)
IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_generic))
@@ -26,6 +26,7 @@
#define dstin x0
#define src x1
#define count x2
+#define dstlen x3
#define dst x3
#define srcend x4
#define dstend x5
@@ -60,7 +61,17 @@
The non-temporal stores help optimize cache utilization. */
#if IS_IN (libc)
-ENTRY_ALIGN (__memcpy_falkor, 6)
+
+# ifdef SHARED
+ENTRY_ALIGN (__memcpy_chk_falkor, 6)
+ cmp dstlen, count
+ b.lo __chk_fail
+	b	__memcpy_falkor
+ nop
+END (__memcpy_chk_falkor)
+# endif
+
+ENTRY(__memcpy_falkor)
cmp count, 32
add srcend, src, count
@@ -28,6 +28,10 @@
# define MEMCPY __memcpy_generic
# define MEMMOVE __memmove_generic
+# ifdef SHARED
+# define MEMCPY_CHK __memcpy_chk_generic
+# endif
+
/* Do not hide the generic versions of memcpy and memmove, we use them
internally. */
# undef libc_hidden_builtin_def
@@ -37,6 +41,7 @@
/* It doesn't make sense to send libc-internal memcpy calls through a PLT. */
.globl __GI_memcpy; __GI_memcpy = __memcpy_generic
.globl __GI_memmove; __GI_memmove = __memmove_generic
+ .globl __GI___memcpy_chk; __GI___memcpy_chk = __memcpy_chk_generic
# endif
#endif
@@ -33,6 +33,7 @@
#define dstin x0
#define src x1
#define count x2
+#define dstlen x3
#define dst x3
#define srcend x4
#define dstend x5
@@ -80,7 +81,17 @@
# define MEMMOVE __memmove_thunderx
# define USE_THUNDERX
-ENTRY_ALIGN (MEMMOVE, 6)
+# ifdef SHARED
+# define MEMCPY_CHK __memcpy_chk_thunderx
+ENTRY_ALIGN (MEMCPY_CHK, 6)
+	cmp	dstlen, count
+	b.lo	__chk_fail
+	b	MEMCPY
+	nop
+END (MEMCPY_CHK)
+# endif
+
+ENTRY (MEMMOVE)
DELOUSE (0)
DELOUSE (1)