@@ -28,6 +28,14 @@ config CONFIG_ARC_CPU_HS
endchoice
+config CONFIG_ARC_HAS_ATOMICS
+ bool "Support for LLOCK/SCOND instructions"
+ default y
+ help
+ LLOCK/SCOND instructions are needed to implement atomic
+ read-modify-write operations. Without them, libc falls back to the
+ kernel-assisted atomic cmpxchg syscall, available since kernel v4.9.
+
choice
prompt "MMU Page Size"
default CONFIG_ARC_PAGE_SIZE_8K
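For context, the atomic read-modify-write the help text refers to is an
LLOCK/SCOND retry loop. A minimal sketch of a 32-bit compare-and-exchange
built that way (illustrative only; arc_cmpxchg32 is a hypothetical helper,
not the libc implementation):

	/* Sketch only: hardware cmpxchg via LLOCK/SCOND.  */
	static inline int arc_cmpxchg32(volatile int *mem, int expected, int desired)
	{
		int prev;
		__asm__ __volatile__(
			"1:	llock	%0, [%1]	\n"	/* load-locked current value   */
			"	brne	%0, %2, 2f	\n"	/* differs from expected: done */
			"	scond	%3, [%1]	\n"	/* conditional store, sets Z   */
			"	bnz	1b		\n"	/* Z clear = store lost; retry */
			"2:				\n"
			: "=&r" (prev)
			: "r" (mem), "r" (expected), "r" (desired)
			: "cc", "memory");
		return prev;	/* success iff prev == expected */
	}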
@@ -23,7 +23,7 @@
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08
#endif
-#ifdef __LL64__
+#if defined(__LL64__) || defined(__ARC_LL64__)
# define PREFETCH_READ(RX) prefetch [RX, 56]
# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
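The widened guard reflects a toolchain change: older ARC compilers predefine
__LL64__ when the 64-bit ldd/std load/store instructions are available, while
newer GCC ports predefine __ARC_LL64__ instead. A quick way to check which
macro your compiler sets (command illustrative; adjust the cross prefix and
-mcpu for your toolchain):

	$ arc-linux-gcc -mcpu=hs38 -mll64 -dM -E - </dev/null | grep LL64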
@@ -52,7 +52,7 @@ ENTRY(memset)
lpnz @.Lset64bytes
;; LOOP START
PREWRITE(r3, 64) ;Prefetch the next write location
-#ifdef __LL64__
+#if defined(__LL64__) || defined(__ARC_LL64__)
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
@@ -85,7 +85,7 @@ ENTRY(memset)
lpnz .Lset32bytes
;; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
-#ifdef __LL64__
+#if defined(__LL64__) || defined(__ARC_LL64__)
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
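On configurations without LL64, the #else branches of these blocks presumably
fall back to 32-bit stores covering the same region; roughly (sketch, not the
verbatim fallback code):

	#else
		st.ab	r4, [r3, 4]	; 32-bit store, post-increment by 4
		st.ab	r4, [r3, 4]	; twice as many stores per loop body
		...
	#endif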
@@ -38,6 +38,11 @@ void __arc_link_error (void);
#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
({ __arc_link_error (); oldval; })
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ ({ __arc_link_error (); oldval; })
+
+#ifdef __CONFIG_ARC_HAS_ATOMICS__
+
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
__typeof(oldval) prev; \
@@ -56,5 +61,57 @@ void __arc_link_error (void);
prev; \
})
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ __arc_link_error (); oldval; })
+#else
+
+#ifndef __NR_arc_usr_cmpxchg
+#error "__NR_arc_usr_cmpxchg missing: Please upgrade to kernel 4.9+ headers"
+#endif
+
+/* Lacking hardware assist, have the kernel perform the atomic operation.
+   This only works in a UP (uniprocessor) configuration.
+ */
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ \
+ /* open-code INTERNAL_SYSCALL since it lacks a "cc" clobber */ \
+ register int __ret __asm__("r0") = (int)(mem); \
+ register int __a1 __asm__("r1") = (int)(oldval); \
+ register int __a2 __asm__("r2") = (int)(newval); \
+ register int _sys_num __asm__("r8") = __NR_arc_usr_cmpxchg; \
+ \
+ __asm__ volatile ( \
+ ARC_TRAP_INSN \
+ : "+r" (__ret) \
+ : "r"(_sys_num), "r"(__ret), "r"(__a1), "r"(__a2) \
+ : "memory", "cc"); \
+ \
+ /* syscall returns previous value */ \
+ /* Z bit is set if cmpxchg succeeded (we don't use that yet) */ \
+ \
+ (__typeof(oldval)) __ret; \
+ })
+
+#endif
+
+/* Store NEWVALUE in *MEM and return the old value.
+   The atomic EX (exchange) instruction is present in all ARC configurations.
+ */
+
+#define __arch_exchange_32_acq(mem, newval) \
+ ({ \
+ __typeof__(*(mem)) val = newval; \
+ \
+ __asm__ __volatile__( \
+ "ex %0, [%1]" \
+ : "+r" (val) \
+ : "r" (mem) \
+ : "memory" ); \
+ \
+ val; \
+ })
+
+#define atomic_exchange_acq(mem, newval) \
+ ({ \
+ if (sizeof(*(mem)) != 4) \
+ abort(); \
+ __arch_exchange_32_acq(mem, newval); \
+ })
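To see how the pieces compose from the caller's side, here is a hypothetical
spinlock built on the macros above (sketch only; lock_word, spin_lock and
spin_unlock are not part of this patch):

	static int lock_word;

	static void spin_lock(void)
	{
		/* argument order is (mem, newval, oldval); returns the previous
		   value, so 0 means we transitioned 0 -> 1 and own the lock */
		while (__arch_compare_and_exchange_val_32_acq(&lock_word, 1, 0) != 0)
			;	/* spin; the kernel-assisted path behaves the same */
	}

	static void spin_unlock(void)
	{
		/* EX swaps unconditionally; the old value (1) is discarded */
		atomic_exchange_acq(&lock_word, 0);
	}

On the kernel-assisted configuration this is only safe on UP systems, as the
comment in the patch notes.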