diff --git a/arch/x86/cpu/mp_init.c b/arch/x86/cpu/mp_init.c
--- a/arch/x86/cpu/mp_init.c
+++ b/arch/x86/cpu/mp_init.c
@@ -164,12 +164,12 @@ static inline void barrier_wait(atomic_t *b)
 {
 	while (atomic_read(b) == 0)
 		asm("pause");
-	mfence();
+	mb();
 }
 
 static inline void release_barrier(atomic_t *b)
 {
-	mfence();
+	mb();
 	atomic_set(b, 1);
 }
 
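Taken together, these two helpers form a publish/subscribe handshake: release_barrier() makes all prior writes visible before raising the flag, and barrier_wait() spins on the flag and then fences before the caller touches the published data. A minimal sketch of how a caller might pair them (the payload and function names below are hypothetical, not part of this patch):

    /* Hypothetical usage of the handshake above */
    static int shared_value;
    static atomic_t ready;

    static void publisher(void)
    {
    	shared_value = 42;		/* write the payload first */
    	release_barrier(&ready);	/* mb() orders the write before the flag */
    }

    static void subscriber(void)
    {
    	barrier_wait(&ready);		/* spin on the flag, then mb() */
    	printf("%d\n", shared_value);	/* reads are ordered after the flag */
    }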
@@ -631,7 +631,7 @@ static int run_ap_work(struct mp_callback *callback, struct udevice *bsp,
 		if (cur_cpu != i)
 			store_callback(&ap_callbacks[i], callback);
 	}
-	mfence();
+	mb();
 
 	/* Wait for all the APs to signal back that call has been accepted. */
 	start = get_timer(0);
@@ -656,7 +656,7 @@ static int run_ap_work(struct mp_callback *callback, struct udevice *bsp,
 	} while (cpus_accepted != num_aps);
 
 	/* Make sure we can see any data written by the APs */
-	mfence();
+	mb();
 
 	return 0;
 }
@@ -692,7 +692,7 @@ static int ap_wait_for_instruction(struct udevice *cpu, void *unused)
 
 		/* Copy to local variable before using the value */
 		memcpy(&lcb, cb, sizeof(lcb));
-		mfence();
+		mb();
 		if (lcb.logical_cpu_number == MP_SELECT_ALL ||
 		    lcb.logical_cpu_number == MP_SELECT_APS ||
 		    dev_seq(cpu) == lcb.logical_cpu_number)
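The fence here guards a snapshot pattern: the AP copies the shared descriptor into a local variable, and mb() ensures the copy is complete before the AP acts on it or signals acceptance back to the BSP. A hedged sketch of the idea (whether struct mp_callback carries a func/arg pair is an assumption of this illustration, not taken from the patch):

    /* Sketch: consume a shared descriptor via a private snapshot */
    struct mp_callback lcb;

    memcpy(&lcb, cb, sizeof(lcb));	/* snapshot the shared slot */
    mb();				/* snapshot completes before anything below */
    if (dev_seq(cpu) == lcb.logical_cpu_number)
    	lcb.func(lcb.arg);		/* assumed fields; only the local copy is used */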
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -185,11 +185,6 @@ static inline int flag_is_changeable_p(uint32_t flag)
 }
 #endif
 
-static inline void mfence(void)
-{
-	__asm__ __volatile__("mfence" : : : "memory");
-}
-
 /**
  * cpu_enable_paging_pae() - Enable PAE-paging
  *
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -240,6 +240,7 @@ static inline void sync(void)
  * have some advantages to use them instead of the simple one here.
  */
 #define dmb()		__asm__ __volatile__ ("" : : : "memory")
+#define mb()		__asm__ __volatile__ ("mfence" : : : "memory")
 #define __iormb()	dmb()
 #define __iowmb()	dmb()
 
Implement an x86 memory barrier mb(). Furthermore, remove the previously
used mfence() function, which does the same thing.

The mb() macro is now equivalent to Linux (v6.9):
linux/arch/x86/include/asm/barrier.h

Signed-off-by: Philip Oberfichtner <pro@denx.de>
---

Notes:
    Changes in V4: None

    Changes in V3:
    - Remove mfence() function

 arch/x86/cpu/mp_init.c     | 10 +++++-----
 arch/x86/include/asm/cpu.h |  5 -----
 arch/x86/include/asm/io.h  |  1 +
 3 files changed, 6 insertions(+), 10 deletions(-)
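The distinction between the two io.h macros is worth spelling out: dmb() only stops the compiler from reordering accesses across it, while mb() additionally emits mfence so the CPU orders them as well. The classic case where the hardware fence matters on x86 is a store followed by a load, which the TSO memory model would otherwise allow to be reordered; a small illustrative sketch (Dekker-style, all names hypothetical):

    /* mfence orders the store before the later load (StoreLoad);
     * plain x86 stores and loads do not guarantee this on their own */
    static int flag[2];

    static int try_enter(int me)
    {
    	flag[me] = 1;		/* announce intent (store) */
    	mb();			/* without mfence, the load below may pass the store */
    	return !flag[1 - me];	/* check the peer (load) */
    }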