@@ -898,6 +898,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
hwaddr ppn;
int napot_bits = 0;
target_ulong napot_mask;
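+ /*
+ * Backward CFI (shadow stack) support: is_sstack_idx is true when the
+ * access uses an MMU index with MMU_IDX_SS_WRITE set; sstack_page is
+ * set once the leaf PTE is found to describe a shadow stack page.
+ */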
+ bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
+ bool sstack_page = false;
/*
* Check if we should use the background registers for the two
@@ -1106,21 +1108,40 @@ restart:
return TRANSLATE_FAIL;
}
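+ /* Effective permission bits; adjusted below for shadow stack pages. */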
+ target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
/* Check for reserved combinations of RWX flags. */
- switch (pte & (PTE_R | PTE_W | PTE_X)) {
- case PTE_W:
+ switch (rwx) {
case PTE_W | PTE_X:
return TRANSLATE_FAIL;
+ case PTE_W:
+ /* If BCFI is enabled, PTE_W alone is not reserved: it marks a shadow stack page. */
+ if (cpu_get_bcfien(env) && first_stage) {
+ sstack_page = true;
+ /* For a shadow stack MMU index, reads and writes are allowed; otherwise only reads. */
+ rwx = is_sstack_idx ? PTE_R | PTE_W : PTE_R;
+ break;
+ }
+ return TRANSLATE_FAIL;
+ case PTE_R:
+ /*
+ * Regardless of `access_type`, shadow stack accesses to read-only
+ * memory always raise store page faults. During unwind, loads are
+ * promoted to store faults.
+ */
+ if (is_sstack_idx) {
+ return TRANSLATE_FAIL;
+ }
+ break;
}
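+ /* Build the page protection flags from the effective rwx permissions. */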
int prot = 0;
- if (pte & PTE_R) {
+ if (rwx & PTE_R) {
prot |= PAGE_READ;
}
- if (pte & PTE_W) {
+ if (rwx & PTE_W) {
prot |= PAGE_WRITE;
}
- if (pte & PTE_X) {
+ if (rwx & PTE_X) {
bool mxr = false;
/*
@@ -1164,8 +1185,11 @@ restart:
}
if (!((prot >> access_type) & 1)) {
- /* Access check failed */
- return TRANSLATE_FAIL;
+ /*
+ * Access check failed. For shadow stack pages, access check failures
+ * are reported as access faults rather than page faults.
+ */
+ return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
}
target_ulong updated_pte = pte;
@@ -1352,9 +1376,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
break;
case MMU_DATA_LOAD:
cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+ /* Misaligned shadow stack accesses are access faults. */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+ }
break;
case MMU_DATA_STORE:
cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+ /* Misaligned shadow stack accesses are access faults. */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ }
break;
default:
g_assert_not_reached();
@@ -30,12 +30,15 @@
* - U+2STAGE 0b100
* - S+2STAGE 0b101
* - S+SUM+2STAGE 0b110
+ * - Shadow stack+U 0b1000
+ * - Shadow stack+S 0b1001
*/
#define MMUIdx_U 0
#define MMUIdx_S 1
#define MMUIdx_S_SUM 2
#define MMUIdx_M 3
#define MMU_2STAGE_BIT (1 << 2)
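+/* Set in the MMU index for shadow stack accesses (see table above). */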
+#define MMU_IDX_SS_WRITE (1 << 3)
static inline int mmuidx_priv(int mmu_idx)
{