From eaf0ed9c825ed70ef92b960bc27b53d2a9e8d630 Mon Sep 17 00:00:00 2001
From: Oystein Knauserud
Date: Mon, 27 Feb 2023 12:12:48 +0100
Subject: [PATCH 1/4] Added exception codes 0x4 and 0x6 for misaligned atomics.

These will only be used for misaligned atomics which are not otherwise
blocked by the PMA.

SEC is clean when A_EXT=A_NONE and the new exception codes are not allowed
to cause exception triggers (tdata2_rdata may propagate and lead to
counterexamples).

Signed-off-by: Oystein Knauserud
---
 bhv/cv32e40x_rvfi.sv             | 18 ++++++++++--------
 bhv/include/cv32e40x_rvfi_pkg.sv |  7 +++----
 rtl/cv32e40x_controller_fsm.sv   | 16 +++++++++-------
 rtl/cv32e40x_mpu.sv              | 28 +++++++++++++++++++++-------
 rtl/cv32e40x_pma.sv              | 15 +++++++--------
 rtl/include/cv32e40x_pkg.sv      | 32 +++++++++++++++++++-------------
 sva/cv32e40x_mpu_sva.sv          | 14 +++++++++++---
 sva/cv32e40x_rvfi_sva.sv         | 28 +++++++++++++++++++++++-----
 8 files changed, 103 insertions(+), 55 deletions(-)

diff --git a/bhv/cv32e40x_rvfi.sv b/bhv/cv32e40x_rvfi.sv
index eb6af374..4f20efad 100644
--- a/bhv/cv32e40x_rvfi.sv
+++ b/bhv/cv32e40x_rvfi.sv
@@ -712,9 +712,6 @@ module cv32e40x_rvfi
   // Detect PMA errors due to misaligned accesses
   logic         lsu_pma_err_misaligned_ex;
 
-  // Detect LSU errors due to misaligned atomics (uses PMA logic but is not a true PMA error)
-  logic         lsu_err_misaligned_atomic_ex;
-
   assign mret_ptr_wb = mret_ptr_wb_i;
 
   // PMA error due to atomic not within an atomic region
@@ -723,9 +720,6 @@ module cv32e40x_rvfi
   // PMA error due to misaligned accesses to I/O memory
   assign lsu_pma_err_misaligned_ex = lsu_pma_err_ex_i && lsu_misaligned_ex_i && !lsu_pma_cfg_ex_i.main;
 
-  // Detect LSU errors due to misaligned atomic instructions
-  assign lsu_err_misaligned_atomic_ex = lsu_pma_err_ex_i && lsu_pma_atomic_ex_i && lsu_misaligned_ex_i;
-
   assign insn_opcode = rvfi_insn[6:0];
   assign insn_rd     = rvfi_insn[11:7];
   assign insn_funct3 = rvfi_insn[14:12];
@@ -857,6 +851,12 @@ module cv32e40x_rvfi
         EXC_CAUSE_STORE_FAULT : begin
           rvfi_trap_next.cause_type = mem_err[STAGE_WB];
         end
+        EXC_CAUSE_LOAD_MISALIGNED : begin
+          rvfi_trap_next.cause_type = 2'h0;
+        end
+        EXC_CAUSE_STORE_MISALIGNED : begin
+          rvfi_trap_next.cause_type = 2'h0;
+        end
         default : begin
           // rvfi_trap_next.cause_type is only set for exception codes that can have multiple causes
         end
@@ -1109,8 +1109,10 @@ module cv32e40x_rvfi
           ex_mem_trans <= lsu_data_trans;
         end
 
-        mem_err [STAGE_WB] = lsu_err_misaligned_atomic_ex ? MEM_ERR_MISALIGNED_ATOMIC : // Non-naturally aligned atomic
-                             lsu_pma_err_misaligned_ex    ? MEM_ERR_IO_ALIGN          : // Non-natrually aligned access to !main
+        // Capture the cause of an LSU exception for the cases that can have multiple reasons for an exception.
+        // These are currently load and store access faults triggered by misaligned accesses to I/O regions,
+        // atomic accesses to regions not enabled for atomics, or accesses blocked by the PMP.
+        mem_err [STAGE_WB] = lsu_pma_err_misaligned_ex ? MEM_ERR_IO_ALIGN : // Non-naturally aligned access to !main
                              lsu_pma_err_atomic_ex     ? MEM_ERR_ATOMIC   : // Any atomic to non-atomic PMA region
                                                         MEM_ERR_PMP;       // PMP error
 
diff --git a/bhv/include/cv32e40x_rvfi_pkg.sv b/bhv/include/cv32e40x_rvfi_pkg.sv
index 521effb8..caf050fd 100644
--- a/bhv/include/cv32e40x_rvfi_pkg.sv
+++ b/bhv/include/cv32e40x_rvfi_pkg.sv
@@ -30,10 +30,9 @@ package cv32e40x_rvfi_pkg;
   parameter NMEM = 128; // Maximum number of memory transactions per instruction is currently 13 when ZC_EXT=1
 
   typedef enum logic [1:0] { // Memory error types
-    MEM_ERR_MISALIGNED_ATOMIC = 2'h0,
-    MEM_ERR_IO_ALIGN          = 2'h1,
-    MEM_ERR_ATOMIC            = 2'h2,
-    MEM_ERR_PMP               = 2'h3
+    MEM_ERR_IO_ALIGN = 2'h0,
+    MEM_ERR_ATOMIC   = 2'h1,
+    MEM_ERR_PMP      = 2'h2
   } mem_err_t;
 
   typedef struct packed { // Autonomously updated CSRs
diff --git a/rtl/cv32e40x_controller_fsm.sv b/rtl/cv32e40x_controller_fsm.sv
index 1fbd9789..42f9b930 100644
--- a/rtl/cv32e40x_controller_fsm.sv
+++ b/rtl/cv32e40x_controller_fsm.sv
@@ -346,14 +346,16 @@ module cv32e40x_controller_fsm import cv32e40x_pkg::*;
   assign ctrl_fsm_o.exception_in_wb = exception_in_wb;
 
   // Set exception cause
-  assign exception_cause_wb = (ex_wb_pipe_i.instr.mpu_status != MPU_OK)            ? EXC_CAUSE_INSTR_FAULT     :
-                              ex_wb_pipe_i.instr.bus_resp.err                      ? EXC_CAUSE_INSTR_BUS_FAULT :
-                              ex_wb_pipe_i.illegal_insn                            ? EXC_CAUSE_ILLEGAL_INSN    :
-                              (ex_wb_pipe_i.sys_en && ex_wb_pipe_i.sys_ecall_insn) ? EXC_CAUSE_ECALL_MMODE     :
+  assign exception_cause_wb = (ex_wb_pipe_i.instr.mpu_status != MPU_OK)            ? EXC_CAUSE_INSTR_FAULT      :
+                              ex_wb_pipe_i.instr.bus_resp.err                      ? EXC_CAUSE_INSTR_BUS_FAULT  :
+                              ex_wb_pipe_i.illegal_insn                            ? EXC_CAUSE_ILLEGAL_INSN     :
+                              (ex_wb_pipe_i.sys_en && ex_wb_pipe_i.sys_ecall_insn) ? EXC_CAUSE_ECALL_MMODE      :
                               (ex_wb_pipe_i.sys_en && ex_wb_pipe_i.sys_ebrk_insn && (ex_wb_pipe_i.priv_lvl == PRIV_LVL_M) &&
-                               !dcsr_i.ebreakm && !debug_mode_q)                   ? EXC_CAUSE_BREAKPOINT      :
-                              (mpu_status_wb_i == MPU_WR_FAULT)                    ? EXC_CAUSE_STORE_FAULT     :
-                                                                                     EXC_CAUSE_LOAD_FAULT; // (mpu_status_wb_i == MPU_RE_FAULT)
+                               !dcsr_i.ebreakm && !debug_mode_q)                   ? EXC_CAUSE_BREAKPOINT       :
+                              (mpu_status_wb_i == MPU_WR_FAULT)                    ? EXC_CAUSE_STORE_FAULT      :
+                              (mpu_status_wb_i == MPU_RE_FAULT)                    ? EXC_CAUSE_LOAD_FAULT       :
+                              (mpu_status_wb_i == MPU_WR_MISALIGNED)               ? EXC_CAUSE_STORE_MISALIGNED :
+                                                                                     EXC_CAUSE_LOAD_MISALIGNED;
 
   assign ctrl_fsm_o.exception_cause_wb = exception_cause_wb;
 
diff --git a/rtl/cv32e40x_mpu.sv b/rtl/cv32e40x_mpu.sv
index 44561690..ce66ba7e 100644
--- a/rtl/cv32e40x_mpu.sv
+++ b/rtl/cv32e40x_mpu.sv
@@ -70,6 +70,7 @@ module cv32e40x_mpu import cv32e40x_pkg::*;
 );
 
   logic         pma_err;
+  logic         pma_misaligned_atomic;
  logic         mpu_err;
  logic         mpu_block_core;
  logic         mpu_block_bus;
@@ -118,27 +119,36 @@ module cv32e40x_mpu import cv32e40x_pkg::*;
           if (core_mpu_err_wait_i) begin
             if(core_trans_we) begin
               // MPU error on write
-              state_n = core_one_txn_pend_n ? MPU_WR_ERR_RESP : MPU_WR_ERR_WAIT;
+              // PMA errors take presedence over misaligned atomics
+              state_n = core_one_txn_pend_n ? (pma_err ? MPU_WR_ERR_RESP : MPU_WR_MISALIGN_RESP) :
+                                              (pma_err ? MPU_WR_ERR_WAIT : MPU_WR_MISALIGN_WAIT);
             end
             else begin
               // MPU error on read
-              state_n = core_one_txn_pend_n ? MPU_RE_ERR_RESP : MPU_RE_ERR_WAIT;
+              // PMA errors take presedence over misaligned atomics
+              state_n = core_one_txn_pend_n ? (pma_err ? MPU_RE_ERR_RESP : MPU_RE_MISALIGN_RESP) :
+                                              (pma_err ? MPU_RE_ERR_WAIT : MPU_RE_MISALIGN_WAIT);
            end
          end
        end
      end
-      MPU_RE_ERR_WAIT, MPU_WR_ERR_WAIT: begin
+      MPU_RE_ERR_WAIT, MPU_WR_ERR_WAIT,
+      MPU_RE_MISALIGN_WAIT, MPU_WR_MISALIGN_WAIT: begin
 
        // Block new transfers while waiting for in flight transfers to complete
        mpu_block_bus  = 1'b1;
        mpu_block_core = 1'b1;
 
        if (core_one_txn_pend_n) begin
-          state_n = (state_q == MPU_RE_ERR_WAIT) ? MPU_RE_ERR_RESP : MPU_WR_ERR_RESP;
+          state_n = (state_q == MPU_RE_ERR_WAIT)      ? MPU_RE_ERR_RESP      :
+                    (state_q == MPU_WR_ERR_WAIT)      ? MPU_WR_ERR_RESP      :
+                    (state_q == MPU_RE_MISALIGN_WAIT) ? MPU_RE_MISALIGN_RESP :
+                                                        MPU_WR_MISALIGN_RESP;
        end
      end
-      MPU_RE_ERR_RESP, MPU_WR_ERR_RESP: begin
+      MPU_RE_ERR_RESP, MPU_WR_ERR_RESP,
+      MPU_RE_MISALIGN_RESP, MPU_WR_MISALIGN_RESP: begin
 
        // Keep blocking new transfers
        mpu_block_bus  = 1'b1;
@@ -146,7 +156,10 @@ module cv32e40x_mpu import cv32e40x_pkg::*;
 
        // Set up MPU error response towards the core
        mpu_err_trans_valid = 1'b1;
-        mpu_status = (state_q == MPU_RE_ERR_RESP) ? MPU_RE_FAULT : MPU_WR_FAULT;
+        mpu_status = (state_q == MPU_RE_ERR_RESP)      ? MPU_RE_FAULT :
+                     (state_q == MPU_WR_ERR_RESP)      ? MPU_WR_FAULT :
+                     (state_q == MPU_RE_MISALIGN_RESP) ? MPU_RE_MISALIGNED :
+                                                         MPU_WR_MISALIGNED;
 
        // Go back to IDLE uncoditionally.
        // The core is expected to always be ready for the response
@@ -203,11 +216,12 @@ module cv32e40x_mpu import cv32e40x_pkg::*;
     .misaligned_access_i     ( misaligned_access_i   ),
     .load_access_i           ( load_access           ),
     .pma_err_o               ( pma_err               ),
+    .pma_misaligned_atomic_o ( pma_misaligned_atomic ),
     .pma_bufferable_o        ( bus_trans_bufferable  ),
     .pma_cacheable_o         ( bus_trans_cacheable   )
   );
 
-  assign mpu_err = pma_err;
+  assign mpu_err = pma_err || pma_misaligned_atomic;
 
   // Writes are only supported on the data interface
   // Tie to 1'b0 if this MPU is instantiatied in the IF stage
diff --git a/rtl/cv32e40x_pma.sv b/rtl/cv32e40x_pma.sv
index 7a2b86d8..0473a39f 100644
--- a/rtl/cv32e40x_pma.sv
+++ b/rtl/cv32e40x_pma.sv
@@ -38,6 +38,7 @@ module cv32e40x_pma import cv32e40x_pkg::*;
   input logic  misaligned_access_i,      // Indicate that ongoing access is part of a misaligned access
   input logic  load_access_i,            // Indicate that ongoing access is a load
   output logic pma_err_o,
+  output logic pma_misaligned_atomic_o,  // Atomic instruction is misaligned
   output logic pma_bufferable_o,
   output logic pma_cacheable_o
 );
@@ -102,9 +103,15 @@ module cv32e40x_pma import cv32e40x_pkg::*;
   generate
     if (A_EXT) begin: pma_atomic
       assign pma_cfg_atomic = pma_cfg.atomic;
+
+      // Check if atomic access is misaligned.
+      // If not otherwise blocked by the PMA, this will result in exception codes
+      // 4 or 6 indicating misaligned load/store atomics.
+      assign pma_misaligned_atomic_o = atomic_access_i && misaligned_access_i;
     end
     else begin: pma_no_atomic
       assign pma_cfg_atomic = 1'b0;
+      assign pma_misaligned_atomic_o = 1'b0;
     end
   endgenerate
@@ -118,14 +125,6 @@ module cv32e40x_pma import cv32e40x_pkg::*;
       pma_err_o = 1'b1;
     end
 
-    // Check that atomic accesses are not misaligned
-    // Not strictly a part of the PMA, but reusing the PMA logic for flagging errors
-    // and consume transactions rather than making separate logic in the LSU. Uses the same exception
-    // codes as PMA errors.
-    if (atomic_access_i && misaligned_access_i) begin
-      pma_err_o = 1'b1;
-    end
-
     // Instruction fetches only allowed in main memory
     if (instr_fetch_access_i && !pma_cfg.main) begin
       pma_err_o = 1'b1;
     end
diff --git a/rtl/include/cv32e40x_pkg.sv b/rtl/include/cv32e40x_pkg.sv
index 8bba056a..29a72781 100644
--- a/rtl/include/cv32e40x_pkg.sv
+++ b/rtl/include/cv32e40x_pkg.sv
@@ -901,16 +901,19 @@ typedef enum logic[3:0] {
 } pc_mux_e;
 
 // Exception Cause
-parameter EXC_CAUSE_INSTR_FAULT     = 11'h01;
-parameter EXC_CAUSE_ILLEGAL_INSN    = 11'h02;
-parameter EXC_CAUSE_BREAKPOINT      = 11'h03;
-parameter EXC_CAUSE_LOAD_FAULT      = 11'h05;
-parameter EXC_CAUSE_STORE_FAULT     = 11'h07;
-parameter EXC_CAUSE_ECALL_MMODE     = 11'h0B;
-parameter EXC_CAUSE_INSTR_BUS_FAULT = 11'h18;
+parameter EXC_CAUSE_INSTR_FAULT      = 11'h01;
+parameter EXC_CAUSE_ILLEGAL_INSN     = 11'h02;
+parameter EXC_CAUSE_BREAKPOINT       = 11'h03;
+parameter EXC_CAUSE_LOAD_MISALIGNED  = 22'h04;
+parameter EXC_CAUSE_LOAD_FAULT       = 11'h05;
+parameter EXC_CAUSE_STORE_MISALIGNED = 11'h06;
+parameter EXC_CAUSE_STORE_FAULT      = 11'h07;
+parameter EXC_CAUSE_ECALL_MMODE      = 11'h0B;
+parameter EXC_CAUSE_INSTR_BUS_FAULT  = 11'h18;
 
 parameter logic [31:0] ETRIGGER_TDATA2_MASK = (1 << EXC_CAUSE_INSTR_BUS_FAULT) | (1 << EXC_CAUSE_ECALL_MMODE) | (1 << EXC_CAUSE_STORE_FAULT) |
-                                              (1 << EXC_CAUSE_LOAD_FAULT) | (1 << EXC_CAUSE_BREAKPOINT) | (1 << EXC_CAUSE_ILLEGAL_INSN) | (1 << EXC_CAUSE_INSTR_FAULT);
+                                              (1 << EXC_CAUSE_LOAD_FAULT) | (1 << EXC_CAUSE_BREAKPOINT) | (1 << EXC_CAUSE_ILLEGAL_INSN) | (1 << EXC_CAUSE_INSTR_FAULT) |
+                                              (1 << EXC_CAUSE_LOAD_MISALIGNED) | (1 << EXC_CAUSE_STORE_MISALIGNED);
                      (core_trans_i.addr[1:0] == 2'b00))
       else `uvm_error("mpu", "Misaligned atomic instruction not flagged with error")
diff --git a/sva/cv32e40x_rvfi_sva.sv b/sva/cv32e40x_rvfi_sva.sv
index f4e9abd0..9171cf0a 100644
--- a/sva/cv32e40x_rvfi_sva.sv
+++ b/sva/cv32e40x_rvfi_sva.sv
@@ -221,8 +221,14 @@ if (DEBUG) begin
 end
 
 if ((A_EXT == A) || (A_EXT == ZALRSC)) begin
-  // A PMA error due to an aligned LR.W accessing a non-atomic region must get cause_type==MEM_ERR_ATOMIC (1)
-  // If a LR.W gets blocked due to misalignment, it must get cause_type==MEM_ERR_IO_ALIGN (0)
+  // Aligned atomics blocked by the PMA shall use the EXC_CAUSE_LOAD_FAULT or EXC_CAUSE_STORE_FAULT exception codes with
+  // cause_type MEM_ERR_ATOMIC.
+  //
+  // Misaligned atomics to a non-main PMA region shall use EXC_CAUSE_LOAD_FAULT or EXC_CAUSE_STORE_FAULT exception codes with
+  // cause_type MEM_ERR_IO_ALIGN.
+  //
+  // Misaligned atomics which are otherwise not blocked by the PMA (cfg is main and atomic) shall use either
+  // EXC_CAUSE_LOAD_MISALIGNED or EXC_CAUSE_STORE_MISALIGNED with cause_type MEM_ERR_IO_ALIGN.
 
  a_aligned_lr_access_fault_trap:
    assert property (@(posedge clk_i) disable iff (!rst_ni)
                     pc_mux_exception &&
                     (lsu_atomic_wb_i == AT_LR) && lsu_en_wb_i &&
@@ -239,8 +245,14 @@ if ((A_EXT == A) || (A_EXT == ZALRSC)) begin
                     lsu_split_q_wb_i
                     |=>
                     rvfi_valid &&
-                    (rvfi_trap.cause_type == MEM_ERR_MISALIGNED_ATOMIC) &&
+                    ((rvfi_trap.cause_type == MEM_ERR_IO_ALIGN) &&
+                     (rvfi_trap.exception_cause == EXC_CAUSE_LOAD_MISALIGNED))
+                    or
+                    ((rvfi_trap.cause_type == MEM_ERR_ATOMIC) &&
                      (rvfi_trap.exception_cause == EXC_CAUSE_LOAD_FAULT))
+                    or
+                    ((rvfi_trap.cause_type == MEM_ERR_IO_ALIGN) &&
+                     (rvfi_trap.exception_cause == EXC_CAUSE_LOAD_FAULT)))
      else `uvm_error("rvfi", "Exception on misaligned LR.W atomic instruction did not set correct cause_type in rvfi_trap")
 
  a_aligned_sc_access_fault_trap:
@@ -259,8 +271,14 @@ if ((A_EXT == A) || (A_EXT == ZALRSC)) begin
                     lsu_split_q_wb_i
                     |=>
                     rvfi_valid &&
-                    (rvfi_trap.cause_type == MEM_ERR_MISALIGNED_ATOMIC) &&
+                    ((rvfi_trap.cause_type == MEM_ERR_IO_ALIGN) &&
+                     (rvfi_trap.exception_cause == EXC_CAUSE_STORE_MISALIGNED))
+                    or
+                    ((rvfi_trap.cause_type == MEM_ERR_ATOMIC) &&
                      (rvfi_trap.exception_cause == EXC_CAUSE_STORE_FAULT))
+                    or
+                    ((rvfi_trap.cause_type == MEM_ERR_IO_ALIGN) &&
+                     (rvfi_trap.exception_cause == EXC_CAUSE_STORE_FAULT)))
      else `uvm_error("rvfi", "Exception on misaligned SC.W atomic instruction did not set correct cause_type in rvfi_trap")
 end
@@ -281,7 +299,7 @@ if (A_EXT == A) begin
                     lsu_split_q_wb_i
                     |=>
                     rvfi_valid &&
-                    (rvfi_trap.cause_type == MEM_ERR_MISALIGNED_ATOMIC) &&
+                    (rvfi_trap.cause_type == MEM_ERR_IO_ALIGN) &&
                      (rvfi_trap.exception_cause == EXC_CAUSE_STORE_FAULT))
      else `uvm_error("rvfi", "Exception on misaligned AMO* atomic instruction did not set correct cause_type in rvfi_trap")
 end

From dfb24babace129fc71ce44eb178e2b71a05a5677 Mon Sep 17 00:00:00 2001
From: Oystein Knauserud
Date: Mon, 27 Feb 2023 12:17:31 +0100
Subject: [PATCH 2/4] Fixed typo in exception code width.

Signed-off-by: Oystein Knauserud
---
 rtl/include/cv32e40x_pkg.sv | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rtl/include/cv32e40x_pkg.sv b/rtl/include/cv32e40x_pkg.sv
index 29a72781..8263dedb 100644
--- a/rtl/include/cv32e40x_pkg.sv
+++ b/rtl/include/cv32e40x_pkg.sv
@@ -904,7 +904,7 @@ typedef enum logic[3:0] {
 parameter EXC_CAUSE_INSTR_FAULT      = 11'h01;
 parameter EXC_CAUSE_ILLEGAL_INSN     = 11'h02;
 parameter EXC_CAUSE_BREAKPOINT       = 11'h03;
-parameter EXC_CAUSE_LOAD_MISALIGNED  = 22'h04;
+parameter EXC_CAUSE_LOAD_MISALIGNED  = 11'h04;
 parameter EXC_CAUSE_LOAD_FAULT       = 11'h05;
 parameter EXC_CAUSE_STORE_MISALIGNED = 11'h06;
 parameter EXC_CAUSE_STORE_FAULT      = 11'h07;

From c21271b5472128ae2467792c52995fd63a8dabd2 Mon Sep 17 00:00:00 2001
From: Oystein Knauserud
Date: Mon, 27 Feb 2023 12:28:22 +0100
Subject: [PATCH 3/4] Updated RVFI assertion for checking exception cause and
 cause type for misaligned AMOs.

Signed-off-by: Oystein Knauserud
---
 sva/cv32e40x_rvfi_sva.sv | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/sva/cv32e40x_rvfi_sva.sv b/sva/cv32e40x_rvfi_sva.sv
index 9171cf0a..050c75ba 100644
--- a/sva/cv32e40x_rvfi_sva.sv
+++ b/sva/cv32e40x_rvfi_sva.sv
@@ -299,8 +299,14 @@ if (A_EXT == A) begin
                     lsu_split_q_wb_i
                     |=>
                     rvfi_valid &&
-                    (rvfi_trap.cause_type == MEM_ERR_IO_ALIGN) &&
+                    ((rvfi_trap.cause_type == MEM_ERR_IO_ALIGN) &&
+                     (rvfi_trap.exception_cause == EXC_CAUSE_STORE_MISALIGNED))
+                    or
+                    ((rvfi_trap.cause_type == MEM_ERR_ATOMIC) &&
                      (rvfi_trap.exception_cause == EXC_CAUSE_STORE_FAULT))
+                    or
+                    ((rvfi_trap.cause_type == MEM_ERR_IO_ALIGN) &&
+                     (rvfi_trap.exception_cause == EXC_CAUSE_STORE_FAULT)))
      else `uvm_error("rvfi", "Exception on misaligned AMO* atomic instruction did not set correct cause_type in rvfi_trap")
 end

From 25f616cf3c9528f150f1eae10b882575981279c0 Mon Sep 17 00:00:00 2001
From: Oystein Knauserud
Date: Mon, 27 Feb 2023 13:01:49 +0100
Subject: [PATCH 4/4] Fixed typo.

Signed-off-by: Oystein Knauserud
---
 rtl/cv32e40x_mpu.sv | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/rtl/cv32e40x_mpu.sv b/rtl/cv32e40x_mpu.sv
index ce66ba7e..58fa4a82 100644
--- a/rtl/cv32e40x_mpu.sv
+++ b/rtl/cv32e40x_mpu.sv
@@ -119,13 +119,13 @@ module cv32e40x_mpu import cv32e40x_pkg::*;
           if (core_mpu_err_wait_i) begin
             if(core_trans_we) begin
               // MPU error on write
-              // PMA errors take presedence over misaligned atomics
+              // PMA errors take precedence over misaligned atomics
               state_n = core_one_txn_pend_n ? (pma_err ? MPU_WR_ERR_RESP : MPU_WR_MISALIGN_RESP) :
                                               (pma_err ? MPU_WR_ERR_WAIT : MPU_WR_MISALIGN_WAIT);
             end
             else begin
               // MPU error on read
-              // PMA errors take presedence over misaligned atomics
+              // PMA errors take precedence over misaligned atomics
               state_n = core_one_txn_pend_n ? (pma_err ? MPU_RE_ERR_RESP : MPU_RE_MISALIGN_RESP) :
                                               (pma_err ? MPU_RE_ERR_WAIT : MPU_RE_MISALIGN_WAIT);
             end
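
Illustrative summary of the resulting exception-code priority (not part of the patches above): the stand-alone module below condenses, under stated assumptions, what cv32e40x_pma.sv, cv32e40x_mpu.sv and cv32e40x_controller_fsm.sv implement together in this series. The module name, the lsu_exc_cause helper, and its is_store / pma_err / misaligned_atomic arguments are hypothetical placeholders introduced only for this sketch; the exception-code values mirror rtl/include/cv32e40x_pkg.sv after patches 1 and 2.

module misaligned_atomic_cause_sketch;

  // Exception codes mirrored from rtl/include/cv32e40x_pkg.sv (post-series values).
  localparam logic [10:0] EXC_CAUSE_LOAD_MISALIGNED  = 11'h04;
  localparam logic [10:0] EXC_CAUSE_LOAD_FAULT       = 11'h05;
  localparam logic [10:0] EXC_CAUSE_STORE_MISALIGNED = 11'h06;
  localparam logic [10:0] EXC_CAUSE_STORE_FAULT      = 11'h07;

  // Hypothetical helper (not a signal or function from the patches): pick the
  // exception code for a data-side access flagged by the MPU.
  //   pma_err           - PMA rejected the access (non-atomic region, or a
  //                       misaligned access to a non-main/I/O region)
  //   misaligned_atomic - atomic access that is misaligned but otherwise
  //                       passes the PMA checks
  // PMA errors keep their access-fault codes; only the otherwise-legal
  // misaligned atomics get the new codes 4/6, mirroring the MPU precedence.
  function automatic logic [10:0] lsu_exc_cause(input logic is_store,
                                                input logic pma_err,
                                                input logic misaligned_atomic);
    if (pma_err) begin
      return is_store ? EXC_CAUSE_STORE_FAULT : EXC_CAUSE_LOAD_FAULT;
    end else if (misaligned_atomic) begin
      return is_store ? EXC_CAUSE_STORE_MISALIGNED : EXC_CAUSE_LOAD_MISALIGNED;
    end else begin
      return '0; // no exception
    end
  endfunction

  initial begin
    // Misaligned AMO to a main+atomic region: new store-misaligned code 0x6.
    assert (lsu_exc_cause(1'b1, 1'b0, 1'b1) == EXC_CAUSE_STORE_MISALIGNED);
    // Misaligned LR.W that the PMA also rejects: load access fault 0x5 still wins.
    assert (lsu_exc_cause(1'b0, 1'b1, 1'b1) == EXC_CAUSE_LOAD_FAULT);
  end

endmodule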