Implement Zalrsc
@@ -4,7 +4,7 @@ taitep's RISC-V Emulator.
The goal is to support at least RV64GC and be able to run Linux,
potentially more. No plans for RV32I or RV32/64E.

-Currently implemented RISC-V ISA: `RV64IM`
+Currently implemented RISC-V ISA: `RV64IM-Zalrsc`

## Current Use
Currently, the emulator is nowhere near complete,
@@ -20,6 +20,8 @@ pub struct Core {
    pub(crate) pc: u64,
    pub(crate) mem: MemConfig,
    command_stream: crossbeam::channel::Receiver<CoreCmd>,
    // LR/SC reservation set. Pair of the RAM version chunk index and expected version.
    pub(crate) reservation: Option<(usize, u32)>,
}

pub mod commands;
@@ -31,6 +33,7 @@ impl Core {
            pc: 0,
            mem,
            command_stream,
            reservation: None,
        }
    }
@@ -4,6 +4,8 @@
// This file is part of TRVE (https://gitea.taitep.se/taitep/trve)
// See LICENSE file in the project root for full license text.

use std::sync::atomic::Ordering;

const MASK_REGISTER: u32 = 0x1f;

#[derive(Clone, Copy)]
@@ -104,7 +106,22 @@ impl Instruction {
    }

    /// Mostly/only used for the SYSTEM opcode
    #[inline]
    pub fn funct12(self) -> u16 {
        (self.0 >> 20) as u16
    }

    /// Looks at the aq/rl bits of atomic instructions and converts to an Ordering
    #[inline]
    pub fn amo_ordering(self) -> Ordering {
        let aq = self.0 >> 26 & 1 != 0;
        let rl = self.0 >> 25 & 1 != 0;

        match (aq, rl) {
            (false, false) => Ordering::Relaxed,
            (false, true) => Ordering::Release,
            (true, false) => Ordering::Acquire,
            (true, true) => Ordering::AcqRel,
        }
    }
}
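The aq/rl mapping above is easiest to sanity-check with a couple of concrete bit patterns. The following standalone sketch re-implements the same extraction on a raw instruction word (the free function and the literal words are illustrative, not emulator API):

```rust
use std::sync::atomic::Ordering;

// Mirrors Instruction::amo_ordering: aq is instruction bit 26, rl is bit 25.
fn amo_ordering(word: u32) -> Ordering {
    let aq = word >> 26 & 1 != 0;
    let rl = word >> 25 & 1 != 0;
    match (aq, rl) {
        (false, false) => Ordering::Relaxed,
        (false, true) => Ordering::Release,
        (true, false) => Ordering::Acquire,
        (true, true) => Ordering::AcqRel,
    }
}

fn main() {
    assert_eq!(amo_ordering(0), Ordering::Relaxed); // neither bit set
    assert_eq!(amo_ordering(1 << 26), Ordering::Acquire); // aq only, e.g. lr.w.aq
    assert_eq!(amo_ordering(1 << 25), Ordering::Release); // rl only, e.g. sc.w.rl
    assert_eq!(amo_ordering(3 << 25), Ordering::AcqRel); // both, e.g. lr.w.aqrl
}
```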
@@ -4,11 +4,244 @@
// This file is part of TRVE (https://gitea.taitep.se/taitep/trve)
// See LICENSE file in the project root for full license text.

use std::sync::atomic::{AtomicU32, AtomicU64};

use super::illegal;
-use crate::{core::Core, decode::Instruction, exceptions::Exception};
+use crate::{
+    core::Core,
+    decode::Instruction,
+    exceptions::{Exception, ExceptionType},
+    mem::{RAM_START, Ram},
+};

pub(super) fn find_and_exec(instr: Instruction, core: &mut Core) -> Result<(), Exception> {
-    match (instr.funct3(), instr.funct5()) {
+    match (instr.funct5(), instr.funct3()) {
        (0b00010, 0b010) if instr.rs2() == 0 => lr_w(core, instr),
        (0b00010, 0b011) if instr.rs2() == 0 => lr_d(core, instr),
        (0b00011, 0b010) => sc_w(core, instr),
        (0b00011, 0b011) => sc_d(core, instr),
        _ => illegal(instr),
    }
}
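The funct5/funct3 pairs in the dispatch above correspond to the standard AMO encodings (LR is funct5 0b00010, SC is 0b00011, with funct3 selecting .W or .D). A self-contained check against a hand-assembled lr.w word, re-implementing the field extraction locally since Instruction's accessors are not shown in this diff (field positions assumed per the RISC-V base encoding):

```rust
// funct5 = bits 31..27, funct3 = bits 14..12, rs2 = bits 24..20,
// opcode = bits 6..0 (AMO = 0b0101111).
fn funct5(word: u32) -> u32 {
    word >> 27
}
fn funct3(word: u32) -> u32 {
    word >> 12 & 0b111
}
fn rs2(word: u32) -> u32 {
    word >> 20 & 0x1f
}

fn main() {
    // Hand-assembled `lr.w x5, (x6)`: rd = x5, rs1 = x6, rs2 must be 0.
    let lr_w: u32 = (0b00010 << 27) | (6 << 15) | (0b010 << 12) | (5 << 7) | 0b0101111;
    assert_eq!(funct5(lr_w), 0b00010);
    assert_eq!(funct3(lr_w), 0b010);
    assert_eq!(rs2(lr_w), 0); // the `if instr.rs2() == 0` guard in the dispatch
}
```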
fn lr_d(core: &mut Core, instr: Instruction) -> Result<(), Exception> {
    core.reservation = None;

    let addr = core.reg_read(instr.rs1());
    if !addr.is_multiple_of(8) {
        return Err(Exception {
            type_: ExceptionType::LoadAddressMisaligned,
            value: addr,
        });
    }
    if addr < RAM_START {
        return Err(Exception {
            type_: ExceptionType::LoadAccessFault,
            value: addr,
        });
    }
    let ram_addr = addr - RAM_START;

    let reservation_data = core
        .mem
        .ram
        .wait_for_even_version(ram_addr)
        .ok_or_else(|| Exception {
            type_: ExceptionType::LoadAccessFault,
            value: addr,
        })?;

    core.reg_write(instr.rd(), unsafe {
        let index = ram_addr as usize / 8;
        core.mem
            .ram
            .buf_transmuted::<AtomicU64>()
            .get(index)
            .ok_or_else(|| Exception {
                type_: ExceptionType::LoadAccessFault,
                value: addr,
            })?
            .load(instr.amo_ordering())
    });

    core.reservation = Some(reservation_data);
    core.advance_pc();

    Ok(())
}
fn sc_d(core: &mut Core, instr: Instruction) -> Result<(), Exception> {
    let res = if let Some((reserved_chunk_id, reserved_version)) = core.reservation {
        let addr = core.reg_read(instr.rs1());
        if !addr.is_multiple_of(8) {
            return Err(Exception {
                type_: ExceptionType::StoreAmoAddressMisaligned,
                value: addr,
            });
        }
        if addr < RAM_START {
            return Err(Exception {
                type_: ExceptionType::StoreAmoAccessFault,
                value: addr,
            });
        }
        let ram_addr = addr - RAM_START;
        let chunk_id = ram_addr as usize / Ram::VERSION_CHUNK_SIZE;

        if chunk_id != reserved_chunk_id {
            // Mismatched reservation location and address
            core.reg_write(instr.rd(), 1);
            core.reservation = None;
            return Ok(());
        }

        let claim_res = core
            .mem
            .ram
            .claim_expected(chunk_id, reserved_version)
            .ok_or_else(|| Exception {
                type_: ExceptionType::StoreAmoAccessFault,
                value: addr,
            })?;

        if claim_res.is_some() {
            core.reservation = None;
            let value = core.reg_read(instr.rs2());
            unsafe {
                let index = ram_addr as usize / 8;
                core.mem
                    .ram
                    .buf_transmuted::<AtomicU64>()
                    .get(index)
                    .ok_or_else(|| Exception {
                        type_: ExceptionType::StoreAmoAccessFault,
                        value: addr,
                    })?
                    .store(value, instr.amo_ordering());

                Ok(0)
            }
        } else {
            core.reservation = None;
            Ok(1)
        }
    } else {
        core.reservation = None;
        Ok(1)
    }
    .map(|s| core.reg_write(instr.rd(), s));

    core.advance_pc();

    res
}
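sc_d writes 0 into rd on success and 1 on failure, and that value is what guest code spins on. A minimal sketch of that retry loop, with the two emulated instructions stood in for by hypothetical closures (none of this is emulator API; the memory model here is a single atomic cell):

```rust
use std::cell::Cell;
use std::sync::atomic::{AtomicU64, Ordering};

// `lr` loads and takes a reservation; `sc` writes and returns 0 on success, 1 on failure.
fn atomic_add(mut lr: impl FnMut() -> u64, mut sc: impl FnMut(u64) -> u64, n: u64) -> u64 {
    loop {
        let old = lr(); // lr.d: load the value and take a reservation
        if sc(old.wrapping_add(n)) == 0 {
            return old; // sc.d reported success: the store was applied atomically
        }
        // rd == 1: the reservation was lost, retry with a fresh lr.d
    }
}

fn main() {
    // Model "memory" with one atomic cell; sc is a compare-exchange against
    // the value the matching lr observed.
    let cell = AtomicU64::new(40);
    let reserved = Cell::new(0u64);
    let old = atomic_add(
        || {
            let v = cell.load(Ordering::Acquire);
            reserved.set(v);
            v
        },
        |new| {
            cell.compare_exchange(reserved.get(), new, Ordering::AcqRel, Ordering::Acquire)
                .map(|_| 0)
                .unwrap_or(1)
        },
        2,
    );
    assert_eq!(old, 40);
    assert_eq!(cell.load(Ordering::Acquire), 42);
}
```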
fn lr_w(core: &mut Core, instr: Instruction) -> Result<(), Exception> {
    core.reservation = None;

    let addr = core.reg_read(instr.rs1());
    if !addr.is_multiple_of(4) {
        return Err(Exception {
            type_: ExceptionType::LoadAddressMisaligned,
            value: addr,
        });
    }
    if addr < RAM_START {
        return Err(Exception {
            type_: ExceptionType::LoadAccessFault,
            value: addr,
        });
    }
    let ram_addr = addr - RAM_START;

    let reservation_data = core
        .mem
        .ram
        .wait_for_even_version(ram_addr)
        .ok_or_else(|| Exception {
            type_: ExceptionType::LoadAccessFault,
            value: addr,
        })?;

    core.reg_write(instr.rd(), unsafe {
        let index = ram_addr as usize / 4;
        core.mem
            .ram
            .buf_transmuted::<AtomicU32>()
            .get(index)
            .ok_or_else(|| Exception {
                type_: ExceptionType::LoadAccessFault,
                value: addr,
            })?
            .load(instr.amo_ordering())
    } as i32 as i64 as u64);

    core.reservation = Some(reservation_data);
    core.advance_pc();

    Ok(())
}
fn sc_w(core: &mut Core, instr: Instruction) -> Result<(), Exception> {
    let res = if let Some((reserved_chunk_id, reserved_version)) = core.reservation {
        let addr = core.reg_read(instr.rs1());
        if !addr.is_multiple_of(4) {
            return Err(Exception {
                type_: ExceptionType::StoreAmoAddressMisaligned,
                value: addr,
            });
        }
        if addr < RAM_START {
            return Err(Exception {
                type_: ExceptionType::StoreAmoAccessFault,
                value: addr,
            });
        }
        let ram_addr = addr - RAM_START;
        let chunk_id = ram_addr as usize / Ram::VERSION_CHUNK_SIZE;

        if chunk_id != reserved_chunk_id {
            // Mismatched reservation location and address
            core.reg_write(instr.rd(), 1);
            core.reservation = None;
            return Ok(());
        }

        let claim_res = core
            .mem
            .ram
            .claim_expected(chunk_id, reserved_version)
            .ok_or_else(|| Exception {
                type_: ExceptionType::StoreAmoAccessFault,
                value: addr,
            })?;

        if claim_res.is_some() {
            core.reservation = None;
            let value = core.reg_read(instr.rs2());
            unsafe {
                let index = ram_addr as usize / 4;
                core.mem
                    .ram
                    .buf_transmuted::<AtomicU32>()
                    .get(index)
                    .ok_or_else(|| Exception {
                        type_: ExceptionType::StoreAmoAccessFault,
                        value: addr,
                    })?
                    .store(value as u32, instr.amo_ordering());

                Ok(0)
            }
        } else {
            core.reservation = None;
            Ok(1)
        }
    } else {
        core.reservation = None;
        Ok(1)
    }
    .map(|s| core.reg_write(instr.rd(), s));

    core.advance_pc();

    res
}
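One detail in lr_w worth flagging: the loaded 32-bit value is sign-extended into the 64-bit destination register via the `as i32 as i64 as u64` chain, as RV64 requires for LR.W. A quick standalone check of that cast chain (the literal is arbitrary):

```rust
fn main() {
    let loaded: u32 = 0xffff_fff8; // -8 as a 32-bit two's-complement value
    let reg = loaded as i32 as i64 as u64;
    assert_eq!(reg, 0xffff_ffff_ffff_fff8); // upper half filled with the sign bit
    assert_eq!(reg as i64, -8);
}
```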
src/mem.rs
@@ -203,7 +203,7 @@ impl Ram {
        })
    }

-    const VERSION_CHUNK_SIZE: usize = 64;
+    pub const VERSION_CHUNK_SIZE: usize = 64;

    pub fn buf_mut(&mut self) -> &mut [u8] {
        self.buf.as_mut()
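Making VERSION_CHUNK_SIZE public matters because the Zalrsc handlers compute their reservation key from it: reservations are tracked per 64-byte version chunk rather than per address. A small illustration with a made-up RAM-relative address:

```rust
fn main() {
    const VERSION_CHUNK_SIZE: usize = 64; // same value as Ram::VERSION_CHUNK_SIZE
    let ram_addr: u64 = 0x1048; // hypothetical RAM-relative address
    let chunk_id = ram_addr as usize / VERSION_CHUNK_SIZE;
    assert_eq!(chunk_id, 0x41);
    // Any claimed write that bumps chunk 0x41's version counter invalidates a
    // reservation taken anywhere in bytes 0x1040..0x1080, not just at 0x1048.
}
```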
@@ -216,7 +216,7 @@ impl Ram {
    /// It must also be known that the contents of RAM are made up of naturally
    /// aligned valid instances of T.
    #[inline]
-    unsafe fn buf_transmuted<T>(&self) -> &[T] {
+    pub(crate) unsafe fn buf_transmuted<T>(&self) -> &[T] {
        debug_assert!(self.buf.len().is_multiple_of(std::mem::size_of::<T>()));
        unsafe {
            std::slice::from_raw_parts(
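buf_transmuted is now reachable from the Zalrsc handlers, which reinterpret the RAM buffer as a slice of AtomicU32/AtomicU64 so guest loads and stores go through hardware atomics. A simplified standalone sketch of that reinterpretation with the preconditions written out (as_atomics is illustrative, not the emulator's function, and the demo only reads):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Safety: `buf` must be aligned for AtomicU32 and its length a multiple of
/// size_of::<AtomicU32>() (the real Ram guarantees this for its buffer).
unsafe fn as_atomics(buf: &[u8]) -> &[AtomicU32] {
    debug_assert!(buf.len() % std::mem::size_of::<AtomicU32>() == 0);
    debug_assert!(buf.as_ptr() as usize % std::mem::align_of::<AtomicU32>() == 0);
    unsafe {
        std::slice::from_raw_parts(
            buf.as_ptr() as *const AtomicU32,
            buf.len() / std::mem::size_of::<AtomicU32>(),
        )
    }
}

fn main() {
    // Four words standing in for (already aligned) guest RAM; read-only here.
    // Stores would additionally need interior-mutable backing memory.
    let words = [0xdead_beef_u32, 0, 0, 0];
    let bytes = unsafe {
        std::slice::from_raw_parts(words.as_ptr().cast::<u8>(), std::mem::size_of_val(&words))
    };
    let atomics = unsafe { as_atomics(bytes) };
    assert_eq!(atomics[0].load(Ordering::Relaxed), 0xdead_beef);
}
```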
@@ -412,11 +412,38 @@ impl Ram {
        let chunk_counter = self.version_counters.get(chunk_id)?;
        Some(RamVersionClaim::claim_even(&chunk_counter))
    }

    // Tries to create a claim for a specified chunk id with a specific version
    // Outer Option represents whether the chunk id exists
    // Inner Option represents whether the claim succeeded
    pub fn claim_expected<'a>(
        &'a self,
        chunk_id: usize,
        expected: u32,
    ) -> Option<Option<RamVersionClaim<'a>>> {
        self.version_counters
            .get(chunk_id)
            .map(|chunk_counter| RamVersionClaim::claim_expected(chunk_counter, expected))
    }

    /// Waits for a specific address to have an even (ready) version
    /// number and returns the version chunk id and version
    pub fn wait_for_even_version(&self, addr: u64) -> Option<(usize, u32)> {
        let chunk_id = addr as usize / Self::VERSION_CHUNK_SIZE;
        let chunk_counter = self.version_counters.get(chunk_id)?;

        loop {
            let current_version = chunk_counter.load(Ordering::Acquire);
            if current_version.is_multiple_of(2) {
                return Some((chunk_id, current_version));
            }
        }
    }
}

pub struct RamVersionClaim<'a> {
    version_counter: &'a AtomicU32,
-    intial_version: u32,
+    initial_version: u32,
}

impl<'a> RamVersionClaim<'a> {
@@ -438,17 +465,32 @@ impl<'a> RamVersionClaim<'a> {
            if let Ok(initial_version) = res {
                return RamVersionClaim {
                    version_counter: counter,
-                    intial_version: initial_version,
+                    initial_version,
                };
            }
        }
    }

    pub fn claim_expected(counter: &'a AtomicU32, expected: u32) -> Option<RamVersionClaim<'a>> {
        counter
            .compare_exchange(
                expected,
                expected.wrapping_add(1),
                Ordering::AcqRel,
                Ordering::Acquire,
            )
            .ok()
            .map(|initial_version| RamVersionClaim {
                version_counter: counter,
                initial_version,
            })
    }
}

impl<'a> Drop for RamVersionClaim<'a> {
    fn drop(&mut self) {
        self.version_counter
-            .store(self.intial_version.wrapping_add(2), Ordering::Release);
+            .store(self.initial_version.wrapping_add(2), Ordering::Release);
    }
}
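Taken together, claim_expected and Drop give the even/odd protocol SC relies on: a successful claim flips the counter to an odd (write in progress) value, and releasing it stores the initial version plus 2, so the counter is even again but no longer matches any reservation taken before the write. A toy standalone demonstration with a bare AtomicU32 (not the emulator's RamVersionClaim):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let version = AtomicU32::new(4); // even: no write in progress

    // SC's claim: only succeeds if the version still matches the reservation.
    assert!(version
        .compare_exchange(4, 5, Ordering::AcqRel, Ordering::Acquire)
        .is_ok());

    // While the counter is odd, a competing claim against version 4 fails.
    assert!(version
        .compare_exchange(4, 5, Ordering::AcqRel, Ordering::Acquire)
        .is_err());

    // Releasing the claim (Drop in the real code) stores initial + 2: even
    // again, but stale reservations that recorded version 4 can never succeed.
    version.store(4u32.wrapping_add(2), Ordering::Release);
    assert_eq!(version.load(Ordering::Acquire), 6);
}
```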