(BIG CHANGE) Memory handling has been reworked: MMIO now goes through a two-level page table, misaligned accesses are supported, addresses are no longer split into page and offset up front, and all load/store instructions are implemented. Might still have bugs

2025-12-26 14:20:27 +01:00
parent 6d9efb7eb8
commit 528b519ce9
9 changed files with 478 additions and 456 deletions
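
For orientation, here is a minimal standalone sketch (not part of the commit) of how an address below RAM_START decomposes under the new two-level MMIO table, using the constants this diff introduces (64 KiB second-level pages, 64-entry sub-tables, RAM at 0x8000_0000):

// Sketch only: mirrors the constants and index arithmetic from this commit.
const MMIO_SECOND_LEVEL_PAGE_SIZE: u64 = 64 * 1024;
const MMIO_SECOND_LEVEL_ENTRIES: u64 = 64;
const RAM_START: u64 = 0x8000_0000;

fn split(addr: u64) -> (u64, u64, u64) {
    assert!(addr < RAM_START);
    let page_id = addr / MMIO_SECOND_LEVEL_PAGE_SIZE;
    let root_idx = page_id / MMIO_SECOND_LEVEL_ENTRIES; // index into MmioRoot
    let second_idx = page_id % MMIO_SECOND_LEVEL_ENTRIES; // slot in the sub-table
    let offset = addr % MMIO_SECOND_LEVEL_PAGE_SIZE; // address the device sees
    (root_idx, second_idx, offset)
}

fn main() {
    // 0x1000_2345 lands in root entry 64, sub-table slot 0, device offset 0x2345.
    assert_eq!(split(0x1000_2345), (64, 0, 0x2345));
}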


@@ -12,184 +12,171 @@ use std::sync::{
use memmap2::MmapMut;
use crate::{
    consts::{Addr, Byte, DWord, HWord, Word},
    exceptions::MemoryExceptionType,
};
pub const RAM_START: Addr = 0x8000_0000;
#[derive(Clone)]
pub struct MemConfig {
pub ram: Arc<Ram>,
pub mmio_root: MmioRoot,
}
impl MemConfig {
    pub fn memory_mapping_type(&self, addr: Addr) -> Option<MemoryMappingType> {
        if addr >= RAM_START {
            Some(MemoryMappingType::RAM)
        } else {
            self.mmio_root
                .get_device(addr)
                .map(|_| MemoryMappingType::MMIO)
        }
    }
    pub fn read_dword(&self, addr: Addr) -> Result<DWord, MemoryExceptionType> {
        if addr >= RAM_START {
            self.ram.read_dword(addr - RAM_START)
        } else {
            if !addr.is_multiple_of(8) && self.mmio_root.crosses_boundary(addr, 8) {
                return Err(MemoryExceptionType::AddressMisaligned);
            }
            let (interface, addr) = self
                .mmio_root
                .get_device(addr)
                .ok_or(MemoryExceptionType::AccessFault)?;
            interface.read_dword(addr)
        }
    }
pub fn read_word(&self, addr: Addr) -> Result<Word, MemoryExceptionType> {
if addr >= RAM_START {
self.ram.read_word(addr - RAM_START)
} else {
if !addr.is_multiple_of(4) && self.mmio_root.crosses_boundary(addr, 4) {
return Err(MemoryExceptionType::AddressMisaligned);
}
let (interface, addr) = self
.mmio_root
.get_device(addr)
.ok_or(MemoryExceptionType::AccessFault)?;
interface.read_word(addr)
}
}
pub fn read_hword(&self, addr: Addr) -> Result<HWord, MemoryExceptionType> {
if addr >= RAM_START {
self.ram.read_hword(addr - RAM_START)
} else {
if !addr.is_multiple_of(2) && self.mmio_root.crosses_boundary(addr, 2) {
return Err(MemoryExceptionType::AddressMisaligned);
}
let (interface, addr) = self
.mmio_root
.get_device(addr)
.ok_or(MemoryExceptionType::AccessFault)?;
interface.read_hword(addr)
}
}
pub fn read_byte(&self, addr: Addr) -> Result<Byte, MemoryExceptionType> {
if addr >= RAM_START {
self.ram.read_byte(addr - RAM_START)
} else {
let (interface, addr) = self
.mmio_root
.get_device(addr)
.ok_or(MemoryExceptionType::AccessFault)?;
interface.read_byte(addr)
}
}
pub fn write_dword(&self, addr: Addr, value: DWord) -> Result<(), MemoryExceptionType> {
if addr >= RAM_START {
self.ram.write_dword(addr - RAM_START, value)
} else {
if !addr.is_multiple_of(8) && self.mmio_root.crosses_boundary(addr, 8) {
return Err(MemoryExceptionType::AddressMisaligned);
}
let (interface, addr) = self
.mmio_root
.get_device(addr)
.ok_or(MemoryExceptionType::AccessFault)?;
interface.write_dword(addr, value)
}
}
pub fn write_word(&self, addr: Addr, value: Word) -> Result<(), MemoryExceptionType> {
if addr >= RAM_START {
self.ram.write_word(addr - RAM_START, value)
} else {
if !addr.is_multiple_of(4) && self.mmio_root.crosses_boundary(addr, 4) {
return Err(MemoryExceptionType::AddressMisaligned);
}
let (interface, addr) = self
.mmio_root
.get_device(addr)
.ok_or(MemoryExceptionType::AccessFault)?;
interface.write_word(addr, value)
}
}
pub fn write_hword(&self, addr: Addr, value: HWord) -> Result<(), MemoryExceptionType> {
if addr >= RAM_START {
self.ram.write_hword(addr - RAM_START, value)
} else {
if !addr.is_multiple_of(2) && self.mmio_root.crosses_boundary(addr, 2) {
return Err(MemoryExceptionType::AddressMisaligned);
}
let (interface, addr) = self
.mmio_root
.get_device(addr)
.ok_or(MemoryExceptionType::AccessFault)?;
interface.write_hword(addr, value)
}
}
pub fn write_byte(&self, addr: Addr, value: Byte) -> Result<(), MemoryExceptionType> {
if addr >= RAM_START {
self.ram.write_byte(addr - RAM_START, value)
} else {
let (interface, addr) = self
.mmio_root
.get_device(addr)
.ok_or(MemoryExceptionType::AccessFault)?;
interface.write_byte(addr, value)
}
}
    pub fn get_atomic_dword(&self, addr: Addr) -> Result<&AtomicU64, MemoryExceptionType> {
        if !addr.is_multiple_of(8) {
            return Err(MemoryExceptionType::AddressMisaligned);
        }
        if addr < RAM_START {
            return Err(MemoryExceptionType::AccessFault);
        }
        let index = ((addr - RAM_START) / 8) as usize;
        unsafe {
            self.ram
                .buf_transmuted::<AtomicU64>()
                .get(index)
                .ok_or(MemoryExceptionType::AccessFault)
        }
    }
pub fn get_atomic_word(&self, addr: Addr) -> Result<&AtomicU32, MemoryExceptionType> {
if !addr.is_multiple_of(4) {
return Err(MemoryExceptionType::AddressMisaligned);
}
if addr < RAM_START {
return Err(MemoryExceptionType::AccessFault);
}
let index = ((addr - RAM_START) / 4) as usize;
unsafe {
self.ram
.buf_transmuted::<AtomicU32>()
.get(index)
.ok_or(MemoryExceptionType::AccessFault)
}
}
}
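
A hypothetical wiring example for the new MemConfig (the `Uart` device and the RAM size are invented for illustration; `Ram`, `MmioRoot`, `MemConfig`, and `RAM_START` come from this file):

// Hypothetical usage sketch, not part of the commit; assumes a `Uart` type
// implementing `MemDeviceInterface` exists elsewhere in the emulator.
let ram = Arc::new(Ram::try_new(16 * 1024 * 1024)?); // 16 MiB of backing RAM
let mut mmio_root = MmioRoot::default();
mmio_root.insert(0x1000_0000, Arc::new(Uart::default())); // one 64 KiB MMIO page
let mem = MemConfig { ram, mmio_root };
mem.write_word(RAM_START + 4, 0xDEAD_BEEF)?; // addr >= RAM_START: goes to RAM
let status = mem.read_byte(0x1000_0000)?; // below RAM_START: routed to the UART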
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -200,17 +187,15 @@ pub enum MemoryMappingType {
pub struct Ram {
buf: MmapMut,
}
#[cfg(target_endian = "big")]
compile_error!("Current RAM implementation requires a little-endian host.");
impl Ram {
    pub fn try_new(size: usize) -> Result<Self, std::io::Error> {
        Ok(Self {
            buf: MmapMut::map_anon(size)?,
        })
    }
@@ -218,10 +203,6 @@ impl Ram {
self.buf.as_mut()
}
/// # Safety
    /// Safe if the page size (4 KiB) is divisible by `T`'s size (or the full RAM size is known to be) and the RAM is known to consist of valid, naturally aligned values of `T`
#[inline]
@@ -241,146 +222,277 @@ impl Ram {
}
#[inline]
    pub fn read_dword(&self, addr: Addr) -> Result<DWord, MemoryExceptionType> {
        if !addr.is_multiple_of(8) {
            // Misaligned: split into two aligned word reads and recombine.
            let high_word_addr = addr.wrapping_add(4);
            let low_word = self.read_word(addr)?;
            let high_word = self.read_word(high_word_addr)?;
            return Ok((low_word as DWord) | (high_word as DWord) << 32);
        }
        let index = (addr / 8) as usize;
        Ok(unsafe {
            self.buf_transmuted::<AtomicU64>()
                .get(index)
                .ok_or(MemoryExceptionType::AccessFault)
        }?
        .load(Relaxed))
    }
#[inline]
    pub fn read_word(&self, addr: Addr) -> Result<Word, MemoryExceptionType> {
        if !addr.is_multiple_of(4) {
            let high_hword_addr = addr.wrapping_add(2);
            let low_hword = self.read_hword(addr)?;
            let high_hword = self.read_hword(high_hword_addr)?;
            return Ok((low_hword as Word) | (high_hword as Word) << 16);
        }
        let index = (addr / 4) as usize;
        Ok(unsafe {
            self.buf_transmuted::<AtomicU32>()
                .get(index)
                .ok_or(MemoryExceptionType::AccessFault)
        }?
        .load(Relaxed))
    }
#[inline]
    pub fn read_hword(&self, addr: Addr) -> Result<HWord, MemoryExceptionType> {
        if !addr.is_multiple_of(2) {
            let high_byte_addr = addr.wrapping_add(1);
            let low_byte = self.read_byte(addr)?;
            let high_byte = self.read_byte(high_byte_addr)?;
            return Ok((low_byte as HWord) | (high_byte as HWord) << 8);
        }
        let index = (addr / 2) as usize;
        Ok(unsafe {
            self.buf_transmuted::<AtomicU16>()
                .get(index)
                .ok_or(MemoryExceptionType::AccessFault)
        }?
        .load(Relaxed))
    }
#[inline]
    pub fn read_byte(&self, addr: Addr) -> Result<Byte, MemoryExceptionType> {
        Ok(self
            .buf_atomic()
            .get(addr as usize)
            .ok_or(MemoryExceptionType::AccessFault)?
            .load(Relaxed))
    }
#[inline]
    pub fn write_dword(&self, addr: Addr, value: DWord) -> Result<(), MemoryExceptionType> {
        if !addr.is_multiple_of(8) {
            let low_word = value as Word;
            let high_word = (value >> 32) as Word;
            let high_word_address = addr.wrapping_add(4);
            self.write_word(addr, low_word)?;
            self.write_word(high_word_address, high_word)?;
            return Ok(());
        }
        let index = (addr / 8) as usize;
        unsafe {
            self.buf_transmuted::<AtomicU64>()
                .get(index)
                .ok_or(MemoryExceptionType::AccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }
#[inline]
    pub fn write_word(&self, addr: Addr, value: Word) -> Result<(), MemoryExceptionType> {
        if !addr.is_multiple_of(4) {
            let low_hword = value as HWord;
            let high_hword = (value >> 16) as HWord;
            let high_hword_address = addr.wrapping_add(2);
            self.write_hword(addr, low_hword)?;
            self.write_hword(high_hword_address, high_hword)?;
            return Ok(());
        }
        let index = (addr / 4) as usize;
        unsafe {
            self.buf_transmuted::<AtomicU32>()
                .get(index)
                .ok_or(MemoryExceptionType::AccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }
#[inline]
    pub fn write_hword(&self, addr: Addr, value: HWord) -> Result<(), MemoryExceptionType> {
        if !addr.is_multiple_of(2) {
            let low_byte = value as Byte;
            let high_byte = (value >> 8) as Byte;
            let high_byte_address = addr.wrapping_add(1);
            self.write_byte(addr, low_byte)?;
            self.write_byte(high_byte_address, high_byte)?;
            return Ok(());
        }
        let index = (addr / 2) as usize;
        unsafe {
            self.buf_transmuted::<AtomicU16>()
                .get(index)
                .ok_or(MemoryExceptionType::AccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }
#[inline]
    pub fn write_byte(&self, addr: Addr, value: Byte) -> Result<(), MemoryExceptionType> {
        self.buf_atomic()
            .get(addr as usize)
            .ok_or(MemoryExceptionType::AccessFault)?
            .store(value, Relaxed);
        Ok(())
    }
}
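
Note that every misaligned path above decomposes the access into two aligned accesses of the next size down and stitches the halves together little-endian, matching the layout the transmuted atomic accesses assume (see the compile_error above). A standalone sketch of that recombination:

// Sketch only: the little-endian stitching used by the misaligned read paths,
// shown for a word rebuilt from two aligned halfword loads.
fn main() {
    let low_hword: u16 = 0xBEEF; // loaded from addr
    let high_hword: u16 = 0xDEAD; // loaded from addr + 2
    let word = (low_hword as u32) | (high_hword as u32) << 16;
    assert_eq!(word, 0xDEAD_BEEF);
}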
pub const MMIO_SECOND_LEVEL_PAGE_SIZE: usize = 64 * 1024;
pub const MMIO_ROOT_PAGE_SIZE: usize = MMIO_SECOND_LEVEL_PAGE_SIZE * 64;
const MMIO_ROOT_ENTRIES: usize = RAM_START as usize / MMIO_ROOT_PAGE_SIZE;
const MMIO_SECOND_LEVEL_ENTRIES: usize = MMIO_ROOT_PAGE_SIZE / MMIO_SECOND_LEVEL_PAGE_SIZE;
#[derive(Clone)]
pub struct MmioRoot(Box<[Option<MmioSecondLevel>; MMIO_ROOT_ENTRIES]>);
impl MmioRoot {
pub fn insert(&mut self, base_addr: Addr, interface: Arc<dyn MemDeviceInterface>) {
assert!(base_addr.is_multiple_of(MMIO_SECOND_LEVEL_PAGE_SIZE as u64));
assert!(base_addr < RAM_START);
let page_id = base_addr as usize / MMIO_SECOND_LEVEL_PAGE_SIZE;
let root_page_id = page_id / MMIO_SECOND_LEVEL_ENTRIES;
let second_level_page_id = page_id % MMIO_SECOND_LEVEL_ENTRIES;
let second_level = self.0[root_page_id].get_or_insert_default();
if let MmioSecondLevel::SubTable(t) = second_level {
t[second_level_page_id] = Some(interface);
}
}
pub fn insert_full(&mut self, base_addr: Addr, interface: Arc<dyn MemDeviceInterface>) {
assert!(base_addr.is_multiple_of(MMIO_ROOT_PAGE_SIZE as u64));
assert!(base_addr < RAM_START);
let page_id = base_addr as usize / MMIO_ROOT_PAGE_SIZE;
self.0[page_id] = Some(MmioSecondLevel::Interface(interface));
}
fn get_device(&self, addr: Addr) -> Option<(Arc<dyn MemDeviceInterface>, Addr)> {
debug_assert!(addr < RAM_START);
let page_id = addr as usize / MMIO_SECOND_LEVEL_PAGE_SIZE;
let root_page_id = page_id / MMIO_SECOND_LEVEL_ENTRIES;
self.0[root_page_id]
.as_ref()
.and_then(|s| s.get_device(addr % MMIO_ROOT_PAGE_SIZE as Addr))
}
fn crosses_boundary(&self, addr: Addr, size: Addr) -> bool {
if addr >= RAM_START {
return false;
}
if addr + size > RAM_START {
return true;
}
let page_id = addr as usize / MMIO_SECOND_LEVEL_PAGE_SIZE;
let root_page_id = page_id / MMIO_SECOND_LEVEL_ENTRIES;
let end = addr + size - 1;
match self.0[root_page_id].as_ref() {
Some(s) => match s {
MmioSecondLevel::SubTable(_) => {
let end_page_id = end as usize / MMIO_SECOND_LEVEL_PAGE_SIZE;
page_id != end_page_id
}
MmioSecondLevel::Interface(_) => {
let end_root_page_id = end as usize / MMIO_ROOT_PAGE_SIZE;
root_page_id != end_root_page_id
}
},
None => false,
}
}
}
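
For intuition about crosses_boundary: in the SubTable case an access is rejected only when its first and last byte fall in different 64 KiB MMIO pages (the Interface case uses the 4 MiB root page instead). A standalone sketch of the sub-table rule:

// Sketch only: the page-straddle test behind crosses_boundary's SubTable arm.
const PAGE: u64 = 64 * 1024;

fn same_page(addr: u64, size: u64) -> bool {
    addr / PAGE == (addr + size - 1) / PAGE
}

fn main() {
    assert!(!same_page(0x1000_FFFE, 4)); // straddles two pages -> AddressMisaligned
    assert!(same_page(0x1000_0002, 4)); // stays in one page -> forwarded whole
}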
impl Default for MmioRoot {
fn default() -> Self {
Self(Box::new([(); MMIO_ROOT_ENTRIES].map(|_| None)))
}
}
#[derive(Clone)]
enum MmioSecondLevel {
SubTable(Box<[Option<Arc<dyn MemDeviceInterface>>; MMIO_SECOND_LEVEL_ENTRIES]>),
Interface(Arc<dyn MemDeviceInterface>),
}
impl MmioSecondLevel {
fn get_device(&self, addr: Addr) -> Option<(Arc<dyn MemDeviceInterface>, Addr)> {
let page_id = addr as usize / MMIO_SECOND_LEVEL_PAGE_SIZE;
match self {
Self::SubTable(t) => t[page_id]
.as_ref()
.map(|i| (i.clone(), addr % MMIO_SECOND_LEVEL_PAGE_SIZE as Addr)),
Self::Interface(i) => Some((i.clone(), addr)),
}
}
}
impl Default for MmioSecondLevel {
fn default() -> Self {
Self::SubTable(Box::new([(); MMIO_SECOND_LEVEL_ENTRIES].map(|_| None)))
}
}
#[allow(unused_variables)]
pub trait MemDeviceInterface {
    fn write_dword(&self, addr: Addr, value: DWord) -> Result<(), MemoryExceptionType> {
        Err(MemoryExceptionType::AccessFault)
    }
    fn write_word(&self, addr: Addr, value: Word) -> Result<(), MemoryExceptionType> {
        Err(MemoryExceptionType::AccessFault)
    }
    fn write_hword(&self, addr: Addr, value: HWord) -> Result<(), MemoryExceptionType> {
        Err(MemoryExceptionType::AccessFault)
    }
    fn write_byte(&self, addr: Addr, value: Byte) -> Result<(), MemoryExceptionType> {
        Err(MemoryExceptionType::AccessFault)
    }
    fn read_dword(&self, addr: Addr) -> Result<DWord, MemoryExceptionType> {
        Err(MemoryExceptionType::AccessFault)
    }
    fn read_word(&self, addr: Addr) -> Result<Word, MemoryExceptionType> {
        Err(MemoryExceptionType::AccessFault)
    }
    fn read_hword(&self, addr: Addr) -> Result<HWord, MemoryExceptionType> {
        Err(MemoryExceptionType::AccessFault)
    }
    fn read_byte(&self, addr: Addr) -> Result<Byte, MemoryExceptionType> {
        Err(MemoryExceptionType::AccessFault)
    }
}
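
Finally, a hypothetical device against the new trait (the `ScratchReg` type and its one-register behavior are invented here; `Addr = u64` and `Byte = u8` are assumed from how the rest of the file uses them). Overriding only the byte accessors leaves every other access on the default AccessFault path:

// Hypothetical example, not part of the commit: a single-byte scratch register.
use std::sync::atomic::{AtomicU8, Ordering::Relaxed};

struct ScratchReg(AtomicU8);

impl MemDeviceInterface for ScratchReg {
    fn read_byte(&self, addr: Addr) -> Result<Byte, MemoryExceptionType> {
        // `addr` is already device-relative after the two-level lookup.
        if addr == 0 {
            Ok(self.0.load(Relaxed))
        } else {
            Err(MemoryExceptionType::AccessFault)
        }
    }

    fn write_byte(&self, addr: Addr, value: Byte) -> Result<(), MemoryExceptionType> {
        if addr == 0 {
            self.0.store(value, Relaxed);
            Ok(())
        } else {
            Err(MemoryExceptionType::AccessFault)
        }
    }
}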