// Copyright (c) 2025 taitep
// SPDX-License-Identifier: MIT
//
// This file is part of TRVE (https://gitea.taitep.se/taitep/trve)
// See LICENSE file in the project root for full license text.

use std::sync::{
    Arc,
    atomic::{AtomicU8, AtomicU16, AtomicU32, AtomicU64, Ordering::Relaxed},
};

use memmap2::MmapMut;

use crate::{
    consts::{Byte, DWord, HWord, Word},
    exceptions::ExceptionType,
};

pub type PageNum = usize;
const PAGE_SIZE: usize = 4096;
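
/// The machine's physical memory map: one RAM region starting at page
/// `ram_start` plus any number of MMIO device windows. Callers address
/// memory as a page number plus an in-page `offset` counted in units of
/// the access width (so a dword access touches byte `offset * 8` of the
/// page), which is what the index arithmetic in this file assumes.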
#[derive(Clone)]
pub struct MemConfig {
    pub ram: Arc<Ram>,
    pub ram_start: PageNum,
    pub devices: Box<[DeviceEntry]>,
}

impl MemConfig {
    pub fn find_device_by_page(&self, page: PageNum) -> Option<&DeviceEntry> {
        self.devices
            .iter()
            .find(|entry| page_in_range(page, entry.base, entry.size))
    }

    pub fn memory_mapping_type(&self, page: PageNum) -> Option<MemoryMappingType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            Some(MemoryMappingType::RAM)
        } else {
            self.find_device_by_page(page)
                .map(|_| MemoryMappingType::MMIO)
        }
    }

    pub fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            self.ram.read_dword(page - self.ram_start, offset)
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::LoadAccessFault)?;
            entry.interface.read_dword(page - entry.base, offset)
        }
    }

    pub fn read_word(&self, page: PageNum, offset: u16) -> Result<Word, ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            self.ram.read_word(page - self.ram_start, offset)
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::LoadAccessFault)?;
            entry.interface.read_word(page - entry.base, offset)
        }
    }

    pub fn read_hword(&self, page: PageNum, offset: u16) -> Result<HWord, ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            self.ram.read_hword(page - self.ram_start, offset)
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::LoadAccessFault)?;
            entry.interface.read_hword(page - entry.base, offset)
        }
    }

    pub fn read_byte(&self, page: PageNum, offset: u16) -> Result<Byte, ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            self.ram.read_byte(page - self.ram_start, offset)
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::LoadAccessFault)?;
            entry.interface.read_byte(page - entry.base, offset)
        }
    }

    pub fn write_dword(
        &self,
        page: PageNum,
        offset: u16,
        value: DWord,
    ) -> Result<(), ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            self.ram.write_dword(page - self.ram_start, offset, value)
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::StoreAmoAccessFault)?;
            entry
                .interface
                .write_dword(page - entry.base, offset, value)
        }
    }

    pub fn write_word(&self, page: PageNum, offset: u16, value: Word) -> Result<(), ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            self.ram.write_word(page - self.ram_start, offset, value)
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::StoreAmoAccessFault)?;
            entry.interface.write_word(page - entry.base, offset, value)
        }
    }

    pub fn write_hword(
        &self,
        page: PageNum,
        offset: u16,
        value: HWord,
    ) -> Result<(), ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            self.ram.write_hword(page - self.ram_start, offset, value)
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::StoreAmoAccessFault)?;
            entry
                .interface
                .write_hword(page - entry.base, offset, value)
        }
    }

    pub fn write_byte(&self, page: PageNum, offset: u16, value: Byte) -> Result<(), ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            self.ram.write_byte(page - self.ram_start, offset, value)
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::StoreAmoAccessFault)?;
            entry.interface.write_byte(page - entry.base, offset, value)
        }
    }

    pub fn get_atomic_dword(
        &self,
        page: PageNum,
        offset: u16,
    ) -> Result<&AtomicU64, ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            debug_assert!((offset as usize) * 8 < PAGE_SIZE);
            // Index with a RAM-relative page, matching the read/write paths.
            let index = (page - self.ram_start) * (PAGE_SIZE / 8) + (offset as usize);
            unsafe {
                self.ram
                    .buf_transmuted::<AtomicU64>()
                    .get(index)
                    .ok_or(ExceptionType::HardwareError)
            }
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::StoreAmoAccessFault)?;
            entry.interface.get_atomic_dword(page - entry.base, offset)
        }
    }

    pub fn get_atomic_word(&self, page: PageNum, offset: u16) -> Result<&AtomicU32, ExceptionType> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            debug_assert!((offset as usize) * 4 < PAGE_SIZE);
            // Index with a RAM-relative page, matching the read/write paths.
            let index = (page - self.ram_start) * (PAGE_SIZE / 4) + (offset as usize);
            unsafe {
                self.ram
                    .buf_transmuted::<AtomicU32>()
                    .get(index)
                    .ok_or(ExceptionType::HardwareError)
            }
        } else {
            let entry = self
                .find_device_by_page(page)
                .ok_or(ExceptionType::StoreAmoAccessFault)?;
            entry.interface.get_atomic_word(page - entry.base, offset)
        }
    }
}

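/// Whether `page` lies in the half-open window `[start, start + pages)`,
/// written as a subtraction so the upper bound cannot overflow.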
fn page_in_range(page: PageNum, start: PageNum, pages: PageNum) -> bool {
    page >= start && page - start < pages
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryMappingType {
    MMIO,
    RAM,
}

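/// Guest RAM backed by an anonymous memory mapping. All accesses go
/// through atomics, so a `Ram` behind an `Arc` can be shared between
/// threads without locking.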
pub struct Ram {
    buf: MmapMut,
    pages: PageNum,
}

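// Multi-byte accesses reinterpret the byte buffer as AtomicU16/32/64
// directly, which only matches the guest's little-endian layout on a
// little-endian host.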
#[cfg(target_endian = "big")]
compile_error!("Current RAM implementation requires a little-endian host.");

impl Ram {
    /// Allocates `pages` pages (4 KiB each) of zeroed, anonymously mapped RAM.
    pub fn try_new(pages: PageNum) -> Result<Self, std::io::Error> {
        Ok(Self {
            buf: MmapMut::map_anon(pages * PAGE_SIZE)?,
            pages,
        })
    }

    pub fn buf_mut(&mut self) -> &mut [u8] {
        self.buf.as_mut()
    }

    pub fn pages(&self) -> PageNum {
        self.pages
    }

    /// # Safety
    /// Safe only if the size of `T` divides the page size (4 KiB) or, failing
    /// that, is known to divide the total RAM size, and the RAM consists of
    /// valid, naturally aligned values of `T`.
    #[inline]
    pub unsafe fn buf_transmuted<T>(&self) -> &[T] {
        debug_assert!(self.buf.len().is_multiple_of(std::mem::size_of::<T>()));
        unsafe {
            std::slice::from_raw_parts(
                self.buf.as_ptr() as *const T,
                self.buf.len() / std::mem::size_of::<T>(),
            )
        }
    }

    #[inline]
    pub fn buf_atomic(&self) -> &[AtomicU8] {
        unsafe { std::slice::from_raw_parts(self.buf.as_ptr() as *const AtomicU8, self.buf.len()) }
    }

    #[inline]
    pub fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, ExceptionType> {
        debug_assert!((offset as usize) * 8 < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 8) + (offset as usize);
        Ok(unsafe {
            self.buf_transmuted::<AtomicU64>()
                .get(index)
                .ok_or(ExceptionType::LoadAccessFault)
        }?
        .load(Relaxed))
    }

    #[inline]
    pub fn read_word(&self, page: PageNum, offset: u16) -> Result<Word, ExceptionType> {
        debug_assert!((offset as usize) * 4 < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 4) + (offset as usize);
        Ok(unsafe {
            self.buf_transmuted::<AtomicU32>()
                .get(index)
                .ok_or(ExceptionType::LoadAccessFault)
        }?
        .load(Relaxed))
    }

    #[inline]
    pub fn read_hword(&self, page: PageNum, offset: u16) -> Result<HWord, ExceptionType> {
        debug_assert!((offset as usize) * 2 < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 2) + (offset as usize);
        Ok(unsafe {
            self.buf_transmuted::<AtomicU16>()
                .get(index)
                .ok_or(ExceptionType::LoadAccessFault)
        }?
        .load(Relaxed))
    }

    #[inline]
    pub fn read_byte(&self, page: PageNum, offset: u16) -> Result<Byte, ExceptionType> {
        debug_assert!((offset as usize) < PAGE_SIZE);
        let index = page * PAGE_SIZE + (offset as usize);
        Ok(self
            .buf_atomic()
            .get(index)
            .ok_or(ExceptionType::LoadAccessFault)?
            .load(Relaxed))
    }

    #[inline]
    pub fn write_dword(
        &self,
        page: PageNum,
        offset: u16,
        value: DWord,
    ) -> Result<(), ExceptionType> {
        debug_assert!((offset as usize) * 8 < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 8) + (offset as usize);
        unsafe {
            self.buf_transmuted::<AtomicU64>()
                .get(index)
                .ok_or(ExceptionType::StoreAmoAccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }

    #[inline]
    pub fn write_word(&self, page: PageNum, offset: u16, value: Word) -> Result<(), ExceptionType> {
        debug_assert!((offset as usize) * 4 < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 4) + (offset as usize);
        unsafe {
            self.buf_transmuted::<AtomicU32>()
                .get(index)
                .ok_or(ExceptionType::StoreAmoAccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }

    #[inline]
    pub fn write_hword(
        &self,
        page: PageNum,
        offset: u16,
        value: HWord,
    ) -> Result<(), ExceptionType> {
        debug_assert!((offset as usize) * 2 < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 2) + (offset as usize);
        unsafe {
            self.buf_transmuted::<AtomicU16>()
                .get(index)
                .ok_or(ExceptionType::StoreAmoAccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }

    #[inline]
    pub fn write_byte(&self, page: PageNum, offset: u16, value: Byte) -> Result<(), ExceptionType> {
        debug_assert!((offset as usize) < PAGE_SIZE);
        let index = page * PAGE_SIZE + (offset as usize);
        self.buf_atomic()
            .get(index)
            .ok_or(ExceptionType::StoreAmoAccessFault)?
            .store(value, Relaxed);
        Ok(())
    }
}

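/// One MMIO device mapping: `size` pages of the physical address space
/// starting at page `base`, backed by `interface`.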
#[derive(Clone)]
pub struct DeviceEntry {
    pub base: PageNum,
    pub size: PageNum,
    pub interface: Arc<dyn MemDeviceInterface>,
}

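/// Interface a memory-mapped device exposes to the bus. `page` is relative
/// to the device's `base`. Every method defaults to raising an access
/// fault, so implementors only override the access widths they support.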
#[allow(unused_variables)]
pub trait MemDeviceInterface {
    fn write_dword(&self, page: PageNum, offset: u16, value: DWord) -> Result<(), ExceptionType> {
        Err(ExceptionType::StoreAmoAccessFault)
    }

    fn write_word(&self, page: PageNum, offset: u16, value: Word) -> Result<(), ExceptionType> {
        Err(ExceptionType::StoreAmoAccessFault)
    }

    fn write_hword(&self, page: PageNum, offset: u16, value: HWord) -> Result<(), ExceptionType> {
        Err(ExceptionType::StoreAmoAccessFault)
    }

    fn write_byte(&self, page: PageNum, offset: u16, value: Byte) -> Result<(), ExceptionType> {
        Err(ExceptionType::StoreAmoAccessFault)
    }

    fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, ExceptionType> {
        Err(ExceptionType::LoadAccessFault)
    }

    fn read_word(&self, page: PageNum, offset: u16) -> Result<Word, ExceptionType> {
        Err(ExceptionType::LoadAccessFault)
    }

    fn read_hword(&self, page: PageNum, offset: u16) -> Result<HWord, ExceptionType> {
        Err(ExceptionType::LoadAccessFault)
    }

    fn read_byte(&self, page: PageNum, offset: u16) -> Result<Byte, ExceptionType> {
        Err(ExceptionType::LoadAccessFault)
    }

    fn get_atomic_word(&self, page: PageNum, offset: u16) -> Result<&AtomicU32, ExceptionType> {
        Err(ExceptionType::StoreAmoAccessFault)
    }

    fn get_atomic_dword(&self, page: PageNum, offset: u16) -> Result<&AtomicU64, ExceptionType> {
        Err(ExceptionType::StoreAmoAccessFault)
    }
}
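
// Illustrative sketch: a toy device and a smoke test for the dispatch logic
// above. `IdDevice` and the constants here are made up for the example; it
// assumes `Byte`/`HWord`/`Word`/`DWord` are the plain u8/u16/u32/u64 aliases
// from `consts`, which the atomic loads above already require.
#[cfg(test)]
mod tests {
    use super::*;

    /// Toy MMIO device: word reads at offset 0 return a constant ID;
    /// everything else keeps the trait's faulting defaults.
    struct IdDevice;

    impl MemDeviceInterface for IdDevice {
        fn read_word(&self, _page: PageNum, offset: u16) -> Result<Word, ExceptionType> {
            match offset {
                0 => Ok(0x5452_5645), // "TRVE" in ASCII
                _ => Err(ExceptionType::LoadAccessFault),
            }
        }
    }

    #[test]
    fn ram_roundtrip_and_mmio_dispatch() {
        let config = MemConfig {
            ram: Arc::new(Ram::try_new(1).expect("mmap failed")),
            ram_start: 0,
            devices: vec![DeviceEntry {
                base: 16,
                size: 1,
                interface: Arc::new(IdDevice),
            }]
            .into_boxed_slice(),
        };

        // Word index 3 of RAM page 0 round-trips through the atomic path.
        assert!(config.write_word(0, 3, 0xDEAD_BEEF).is_ok());
        assert!(matches!(config.read_word(0, 3), Ok(0xDEAD_BEEF)));

        // Page 16 falls in the device window; the device sees page 0.
        assert!(matches!(config.read_word(16, 0), Ok(0x5452_5645)));

        // The read-only device faults on stores, unmapped pages on loads.
        assert!(matches!(
            config.write_word(16, 0, 0),
            Err(ExceptionType::StoreAmoAccessFault)
        ));
        assert!(matches!(
            config.read_word(99, 0),
            Err(ExceptionType::LoadAccessFault)
        ));
    }
}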