372 lines
12 KiB
Rust
372 lines
12 KiB
Rust
// Copyright (c) 2025 taitep
|
|
// SPDX-License-Identifier: MIT
|
|
//
|
|
// This file is part of TRVE (https://gitea.taitep.se/taitep/trve)
|
|
// See LICENSE file in the project root for full license text.
|
|
|
|
use std::sync::{
|
|
Arc,
|
|
atomic::{AtomicU8, AtomicU16, AtomicU32, AtomicU64, Ordering::Relaxed},
|
|
};
|
|
|
|
use memmap2::MmapMut;
|
|
|
|
use crate::consts::{Byte, DWord, HWord, Word};
|
|
|
|
/// Index of one memory page in the guest physical address space.
pub type PageNum = usize;

/// Size in bytes of one memory page (4 KiB).
const PAGE_SIZE: usize = 4096;
|
|
|
|
/// Full memory map of the emulated machine: one RAM region plus a set of
/// MMIO device mappings, addressed by page number.
#[derive(Clone)]
pub struct MemConfig {
    /// Backing RAM; `Arc` so the map can be cloned and shared cheaply.
    pub ram: Arc<Ram>,
    /// First page number at which `ram` is mapped.
    pub ram_start: PageNum,
    /// MMIO device mappings, searched linearly on access.
    pub devices: Box<[DeviceEntry]>,
}
|
|
|
|
impl MemConfig {
|
|
#[allow(clippy::needless_borrow)]
|
|
pub fn find_device_by_page(&self, page: PageNum) -> Option<&DeviceEntry> {
|
|
for entry in self.devices.iter() {
|
|
if page_in_range(page, entry.base, entry.size) {
|
|
return Some(&entry);
|
|
}
|
|
}
|
|
|
|
None
|
|
}
|
|
|
|
pub fn memory_mapping_type(&self, page: PageNum) -> Option<MemoryMappingType> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
Some(MemoryMappingType::RAM)
|
|
} else {
|
|
self.find_device_by_page(page)
|
|
.map(|_x| MemoryMappingType::MMIO)
|
|
}
|
|
}
|
|
|
|
pub fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
self.ram.read_dword(page - self.ram_start, offset)
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
|
|
entry.interface.read_dword(page - entry.base, offset)
|
|
}
|
|
}
|
|
pub fn read_word(&self, page: PageNum, offset: u16) -> Result<Word, MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
self.ram.read_word(page - self.ram_start, offset)
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
|
|
entry.interface.read_word(page - entry.base, offset)
|
|
}
|
|
}
|
|
pub fn read_hword(&self, page: PageNum, offset: u16) -> Result<HWord, MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
self.ram.read_hword(page - self.ram_start, offset)
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
|
|
entry.interface.read_hword(page - entry.base, offset)
|
|
}
|
|
}
|
|
pub fn read_byte(&self, page: PageNum, offset: u16) -> Result<Byte, MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
self.ram.read_byte(page - self.ram_start, offset)
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
|
|
entry.interface.read_byte(page - entry.base, offset)
|
|
}
|
|
}
|
|
|
|
pub fn write_dword(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
value: DWord,
|
|
) -> Result<(), MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
self.ram.write_dword(page - self.ram_start, offset, value)
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
entry
|
|
.interface
|
|
.write_dword(page - entry.base, offset, value)
|
|
}
|
|
}
|
|
pub fn write_word(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
value: Word,
|
|
) -> Result<(), MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
self.ram.write_word(page - self.ram_start, offset, value)
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
entry.interface.write_word(page - entry.base, offset, value)
|
|
}
|
|
}
|
|
pub fn write_hword(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
value: HWord,
|
|
) -> Result<(), MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
self.ram.write_hword(page - self.ram_start, offset, value)
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
entry
|
|
.interface
|
|
.write_hword(page - entry.base, offset, value)
|
|
}
|
|
}
|
|
pub fn write_byte(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
value: Byte,
|
|
) -> Result<(), MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
self.ram.write_byte(page - self.ram_start, offset, value)
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
entry.interface.write_byte(page - entry.base, offset, value)
|
|
}
|
|
}
|
|
|
|
pub fn get_atomic_dword(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
) -> Result<&AtomicU64, MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
debug_assert!(((offset * 8) as usize) < PAGE_SIZE);
|
|
let index = page * (PAGE_SIZE / 8) + (offset as usize);
|
|
unsafe {
|
|
self.ram
|
|
.buf_transmuted::<AtomicU64>()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)
|
|
}
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
entry.interface.get_atomic_dword(page - entry.base, offset)
|
|
}
|
|
}
|
|
pub fn get_atomic_word(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
) -> Result<&AtomicU32, MemAccessFault> {
|
|
if page_in_range(page, self.ram_start, self.ram.pages) {
|
|
debug_assert!(((offset * 4) as usize) < PAGE_SIZE);
|
|
let index = page * (PAGE_SIZE / 4) + (offset as usize);
|
|
unsafe {
|
|
self.ram
|
|
.buf_transmuted::<AtomicU32>()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)
|
|
}
|
|
} else {
|
|
let entry = self.find_device_by_page(page).ok_or(MemAccessFault)?;
|
|
entry.interface.get_atomic_word(page - entry.base, offset)
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Whether `page` lies in the half-open range `[start, start + pages)`.
///
/// Computed via subtraction so that `start + pages` can never overflow.
fn page_in_range(page: PageNum, start: PageNum, pages: PageNum) -> bool {
    match page.checked_sub(start) {
        Some(relative) => relative < pages,
        None => false,
    }
}
|
|
|
|
/// What kind of backing a given page number resolves to in the memory map.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryMappingType {
    /// Page is serviced by a memory-mapped I/O device.
    MMIO,
    /// Page is backed by ordinary RAM.
    RAM,
}
|
|
|
|
/// Guest RAM backed by an anonymous memory mapping.
///
/// All reads and writes go through atomic types, so a `Ram` behind an
/// `Arc` can be accessed from multiple threads without data races.
pub struct Ram {
    // Anonymous mapping of `pages * PAGE_SIZE` bytes.
    buf: MmapMut,
    // Number of pages in `buf`.
    pages: PageNum,
}
|
|
|
|
// The RAM accessors reinterpret the byte buffer as native-endian integers
// (see `Ram::buf_transmuted`), which only matches the intended guest byte
// order on little-endian hosts — refuse to build elsewhere.
#[cfg(target_endian = "big")]
compile_error!("Current RAM implementation requires a little-endian host.");
|
|
|
|
impl Ram {
|
|
pub fn try_new(pages: PageNum) -> Result<Self, std::io::Error> {
|
|
Ok(Self {
|
|
buf: MmapMut::map_anon(pages * PAGE_SIZE)?,
|
|
pages,
|
|
})
|
|
}
|
|
|
|
pub fn buf_mut(&mut self) -> &mut [u8] {
|
|
self.buf.as_mut()
|
|
}
|
|
|
|
pub fn pages(&self) -> PageNum {
|
|
self.pages
|
|
}
|
|
|
|
/// # Safety
|
|
/// Safe if T has a size divisible by page size (4kb) (or is known to have a size divisible by the full ram size) and you know that the RAM is made up of valid naturally aligned values of T
|
|
#[inline]
|
|
pub unsafe fn buf_transmuted<T>(&self) -> &[T] {
|
|
debug_assert!(self.buf.len().is_multiple_of(std::mem::size_of::<T>()));
|
|
unsafe {
|
|
std::slice::from_raw_parts(
|
|
self.buf.as_ptr() as *const T,
|
|
self.buf.len() / std::mem::size_of::<T>(),
|
|
)
|
|
}
|
|
}
|
|
|
|
#[inline]
|
|
pub fn buf_atomic(&self) -> &[AtomicU8] {
|
|
unsafe { std::slice::from_raw_parts(self.buf.as_ptr() as *const AtomicU8, self.buf.len()) }
|
|
}
|
|
|
|
#[inline]
|
|
pub fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, MemAccessFault> {
|
|
debug_assert!(((offset * 8) as usize) < PAGE_SIZE);
|
|
let index = page * (PAGE_SIZE / 8) + (offset as usize);
|
|
Ok(unsafe {
|
|
self.buf_transmuted::<AtomicU64>()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)
|
|
}?
|
|
.load(Relaxed))
|
|
}
|
|
#[inline]
|
|
pub fn read_word(&self, page: PageNum, offset: u16) -> Result<Word, MemAccessFault> {
|
|
debug_assert!(((offset * 4) as usize) < PAGE_SIZE);
|
|
let index = page * (PAGE_SIZE / 4) + (offset as usize);
|
|
Ok(unsafe {
|
|
self.buf_transmuted::<AtomicU32>()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)
|
|
}?
|
|
.load(Relaxed))
|
|
}
|
|
#[inline]
|
|
pub fn read_hword(&self, page: PageNum, offset: u16) -> Result<HWord, MemAccessFault> {
|
|
debug_assert!(((offset * 2) as usize) < PAGE_SIZE);
|
|
let index = page * (PAGE_SIZE / 2) + (offset as usize);
|
|
Ok(unsafe {
|
|
self.buf_transmuted::<AtomicU16>()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)
|
|
}?
|
|
.load(Relaxed))
|
|
}
|
|
#[inline]
|
|
pub fn read_byte(&self, page: PageNum, offset: u16) -> Result<Byte, MemAccessFault> {
|
|
debug_assert!((offset as usize) < PAGE_SIZE);
|
|
let index = page * PAGE_SIZE + (offset as usize);
|
|
Ok(self
|
|
.buf_atomic()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)?
|
|
.load(Relaxed))
|
|
}
|
|
|
|
#[inline]
|
|
pub fn write_dword(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
value: DWord,
|
|
) -> Result<(), MemAccessFault> {
|
|
debug_assert!(((offset * 8) as usize) < PAGE_SIZE);
|
|
let index = page * (PAGE_SIZE / 8) + (offset as usize);
|
|
unsafe {
|
|
self.buf_transmuted::<AtomicU64>()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)
|
|
}?
|
|
.store(value, Relaxed);
|
|
Ok(())
|
|
}
|
|
#[inline]
|
|
pub fn write_word(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
value: Word,
|
|
) -> Result<(), MemAccessFault> {
|
|
debug_assert!(((offset * 4) as usize) < PAGE_SIZE);
|
|
let index = page * (PAGE_SIZE / 4) + (offset as usize);
|
|
unsafe {
|
|
self.buf_transmuted::<AtomicU32>()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)
|
|
}?
|
|
.store(value, Relaxed);
|
|
Ok(())
|
|
}
|
|
#[inline]
|
|
pub fn write_hword(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
value: HWord,
|
|
) -> Result<(), MemAccessFault> {
|
|
debug_assert!(((offset * 2) as usize) < PAGE_SIZE);
|
|
let index = page * (PAGE_SIZE / 2) + (offset as usize);
|
|
unsafe {
|
|
self.buf_transmuted::<AtomicU16>()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)
|
|
}?
|
|
.store(value, Relaxed);
|
|
Ok(())
|
|
}
|
|
#[inline]
|
|
pub fn write_byte(
|
|
&self,
|
|
page: PageNum,
|
|
offset: u16,
|
|
value: Byte,
|
|
) -> Result<(), MemAccessFault> {
|
|
debug_assert!((offset as usize) < PAGE_SIZE);
|
|
let index = page * PAGE_SIZE + (offset as usize);
|
|
self.buf_atomic()
|
|
.get(index)
|
|
.ok_or(MemAccessFault)?
|
|
.store(value, Relaxed);
|
|
Ok(())
|
|
}
|
|
}
|
|
|
|
/// One MMIO device mapping in the memory map.
#[derive(Clone)]
pub struct DeviceEntry {
    /// First page the device is mapped at.
    pub base: PageNum,
    /// Number of consecutive pages the mapping covers.
    pub size: PageNum,
    /// Handler that services accesses to this range; the page numbers it
    /// receives are relative to `base`.
    pub interface: Arc<dyn MemDeviceInterface>,
}
|
|
|
|
/// Access interface implemented by memory-mapped devices.
///
/// `page` is relative to the device's base page (see `DeviceEntry`), and
/// `offset` counts access-sized units within the page — 8-byte units for
/// dword accesses, 4 for word, 2 for hword, 1 for byte — matching how
/// `MemConfig` dispatches to implementations. Invalid accesses are
/// reported as `MemAccessFault`.
pub trait MemDeviceInterface {
    fn write_dword(&self, page: PageNum, offset: u16, value: DWord) -> Result<(), MemAccessFault>;
    fn write_word(&self, page: PageNum, offset: u16, value: Word) -> Result<(), MemAccessFault>;
    fn write_hword(&self, page: PageNum, offset: u16, value: HWord) -> Result<(), MemAccessFault>;
    fn write_byte(&self, page: PageNum, offset: u16, value: Byte) -> Result<(), MemAccessFault>;

    fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, MemAccessFault>;
    fn read_word(&self, page: PageNum, offset: u16) -> Result<Word, MemAccessFault>;
    fn read_hword(&self, page: PageNum, offset: u16) -> Result<HWord, MemAccessFault>;
    fn read_byte(&self, page: PageNum, offset: u16) -> Result<Byte, MemAccessFault>;

    // Return the atomic cell backing a location, for atomic
    // read-modify-write operations; a device may answer with
    // `MemAccessFault` (e.g. if it cannot expose such a cell).
    fn get_atomic_word(&self, page: PageNum, offset: u16) -> Result<&AtomicU32, MemAccessFault>;
    fn get_atomic_dword(&self, page: PageNum, offset: u16) -> Result<&AtomicU64, MemAccessFault>;
}
|
|
|
|
/// Error that means something has gone wrong accessing memory.
/// Examples are: accessing unmapped memory, accessing an MMIO register at
/// the wrong size.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MemAccessFault;

impl std::fmt::Display for MemAccessFault {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("memory access fault")
    }
}

// Implementing Error lets callers propagate the fault with `?` into
// `Box<dyn Error>` / anyhow-style error types.
impl std::error::Error for MemAccessFault {}
|