Initial stuff and memory implementation

commit 4a2272ae49 (parent 282740cb59)
2025-09-27 16:38:06 +02:00

6 changed files with 268 additions and 0 deletions

.gitignore (vendored, new file, +1)

@@ -0,0 +1 @@
/target

Cargo.lock (generated, new file, +25)

@@ -0,0 +1,25 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4

[[package]]
name = "libc"
version = "0.2.176"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174"

[[package]]
name = "memmap2"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "843a98750cd611cc2965a8213b53b43e715f13c37a9e096c6408e69990961db7"
dependencies = [
 "libc",
]

[[package]]
name = "rvem"
version = "0.1.0"
dependencies = [
 "memmap2",
]

Cargo.toml (new file, +7)

@@ -0,0 +1,7 @@
[package]
name = "rvem"
version = "0.1.0"
edition = "2024"

[dependencies]
memmap2 = "0.9.8"

src/consts.rs (new file, +7)

@@ -0,0 +1,7 @@
// Fixed-width integers named after the byte / halfword / word / doubleword
// access sizes.
pub type Byte = u8;
pub type HWord = u16;
pub type Word = u32;
pub type DWord = u64;

// Registers and addresses are 64 bits wide.
pub type Reg = DWord;
pub type Addr = DWord;
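
These aliases follow the byte / halfword / word / doubleword naming used by src/mem.rs below, where RAM is addressed by a page number plus an offset counted in units of the access width. As a hypothetical sketch (not part of this commit; all names here are illustrative), splitting an Addr into such a (page, offset) pair for a 32-bit access could look like this, reusing the 4 KiB PAGE_SIZE from src/mem.rs:

// Hypothetical helper, not in the commit.
const PAGE_SIZE: u64 = 4096; // mirrors PAGE_SIZE in src/mem.rs

type Addr = u64; // stands in for consts::Addr

/// Page number and word-sized offset for an aligned 32-bit access at `addr`.
fn word_location(addr: Addr) -> (usize, u16) {
    debug_assert!(addr % 4 == 0, "word accesses are assumed to be aligned");
    let page = (addr / PAGE_SIZE) as usize;
    let offset = ((addr % PAGE_SIZE) / 4) as u16;
    (page, offset)
}

fn main() {
    // Byte address 0x1010 lands in page 1, at word offset 4 (byte 16 / 4).
    assert_eq!(word_location(0x1010), (1, 4));
}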

src/lib.rs (new file, +2)

@@ -0,0 +1,2 @@
mod consts;
pub mod mem;

src/mem.rs (new file, +226)

@@ -0,0 +1,226 @@
use std::sync::{
    Arc,
    atomic::{AtomicU8, AtomicU16, AtomicU32, AtomicU64, Ordering::Relaxed},
};

use memmap2::MmapMut;

use crate::consts::{Byte, DWord, HWord, Word};

type PageNum = usize;
const PAGE_SIZE: usize = 4096;

/// The emulated machine's physical memory map: RAM plus memory-mapped devices.
#[derive(Clone)]
pub struct MemConfig {
    ram: Arc<Ram>,
    ram_start: PageNum,
    devices: Box<[DeviceEntry]>,
}

impl MemConfig {
    pub fn find_device_by_page(&self, page: PageNum) -> Option<Arc<dyn MemDeviceInterface>> {
        for entry in self.devices.iter() {
            if page_in_range(page, entry.base, entry.size) {
                return Some(entry.interface.clone());
            }
        }
        None
    }

    pub fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, MemAccessFault> {
        if page_in_range(page, self.ram_start, self.ram.pages) {
            // `Ram` indexes its own buffer from page 0, so translate the
            // absolute page number into a RAM-relative one.
            self.ram.read_dword(page - self.ram_start, offset)
        } else {
            self.find_device_by_page(page)
                .ok_or(MemAccessFault)?
                .read_dword(page, offset)
        }
    }
}

fn page_in_range(page: PageNum, start: PageNum, pages: PageNum) -> bool {
    page >= start && page - start < pages
}

/// Guest RAM backed by an anonymous memory mapping, split into 4 KiB pages.
pub struct Ram {
    buf: MmapMut,
    pages: PageNum,
}

#[cfg(target_endian = "big")]
compile_error!("Current RAM implementation requires a little-endian host.");

impl Ram {
    pub fn try_new(pages: PageNum) -> Result<Self, std::io::Error> {
        Ok(Self {
            buf: MmapMut::map_anon(pages * PAGE_SIZE)?,
            pages,
        })
    }

    pub fn buf_mut(&mut self) -> &mut [u8] {
        self.buf.as_mut()
    }

    pub fn pages(&self) -> PageNum {
        self.pages
    }

    /// # Safety
    ///
    /// Safe if the size of `T` divides the page size (4 KiB), or at least the
    /// total RAM size, and the RAM contents are valid, naturally aligned
    /// values of `T`.
    #[inline]
    pub unsafe fn buf_transmuted<T>(&self) -> &[T] {
        debug_assert!(self.buf.len().is_multiple_of(std::mem::size_of::<T>()));
        unsafe {
            std::slice::from_raw_parts(
                self.buf.as_ptr() as *const T,
                self.buf.len() / std::mem::size_of::<T>(),
            )
        }
    }

    #[inline]
    pub fn buf_atomic(&self) -> &[AtomicU8] {
        unsafe { std::slice::from_raw_parts(self.buf.as_ptr() as *const AtomicU8, self.buf.len()) }
    }

    /// Loads the doubleword at `offset` within `page`; here and in the other
    /// accessors, `offset` is counted in units of the access width.
    #[inline]
    pub fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, MemAccessFault> {
        debug_assert!(((offset * 8) as usize) < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 8) + (offset as usize);
        Ok(unsafe {
            self.buf_transmuted::<AtomicU64>()
                .get(index)
                .ok_or(MemAccessFault)
        }?
        .load(Relaxed))
    }

    #[inline]
    pub fn read_word(&self, page: PageNum, offset: u16) -> Result<Word, MemAccessFault> {
        debug_assert!(((offset * 4) as usize) < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 4) + (offset as usize);
        Ok(unsafe {
            self.buf_transmuted::<AtomicU32>()
                .get(index)
                .ok_or(MemAccessFault)
        }?
        .load(Relaxed))
    }

    #[inline]
    pub fn read_hword(&self, page: PageNum, offset: u16) -> Result<HWord, MemAccessFault> {
        debug_assert!(((offset * 2) as usize) < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 2) + (offset as usize);
        Ok(unsafe {
            self.buf_transmuted::<AtomicU16>()
                .get(index)
                .ok_or(MemAccessFault)
        }?
        .load(Relaxed))
    }

    #[inline]
    pub fn read_byte(&self, page: PageNum, offset: u16) -> Result<Byte, MemAccessFault> {
        debug_assert!((offset as usize) < PAGE_SIZE);
        let index = page * PAGE_SIZE + (offset as usize);
        Ok(self
            .buf_atomic()
            .get(index)
            .ok_or(MemAccessFault)?
            .load(Relaxed))
    }

    #[inline]
    pub fn write_dword(
        &self,
        page: PageNum,
        offset: u16,
        value: DWord,
    ) -> Result<(), MemAccessFault> {
        debug_assert!(((offset * 8) as usize) < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 8) + (offset as usize);
        unsafe {
            self.buf_transmuted::<AtomicU64>()
                .get(index)
                .ok_or(MemAccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }

    #[inline]
    pub fn write_word(
        &self,
        page: PageNum,
        offset: u16,
        value: Word,
    ) -> Result<(), MemAccessFault> {
        debug_assert!(((offset * 4) as usize) < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 4) + (offset as usize);
        unsafe {
            self.buf_transmuted::<AtomicU32>()
                .get(index)
                .ok_or(MemAccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }

    #[inline]
    pub fn write_hword(
        &self,
        page: PageNum,
        offset: u16,
        value: HWord,
    ) -> Result<(), MemAccessFault> {
        debug_assert!(((offset * 2) as usize) < PAGE_SIZE);
        let index = page * (PAGE_SIZE / 2) + (offset as usize);
        unsafe {
            self.buf_transmuted::<AtomicU16>()
                .get(index)
                .ok_or(MemAccessFault)
        }?
        .store(value, Relaxed);
        Ok(())
    }

    #[inline]
    pub fn write_byte(
        &self,
        page: PageNum,
        offset: u16,
        value: Byte,
    ) -> Result<(), MemAccessFault> {
        debug_assert!((offset as usize) < PAGE_SIZE);
        let index = page * PAGE_SIZE + (offset as usize);
        self.buf_atomic()
            .get(index)
            .ok_or(MemAccessFault)?
            .store(value, Relaxed);
        Ok(())
    }
}

/// A memory-mapped device and the page range it occupies.
#[derive(Clone)]
pub struct DeviceEntry {
    base: PageNum,
    size: PageNum,
    interface: Arc<dyn MemDeviceInterface>,
}

/// Interface every memory-mapped device exposes to the memory system.
pub trait MemDeviceInterface {
    fn write_dword(&self, page: PageNum, offset: u16, value: DWord) -> Result<(), MemAccessFault>;
    fn write_word(&self, page: PageNum, offset: u16, value: Word) -> Result<(), MemAccessFault>;
    fn write_hword(&self, page: PageNum, offset: u16, value: HWord) -> Result<(), MemAccessFault>;
    fn write_byte(&self, page: PageNum, offset: u16, value: Byte) -> Result<(), MemAccessFault>;
    fn read_dword(&self, page: PageNum, offset: u16) -> Result<DWord, MemAccessFault>;
    fn read_word(&self, page: PageNum, offset: u16) -> Result<Word, MemAccessFault>;
    fn read_hword(&self, page: PageNum, offset: u16) -> Result<HWord, MemAccessFault>;
    fn read_byte(&self, page: PageNum, offset: u16) -> Result<Byte, MemAccessFault>;
    fn get_atomic_word(&self, page: PageNum, offset: u16) -> Result<&AtomicU32, MemAccessFault>;
    fn get_atomic_dword(&self, page: PageNum, offset: u16) -> Result<&AtomicU64, MemAccessFault>;
}

/// Error raised when a memory access fails, e.g. an access to unmapped memory
/// or an MMIO register accessed at an unsupported width.
#[derive(Debug)]
pub struct MemAccessFault;
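
As a rough usage sketch (not part of this commit, and assuming the crate keeps the name rvem from Cargo.toml and the signatures above), the Ram accessors could be exercised like this; the byte-level assertions rely on the little-endian layout that the compile_error! above enforces:

use rvem::mem::Ram;

fn main() -> std::io::Result<()> {
    // Four 4 KiB pages of anonymous, zero-initialised RAM.
    let ram = Ram::try_new(4)?;

    // Store a word at page 0, word offset 3 (byte address 12 within the page).
    ram.write_word(0, 3, 0xDEAD_BEEF).expect("in-bounds write");

    // Read it back as a word and byte by byte (little-endian host).
    assert_eq!(ram.read_word(0, 3).expect("in-bounds read"), 0xDEAD_BEEF);
    assert_eq!(ram.read_byte(0, 12).expect("in-bounds read"), 0xEF);
    assert_eq!(ram.read_byte(0, 15).expect("in-bounds read"), 0xDE);

    // Accesses past the mapped pages fail with MemAccessFault.
    assert!(ram.read_dword(4, 0).is_err());
    Ok(())
}

MemConfig::read_dword then routes each access either into this Ram or to whichever registered MemDeviceInterface implementation covers the page.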