Implement the memory version system that will be necessary for LR/SC

This commit is contained in:
2026-01-06 21:50:47 +01:00
parent 07e755340e
commit 9861187fa6

View File

@@ -6,7 +6,10 @@
use std::sync::{ use std::sync::{
Arc, Arc,
atomic::{AtomicU8, AtomicU16, AtomicU32, AtomicU64, Ordering::Relaxed}, atomic::{
AtomicU8, AtomicU16, AtomicU32, AtomicU64,
Ordering::{self, Relaxed},
},
}; };
use memmap2::MmapMut; use memmap2::MmapMut;
@@ -179,6 +182,7 @@ pub enum MemoryMappingType {
pub struct Ram { pub struct Ram {
buf: MmapMut, buf: MmapMut,
version_counters: Arc<[AtomicU32]>,
} }
#[cfg(target_endian = "big")] #[cfg(target_endian = "big")]
@@ -191,9 +195,16 @@ impl Ram {
} }
Ok(Self { Ok(Self {
buf: MmapMut::map_anon(size)?, buf: MmapMut::map_anon(size)?,
// SAFETY: We do not care about the initial version counts. Wrapping is fine. Only
// equality is ever checked for, not magnitude.
version_counters: unsafe {
Arc::new_uninit_slice(size.div_ceil(Self::VERSION_CHUNK_SIZE)).assume_init()
},
}) })
} }
const VERSION_CHUNK_SIZE: usize = 64;
pub fn buf_mut(&mut self) -> &mut [u8] { pub fn buf_mut(&mut self) -> &mut [u8] {
self.buf.as_mut() self.buf.as_mut()
} }
@@ -311,6 +322,9 @@ impl Ram {
return Ok(()); return Ok(());
} }
self.claim_addr_even(addr)
.ok_or_else(|| MemoryExceptionType::AccessFault.with_addr(addr))?;
let index = (addr / 8) as usize; let index = (addr / 8) as usize;
unsafe { unsafe {
self.buf_transmuted::<AtomicU64>() self.buf_transmuted::<AtomicU64>()
@@ -336,6 +350,9 @@ impl Ram {
return Ok(()); return Ok(());
} }
self.claim_addr_even(addr)
.ok_or_else(|| MemoryExceptionType::AccessFault.with_addr(addr))?;
let index = (addr / 4) as usize; let index = (addr / 4) as usize;
unsafe { unsafe {
self.buf_transmuted::<AtomicU32>() self.buf_transmuted::<AtomicU32>()
@@ -361,6 +378,9 @@ impl Ram {
return Ok(()); return Ok(());
} }
self.claim_addr_even(addr)
.ok_or_else(|| MemoryExceptionType::AccessFault.with_addr(addr))?;
let index = (addr / 2) as usize; let index = (addr / 2) as usize;
unsafe { unsafe {
self.buf_transmuted::<AtomicU16>() self.buf_transmuted::<AtomicU16>()
@@ -375,6 +395,8 @@ impl Ram {
} }
#[inline] #[inline]
pub fn write_byte(&self, addr: u64, value: u8) -> Result<(), MemoryException> { pub fn write_byte(&self, addr: u64, value: u8) -> Result<(), MemoryException> {
self.claim_addr_even(addr)
.ok_or_else(|| MemoryExceptionType::AccessFault.with_addr(addr))?;
self.buf_atomic() self.buf_atomic()
.get(addr as usize) .get(addr as usize)
.ok_or(MemoryException { .ok_or(MemoryException {
@@ -384,6 +406,58 @@ impl Ram {
.store(value, Relaxed); .store(value, Relaxed);
Ok(()) Ok(())
} }
/// Claim the version counter for the `VERSION_CHUNK_SIZE`-byte chunk
/// containing `addr`, spinning until the chunk is unclaimed (even version).
///
/// Returns `None` when `addr` lies beyond the counters (out-of-range RAM
/// address). The returned guard bumps the chunk's version when dropped,
/// which presumably invalidates outstanding LR reservations — confirm
/// against the LR/SC implementation.
pub fn claim_addr_even(&self, addr: u64) -> Option<RamVersionClaim<'_>> {
    // NOTE(review): `addr as usize` truncates on 32-bit targets; a huge
    // address could alias a valid chunk id there. Consider usize::try_from.
    let chunk_id = addr as usize / Self::VERSION_CHUNK_SIZE;
    // `.get()` bounds-checks, so no counter exists past the end of RAM.
    let chunk_counter = self.version_counters.get(chunk_id)?;
    Some(RamVersionClaim::claim_even(chunk_counter))
}
}
/// RAII guard over a single RAM version-chunk counter.
///
/// While the guard is alive the counter holds an odd value, marking the
/// chunk as claimed; `Drop` publishes the next even version (initial + 2),
/// and `reset` restores the initial version instead. Only equality of
/// versions is ever compared, so wrapping arithmetic is fine.
pub struct RamVersionClaim<'a> {
version_counter: &'a AtomicU32,
// Even version observed when the claim was taken.
// NOTE(review): "intial" is a typo for "initial" — private field, so a
// rename is a safe local follow-up.
intial_version: u32,
}
impl<'a> RamVersionClaim<'a> {
    /// Spin until `counter` holds an even (unclaimed) version, then
    /// atomically bump it to the next odd value, claiming the chunk.
    ///
    /// The stored `intial_version` is the even value observed at claim
    /// time; `Drop` later publishes `initial + 2` (the next even version).
    pub fn claim_even(counter: &'a AtomicU32) -> RamVersionClaim<'a> {
        loop {
            let current_version = counter.load(Ordering::Acquire);
            // An odd value means another claimant currently holds the chunk.
            if !current_version.is_multiple_of(2) {
                // Be polite while busy-waiting instead of hammering the
                // cache line with back-to-back loads.
                std::hint::spin_loop();
                continue;
            }
            // Attempt to increment and therefore successfully claim the
            // version. `compare_exchange_weak` is the idiomatic CAS inside a
            // retry loop: it may fail spuriously but is cheaper on some
            // targets, and a spurious failure just takes another lap.
            let res = counter.compare_exchange_weak(
                current_version,
                current_version.wrapping_add(1),
                Ordering::AcqRel,
                Ordering::Acquire,
            );
            if let Ok(initial_version) = res {
                return RamVersionClaim {
                    version_counter: counter,
                    intial_version: initial_version,
                };
            }
            std::hint::spin_loop();
        }
    }
    /// Reset the counter to the version observed at claim time, as if the
    /// claim never happened.
    ///
    /// ONLY USE IF YOU ARE SURE RAM HAS NOT BEEN WRITTEN TO: readers
    /// comparing against the initial version will see memory as unchanged.
    pub fn reset(self) {
        self.version_counter
            .store(self.intial_version, Ordering::Release);
        // Skip `Drop`, which would otherwise publish `initial + 2`.
        std::mem::forget(self);
    }
}
impl<'a> Drop for RamVersionClaim<'a> {
    /// Release the claim by publishing the next even version.
    ///
    /// Sequence per claim: initial (even) -> initial + 1 (odd, claimed)
    /// -> initial + 2 (even, released). Wrapping is fine: versions are
    /// only ever compared for equality, never for magnitude.
    fn drop(&mut self) {
        let released = self.intial_version.wrapping_add(2);
        self.version_counter.store(released, Ordering::Release);
    }
} }
pub const MMIO_SECOND_LEVEL_PAGE_SIZE: usize = 64 * 1024; pub const MMIO_SECOND_LEVEL_PAGE_SIZE: usize = 64 * 1024;