
Commit

Merge pull request #9 from embassy-rs/write-max-chunk-size
Write max chunk size
Dirbaio authored May 2, 2024
2 parents 9973a06 + 32f9ce8 commit 6c1c3c3
Showing 6 changed files with 65 additions and 30 deletions.
1 change: 1 addition & 0 deletions Cargo.toml
@@ -189,6 +189,7 @@ branching-factor-2 = [] # Default
 branching-factor-3 = []
 branching-factor-4 = []
 
+max-chunk-size-128 = []
 max-chunk-size-256 = []
 max-chunk-size-512 = []
 max-chunk-size-1024 = []
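The new `max-chunk-size-128` feature joins the existing mutually exclusive chunk-size selectors. A minimal sketch of how a consumer crate might opt into it (crate name and version here are assumptions, not taken from this commit):

```toml
# Hypothetical consumer Cargo.toml; pick exactly one max-chunk-size-* feature.
# Smaller chunks shrink the RAM buffer at the cost of more chunk-header overhead.
[dependencies]
ekv = { version = "0.1", features = ["max-chunk-size-128"] }
```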
2 changes: 1 addition & 1 deletion README.md
@@ -19,7 +19,7 @@ Key-value database for embedded systems, for raw NOR flash, using an LSM-Tree.
 - Corruption-resistant: A corrupted or deliberately manipulated flash image cannot cause crashes, panics or infinite loops, only `Err(Corrupted)` errors.
 - Optional CRC32 protection of headers and data on flash.
 - Extensively tested, using unit tests and fuzzing.
-- Tunable chunk size. Smaller chunks reduce RAM requirements at the expense of doing more and smaller writesand spending a bit more flash space in chunk headers with CRCs.
+- Tunable chunk size. Smaller chunks reduce RAM requirements at the expense of doing more and smaller writes and spending a bit more flash space in chunk headers with CRCs.
 
 ## Current status
 
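To put a rough number on that tradeoff: every chunk pays for its own `ChunkHeader` (plus a CRC when the `crc` feature is on). With the illustrative sizes assumed in the worked example under `src/file.rs` below, 128-byte chunks leave about 3832 payload bytes per 4096-byte page versus roughly 4064 for a single maximal chunk, i.e. around 6% more space spent on headers.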
2 changes: 1 addition & 1 deletion gen_config.py
@@ -46,7 +46,7 @@ def feature(name, default, min=None, max=None, pow2=None, vals=None, factors=[])
 
 feature("scratch_page_count", default=4, min=0, max=65536, pow2=True)
 feature("branching_factor", default=2, min=2, max=4)
-feature("max_chunk_size", default=4096, vals=[256, 512, 1024, 2048, 4096])
+feature("max_chunk_size", default=4096, vals=[128, 256, 512, 1024, 2048, 4096])
 
 # ========= Update Cargo.toml
 
2 changes: 2 additions & 0 deletions src/config.rs
@@ -216,6 +216,8 @@ const _CHECKS: () = {
     core::assert!(MAX_VALUE_SIZE > 0);
 
     core::assert!(RECORD_HEADER_SIZE <= 4);
+
+    core::assert!(MAX_CHUNK_SIZE % ALIGN == 0);
 };
 
 /// Dump the compile-time configuration to `log` or `defmt`.
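One plausible reading of the new compile-time check: chunk contents are transferred in `ALIGN`-sized units (`load_chunk` in `src/page.rs` below calls `align_up` on the chunk length), so if `MAX_CHUNK_SIZE` were not a multiple of `ALIGN`, a maximal chunk would round up past the chunk buffer. A minimal sketch of that invariant, with placeholder values standing in for the generated constants:

```rust
// Placeholder values; the real ALIGN and MAX_CHUNK_SIZE come from the
// generated config and the selected max-chunk-size-* feature.
const ALIGN: usize = 4;
const MAX_CHUNK_SIZE: usize = 128;

// Round n up to the next multiple of ALIGN (sketch of the crate's align_up).
const fn align_up(n: usize) -> usize {
    (n + ALIGN - 1) / ALIGN * ALIGN
}

// Because MAX_CHUNK_SIZE % ALIGN == 0 holds, aligning any chunk length
// n <= MAX_CHUNK_SIZE can never overflow a MAX_CHUNK_SIZE buffer.
const _: () = assert!(align_up(MAX_CHUNK_SIZE) <= MAX_CHUNK_SIZE);
```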
16 changes: 13 additions & 3 deletions src/file.rs
@@ -10,7 +10,15 @@ pub use crate::page::ReadError;
 use crate::page::{ChunkHeader, DehydratedPageReader, Header, PageHeader, PageReader, PageWriter};
 use crate::types::{OptionPageID, PageID};
 
-pub const PAGE_MAX_PAYLOAD_SIZE: usize = PAGE_SIZE - PageHeader::SIZE - size_of::<DataHeader>() - ChunkHeader::SIZE;
+// Number of chunks + chunk headers per page.
+const CHUNKS_PER_PAGE: usize =
+    (PAGE_SIZE - PageHeader::SIZE - size_of::<DataHeader>()) / (page::MAX_CHUNK_SIZE + ChunkHeader::SIZE);
+// Size of the last chunk + chunk header.
+const CHUNKS_REMAINDER: usize =
+    (PAGE_SIZE - PageHeader::SIZE - size_of::<DataHeader>()) % (page::MAX_CHUNK_SIZE + ChunkHeader::SIZE);
+// Bytes in max chunks + remainder chunk without the last chunk header.
+pub const PAGE_MAX_PAYLOAD_SIZE: usize =
+    (CHUNKS_PER_PAGE * page::MAX_CHUNK_SIZE) + CHUNKS_REMAINDER.saturating_sub(ChunkHeader::SIZE);
 
 pub type FileID = u8;
 
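Plugging illustrative numbers into the new constants makes the layout easier to see; all of the concrete sizes below are assumed stand-ins, not values taken from the crate:

```rust
const PAGE_SIZE: usize = 4096; // assumed
const PAGE_HEADER_SIZE: usize = 16; // stand-in for PageHeader::SIZE
const DATA_HEADER_SIZE: usize = 8; // stand-in for size_of::<DataHeader>()
const CHUNK_HEADER_SIZE: usize = 8; // stand-in for ChunkHeader::SIZE
const MAX_CHUNK_SIZE: usize = 128; // max-chunk-size-128

// 4096 - 16 - 8 = 4072 bytes available for chunks and their headers.
const AVAIL: usize = PAGE_SIZE - PAGE_HEADER_SIZE - DATA_HEADER_SIZE;
// 4072 / 136 = 29 full chunks, with 4072 % 136 = 128 bytes left over.
const CHUNKS_PER_PAGE: usize = AVAIL / (MAX_CHUNK_SIZE + CHUNK_HEADER_SIZE);
const CHUNKS_REMAINDER: usize = AVAIL % (MAX_CHUNK_SIZE + CHUNK_HEADER_SIZE);
// 29 * 128 + (128 - 8) = 3832 payload bytes per page.
const PAGE_MAX_PAYLOAD_SIZE: usize =
    CHUNKS_PER_PAGE * MAX_CHUNK_SIZE + CHUNKS_REMAINDER.saturating_sub(CHUNK_HEADER_SIZE);
```

Running the same formula with one maximal chunk per page gives about 4064 payload bytes under these assumptions, which is the ~6% header-overhead difference mentioned for the README above.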
@@ -28,7 +36,7 @@ pub struct MetaHeader
 }
 
 unsafe impl page::Header for MetaHeader {
-    const MAGIC: u32 = 0x1d81bccc;
+    const MAGIC: u32 = 0x1d81bcde;
 }
 
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -48,7 +56,7 @@ pub struct DataHeader
 }
 
 unsafe impl page::Header for DataHeader {
-    const MAGIC: u32 = 0x7fcbf25c;
+    const MAGIC: u32 = 0x7fcbf35d;
 }
 
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -93,6 +101,7 @@ pub struct FileManager<F: Flash> {
 
 impl<F: Flash> FileManager<F> {
     pub fn new(flash: F, random_seed: u32) -> Self {
+        assert!(FILE_COUNT * FileMeta::SIZE <= page::MAX_CHUNK_SIZE);
        Self {
            flash,
            random: random_seed,
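A note on the new runtime assert: the constraint `FILE_COUNT * FileMeta::SIZE <= page::MAX_CHUNK_SIZE` suggests the whole file table is flushed as a single chunk on the meta page, so it must fit in one chunk — a bound that only becomes interesting now that chunks can be as small as 128 bytes. (This reading is an inference from the assert itself, not stated elsewhere in the diff.)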
@@ -310,6 +319,7 @@ impl<F: Flash> FileManager<F> {
             .inspect_err(|_| {
                 debug!("read_page failed: last_page_id={:?} file_id={}", last_page_id, file_id);
             })?;
+
         let page_len = r.skip(&mut self.flash, PAGE_SIZE).await?;
         let last_seq = h.seq.add(page_len)?;
72 changes: 47 additions & 25 deletions src/page.rs
@@ -6,7 +6,7 @@ use crate::errors::Error;
 use crate::flash::Flash;
 use crate::types::PageID;
 
-const CHUNK_MAGIC: u16 = 0x59B4;
+const CHUNK_MAGIC: u16 = 0x59C5;
 
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
 #[repr(C)]
@@ -53,7 +53,8 @@ pub unsafe trait Header: Sized {
     const MAGIC: u32;
 }
 
-const MAX_CHUNK_SIZE: usize = if config::MAX_CHUNK_SIZE > (PAGE_SIZE - PageHeader::SIZE - ChunkHeader::SIZE) {
+pub(crate) const MAX_CHUNK_SIZE: usize = if config::MAX_CHUNK_SIZE > (PAGE_SIZE - PageHeader::SIZE - ChunkHeader::SIZE)
+{
     PAGE_SIZE - PageHeader::SIZE - ChunkHeader::SIZE
 } else {
     config::MAX_CHUNK_SIZE
@@ -151,9 +152,14 @@ impl ChunkIter {
             return Ok(false);
         }
 
+        if header.len as usize > MAX_CHUNK_SIZE {
+            corrupted!();
+        }
+
         let Some(data_end) = data_start.checked_add(header.len as usize) else {
-            corrupted!()
+            corrupted!();
         };
+
         if data_end > PAGE_SIZE {
             corrupted!();
         }
@@ -236,6 +242,8 @@ impl PageReader {
 
     async fn load_chunk<F: Flash>(&mut self, flash: &mut F) -> Result<(), Error<F::Error>> {
         let n = align_up(self.ch.chunk_len);
+        assert!(n <= MAX_CHUNK_SIZE);
+
         flash
             .read(
                 self.ch.page_id as _,
@@ -269,48 +277,49 @@ impl PageReader {
         self.ch.page_id
     }
 
+    /// Read up to data.len() bytes of data, or until the end of the current chunk.
+    ///
+    /// May return fewer bytes than requested if not available.
     pub async fn read<F: Flash>(&mut self, flash: &mut F, data: &mut [u8]) -> Result<usize, Error<F::Error>> {
         trace!("PageReader({:?}): read({})", self.ch.page_id, data.len());
-        if self.ch.at_end || data.is_empty() {
+        if self.is_at_eof(flash).await? || data.is_empty() {
             trace!("read: at end or zero len");
             return Ok(0);
         }
 
-        if self.chunk_pos == self.ch.chunk_len {
-            trace!("read: at end of chunk");
-            if !self.next_chunk(flash).await? {
-                trace!("read: no next chunk, we're at end.");
-                return Ok(0);
-            }
-        }
-
         let n = data.len().min(self.ch.chunk_len - self.chunk_pos);
         data[..n].copy_from_slice(&self.buf[self.chunk_pos..][..n]);
         self.chunk_pos += n;
         trace!("read: done, n={}", n);
         Ok(n)
     }
 
-    pub async fn skip<F: Flash>(&mut self, flash: &mut F, len: usize) -> Result<usize, Error<F::Error>> {
+    /// Skip up to len bytes in the reader, or until the end of the last chunk.
+    ///
+    /// Skips across chunks within the page.
+    pub async fn skip<F: Flash>(&mut self, flash: &mut F, mut len: usize) -> Result<usize, Error<F::Error>> {
         trace!("PageReader({:?}): skip({})", self.ch.page_id, len);
-        if self.ch.at_end || len == 0 {
-            trace!("skip: at end or zero len");
-            return Ok(0);
-        }
 
-        if self.chunk_pos == self.ch.chunk_len {
-            trace!("skip: at end of chunk");
-            if !self.next_chunk(flash).await? {
-                trace!("skip: no next chunk, we're at end.");
-                return Ok(0);
-            }
-        }
-
-        let n = len.min(self.ch.chunk_len - self.chunk_pos);
-        self.ch.prev_chunks_len += n;
-        self.chunk_pos += n;
-        trace!("skip: done, n={}", n);
-        Ok(n)
+        let start = len;
+        loop {
+            if self.is_at_eof(flash).await? {
+                trace!("skip: no next chunk, we're at end.");
+                return Ok(start - len);
+            }
+
+            let n = len.min(self.ch.chunk_len - self.chunk_pos);
+            self.ch.prev_chunks_len += n;
+            self.chunk_pos += n;
+            len -= n;
+            if len == 0 {
+                trace!("skip: done, n={}", start - len);
+                return Ok(start - len);
+            }
+        }
     }
 
     pub async fn is_at_eof<F: Flash>(&mut self, flash: &mut F) -> Result<bool, Error<F::Error>> {
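The reworked `skip` no longer returns after at most one chunk: it loops, draining chunk after chunk until `len` is satisfied or the page runs out, and reports how many bytes it actually skipped. A self-contained model of that loop (a sketch, not the real API — chunks are modeled as a slice of lengths and EOF as running off its end):

```rust
/// Model of cross-chunk skip: `pos` is (chunk index, offset within chunk).
/// Returns how many of the requested `len` bytes were actually skipped.
fn skip(chunks: &[usize], pos: &mut (usize, usize), mut len: usize) -> usize {
    let start = len;
    loop {
        // "EOF": no chunk left to read from.
        if pos.0 >= chunks.len() {
            return start - len;
        }
        // Take what the current chunk still has.
        let n = len.min(chunks[pos.0] - pos.1);
        pos.1 += n;
        len -= n;
        // Chunk drained: advance (the real code does this lazily via
        // is_at_eof / next_chunk).
        if pos.1 == chunks[pos.0] {
            *pos = (pos.0 + 1, 0);
        }
        if len == 0 {
            return start - len;
        }
    }
}

fn main() {
    let chunks = [100, 100, 50];
    let mut pos = (0, 0);
    assert_eq!(skip(&chunks, &mut pos, 180), 180); // spans two chunks
    assert_eq!(skip(&chunks, &mut pos, 500), 70); // clamped at end of page
}
```

This matches the call site in `src/file.rs` above, where a single `r.skip(&mut self.flash, PAGE_SIZE)` is used to measure a whole page's payload.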
@@ -433,8 +442,17 @@ impl<H: Header> PageWriter<H> {
         self.page_id
     }
 
+    fn is_chunk_full(&self) -> bool {
+        self.chunk_pos >= MAX_CHUNK_SIZE
+    }
+
+    /// Write n bytes of data to the page.
+    ///
+    /// If the current chunk is full, it will be committed.
     pub async fn write<F: Flash>(&mut self, flash: &mut F, data: &[u8]) -> Result<usize, Error<F::Error>> {
-        let max_write = PAGE_SIZE.saturating_sub(self.chunk_offset + ChunkHeader::SIZE + self.chunk_pos);
+        let max_write = PAGE_SIZE
+            .saturating_sub(self.chunk_offset + ChunkHeader::SIZE + self.chunk_pos)
+            .min(MAX_CHUNK_SIZE.saturating_sub(self.chunk_pos));
         let total_n = data.len().min(max_write);
         if total_n == 0 {
             return Ok(0);
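The extra `.min(...)` caps each write at the current chunk's remaining capacity, so `write` can now return short when a chunk fills up; the auto-commit in the next hunk then seals the chunk, and the caller is expected to call `write` again with the rest. A worked sketch of the two bounds, with assumed sizes:

```rust
const PAGE_SIZE: usize = 4096; // assumed
const CHUNK_HEADER_SIZE: usize = 8; // stand-in for ChunkHeader::SIZE
const MAX_CHUNK_SIZE: usize = 128; // max-chunk-size-128

// Mirrors the shape of the new max_write computation in PageWriter::write.
fn max_write(chunk_offset: usize, chunk_pos: usize) -> usize {
    PAGE_SIZE
        .saturating_sub(chunk_offset + CHUNK_HEADER_SIZE + chunk_pos)
        .min(MAX_CHUNK_SIZE.saturating_sub(chunk_pos))
}

fn main() {
    // 100 bytes already in the chunk: the new chunk cap binds (128 - 100 = 28),
    // even though the page itself still has almost 4 KiB free.
    assert_eq!(max_write(0, 100), 28);
    // Near the end of the page, the page-space bound binds instead.
    assert_eq!(max_write(4080, 0), 8);
}
```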
@@ -489,6 +507,10 @@ impl<H: Header> PageWriter<H> {
         self.total_pos += n;
         self.chunk_pos += n;
 
+        if self.is_chunk_full() {
+            self.commit(flash).await?;
+        }
+
         Ok(total_n)
     }
 
@@ -513,7 +535,6 @@
             // nothing to commit.
             return Ok(());
         }
-
         self.erase_if_needed(flash).await.map_err(Error::Flash)?;
 
         // flush align buf.
@@ -535,6 +556,7 @@
             #[cfg(feature = "crc")]
             crc: self.crc.finish(),
         };
+
         flash
             .write(self.page_id as _, self.chunk_offset, &h.to_bytes())
             .await
