Merge pull request #316 from LGFae/unmap-when-idle
Unmap when idle
LGFae authored May 23, 2024
2 parents 5e346c5 + 6ed6929 commit 8533744
Showing 4 changed files with 100 additions and 44 deletions.
14 changes: 6 additions & 8 deletions daemon/src/main.rs
@@ -336,7 +336,8 @@ impl wayland::interfaces::wl_surface::EvHandler for Daemon {
 impl wayland::interfaces::wl_buffer::EvHandler for Daemon {
     fn release(&mut self, sender_id: ObjectId) {
         for wallpaper in self.wallpapers.iter() {
-            if wallpaper.try_set_buffer_release_flag(sender_id) {
+            let strong_count = Arc::strong_count(wallpaper);
+            if wallpaper.try_set_buffer_release_flag(sender_id, strong_count) {
                 break;
             }
         }
@@ -480,15 +481,12 @@ fn main() -> Result<(), String> {
     // wait for the animation threads to finish.
     while !daemon.wallpapers.is_empty() {
         // When all animations finish, Arc's strong count will be exactly 1
-        daemon
-            .wallpapers
-            .retain(|w| Arc::<Wallpaper>::strong_count(w) > 1);
+        daemon.wallpapers.retain(|w| Arc::strong_count(w) > 1);
         // set all frame callbacks as completed, otherwise the animation threads might deadlock on
         // the conditional variable
-        daemon
-            .wallpapers
-            .iter()
-            .for_each(|w| w.frame_callback_completed());
+        for wallpaper in &daemon.wallpapers {
+            wallpaper.frame_callback_completed();
+        }
         // yield to waste less cpu
         std::thread::yield_now();
     }
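The shutdown loop above leans on Arc::strong_count: while an animation thread is still running it holds a clone of the wallpaper's Arc, so the count only falls back to 1 once that thread exits. A minimal, self-contained sketch of the same idea (the String payload and the thread body are illustrative stand-ins, not the daemon's actual types):

    use std::sync::Arc;
    use std::thread;

    fn main() {
        // Stand-in for a wallpaper: the main thread keeps one Arc, and an
        // animation thread holds a clone while it is still running.
        let mut wallpapers = vec![Arc::new(String::from("output-1"))];

        let handle = {
            let w = Arc::clone(&wallpapers[0]); // strong count is now 2
            thread::spawn(move || {
                // ... animate using `w` ...
                drop(w); // strong count falls back to 1 when the thread is done
            })
        };

        // Same shape as the daemon's shutdown loop: keep an entry only while
        // some other thread still holds a clone of it.
        while !wallpapers.is_empty() {
            wallpapers.retain(|w| Arc::strong_count(w) > 1);
            thread::yield_now();
        }
        handle.join().unwrap();
    }

The same count is what the wl_buffer release handler forwards to try_set_buffer_release_flag below, since a count above 1 means an animation is still in flight.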
17 changes: 9 additions & 8 deletions daemon/src/wallpaper.rs
@@ -324,14 +324,15 @@ impl Wallpaper {
         self.layer_surface == layer_surface
     }
 
-    pub(super) fn try_set_buffer_release_flag(&self, buffer: ObjectId) -> bool {
-        let pool = self.pool.lock().unwrap();
-        if let Some(release_flag) = pool.get_buffer_release_flag(buffer) {
-            release_flag.set_released();
-            true
-        } else {
-            false
-        }
+    pub(super) fn try_set_buffer_release_flag(
+        &self,
+        buffer: ObjectId,
+        arc_strong_count: usize,
+    ) -> bool {
+        self.pool
+            .lock()
+            .unwrap()
+            .set_buffer_release_flag(buffer, arc_strong_count != 1)
     }
 
     pub(super) fn has_callback(&self, callback: ObjectId) -> bool {
54 changes: 33 additions & 21 deletions daemon/src/wayland/bump_pool.rs
@@ -59,7 +59,10 @@ impl Buffer {
 #[derive(Debug)]
 /// A pool implementation that only gives buffers of a fixed size, creating new ones if none of
 /// them are freed. It also takes care of copying the previous buffer's content over to the new one
-/// for us
+/// for us.
+///
+/// Current implementation will automatically unmap the underlying shared memory when we aren't
+/// animating and all created buffers have been released
 pub(crate) struct BumpPool {
     pool_id: ObjectId,
     mmap: Mmap,
@@ -74,8 +77,11 @@ impl BumpPool {
     pub(crate) fn new(width: i32, height: i32) -> Self {
         let len =
             width as usize * height as usize * super::globals::pixel_format().channels() as usize;
-        let (mmap, pool_id) = new_pool(len);
-        let buffers = vec![];
+        let mmap = Mmap::create(len);
+        let pool_id = globals::object_create(super::WlDynObj::ShmPool);
+        super::interfaces::wl_shm::req::create_pool(pool_id, &mmap.fd(), len as i32)
+            .expect("failed to create WlShmPool object");
+        let buffers = Vec::with_capacity(2);
 
         Self {
             pool_id,
@@ -87,14 +93,27 @@
         }
     }
 
-    pub(crate) fn get_buffer_release_flag(&self, buffer_id: ObjectId) -> Option<&ReleaseFlag> {
-        self.buffers.iter().find_map(|b| {
-            if b.object_id == buffer_id {
-                Some(&b.released)
-            } else {
-                None
+    /// Releases a buffer, if we have it
+    ///
+    /// This will unmap the underlying shared memory if we aren't animating and all buffers have
+    /// been released
+    pub(crate) fn set_buffer_release_flag(
+        &mut self,
+        buffer_id: ObjectId,
+        is_animating: bool,
+    ) -> bool {
+        if let Some(b) = self.buffers.iter().find(|b| b.object_id == buffer_id) {
+            b.released.set_released();
+            if !is_animating && self.buffers.iter().all(|b| b.released.is_released()) {
+                for buffer in self.buffers.drain(..) {
+                    buffer.destroy();
+                }
+                self.mmap.unmap();
             }
-        })
+            true
+        } else {
+            false
+        }
     }
 
     fn buffer_len(&self) -> usize {
@@ -116,6 +135,10 @@
         let len = self.buffer_len();
         let new_len = self.occupied_bytes() + len;
 
+        // we unmap the shared memory file descriptor when animations are done, so here we must
+        // ensure the bytes are actually mmaped
+        self.mmap.ensure_mapped();
+
         if new_len > self.mmap.len() {
             if new_len > i32::MAX as usize {
                 panic!("Buffers have grown too big. We cannot allocate any more.")
@@ -174,8 +197,6 @@
     }
 
     /// gets the last buffer we've drawn to
-    ///
-    /// This may return None if there was a resize request in-between the last call to get_drawable
     pub(crate) fn get_commitable_buffer(&self) -> ObjectId {
         self.buffers[self.last_used_buffer].object_id
     }
@@ -201,12 +222,3 @@
         }
     }
 }
-
-fn new_pool(len: usize) -> (Mmap, ObjectId) {
-    let mmap = Mmap::create(len);
-    let pool_id = globals::object_create(super::WlDynObj::ShmPool);
-    super::interfaces::wl_shm::req::create_pool(pool_id, &mmap.fd(), len as i32)
-        .expect("failed to create WlShmPool object");
-
-    (mmap, pool_id)
-}
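Putting the daemon-side pieces together: wl_buffer::release hands the daemon a buffer id, the Arc strong count of the owning wallpaper tells it whether an animation thread is still alive (count above 1), and the pool receives that as is_animating when flagging the buffer released. A rough, self-contained model of the pool's side of that decision (ToyBuffer and ToyPool are invented names, not the crate's types, and plain bools stand in for the release flags and the mapping):

    struct ToyBuffer {
        id: u32,
        released: bool,
    }

    struct ToyPool {
        buffers: Vec<ToyBuffer>,
        mapped: bool,
    }

    impl ToyPool {
        /// Mirrors the shape of set_buffer_release_flag: mark the matching buffer
        /// as released and, once idle, drop every buffer and the mapping.
        fn set_buffer_release_flag(&mut self, id: u32, is_animating: bool) -> bool {
            let found = match self.buffers.iter_mut().find(|b| b.id == id) {
                Some(b) => {
                    b.released = true;
                    true
                }
                None => false,
            };
            if found && !is_animating && self.buffers.iter().all(|b| b.released) {
                self.buffers.clear(); // stands in for destroying the wl_buffers
                self.mapped = false;  // stands in for Mmap::unmap
            }
            found
        }
    }

The boolean return mirrors the real method: the daemon tries each wallpaper in turn and stops at the first pool that actually owned the released buffer.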
59 changes: 52 additions & 7 deletions utils/src/ipc/mmap.rs
@@ -12,6 +12,7 @@ pub struct Mmap {
     fd: OwnedFd,
     ptr: NonNull<std::ffi::c_void>,
     len: usize,
+    mmaped: bool,
 }
 
 impl Mmap {
@@ -30,7 +31,48 @@
             // POSIX says that the implementation will never select an address at 0
             NonNull::new_unchecked(ptr)
         };
-        Self { fd, ptr, len }
+        Self {
+            fd,
+            ptr,
+            len,
+            mmaped: true,
+        }
+    }
+
+    #[inline]
+    /// Unmaps without destroying the file descriptor
+    ///
+    /// This is only ever used in the daemon, when animations finish, in order to free up memory
+    pub fn unmap(&mut self) {
+        if let Err(e) = unsafe { munmap(self.ptr.as_ptr(), self.len) } {
+            eprintln!("ERROR WHEN UNMAPPING MEMORY: {e}");
+        } else {
+            self.mmaped = false;
+        }
+    }
+
+    #[inline]
+    /// Ensures that the underlying file descriptor is mapped
+    ///
+    /// Because `unmap`, above, is only used in the daemon, this is also only used there
+    pub fn ensure_mapped(&mut self) {
+        if !self.mmaped {
+            self.mmaped = true;
+            self.ptr = unsafe {
+                let ptr = mmap(
+                    std::ptr::null_mut(),
+                    self.len,
+                    Self::PROT,
+                    Self::FLAGS,
+                    &self.fd,
+                    0,
+                )
+                .unwrap();
+                // SAFETY: the function above will never return a null pointer if it succeeds
+                // POSIX says that the implementation will never select an address at 0
+                NonNull::new_unchecked(ptr)
+            };
+        }
     }
 
     #[inline]
@@ -57,9 +99,7 @@
             }
         }
 
-        if let Err(e) = unsafe { munmap(self.ptr.as_ptr(), self.len) } {
-            eprintln!("ERROR WHEN UNMAPPING MEMORY: {e}");
-        }
+        self.unmap();
 
         self.len = new_len;
         self.ptr = unsafe {
@@ -94,7 +134,12 @@
             // POSIX says that the implementation will never select an address at 0
             NonNull::new_unchecked(ptr)
         };
-        Self { fd, ptr, len }
+        Self {
+            fd,
+            ptr,
+            len,
+            mmaped: true,
+        }
     }
 
     #[inline]
@@ -126,8 +171,8 @@
 impl Drop for Mmap {
     #[inline]
     fn drop(&mut self) {
-        if let Err(e) = unsafe { munmap(self.ptr.as_ptr(), self.len) } {
-            eprintln!("ERROR WHEN UNMAPPING MEMORY: {e}");
+        if self.mmaped {
+            self.unmap();
         }
     }
 }
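The new Mmap methods keep the OwnedFd alive while the mapping itself comes and goes: unmap releases the pages with munmap but leaves the shared-memory file open, and ensure_mapped re-creates the mapping with the same protection and flags before the next draw. A rough standalone equivalent written directly against the libc crate (the real code goes through this crate's own mmap/munmap wrappers and its PROT/FLAGS constants, so treat this purely as an illustration of the pattern):

    use std::os::fd::{AsRawFd, OwnedFd};

    struct LazyMap {
        fd: OwnedFd,            // stays open for the whole lifetime
        ptr: *mut libc::c_void, // only valid while `mapped` is true
        len: usize,
        mapped: bool,
    }

    impl LazyMap {
        /// Drop the mapping but keep the file descriptor, so the kernel can
        /// reclaim the pages while the shared file stays usable.
        fn unmap(&mut self) {
            if self.mapped && unsafe { libc::munmap(self.ptr, self.len) } == 0 {
                self.mapped = false;
            }
        }

        /// Re-create the mapping on demand, e.g. right before drawing again.
        fn ensure_mapped(&mut self) {
            if !self.mapped {
                let ptr = unsafe {
                    libc::mmap(
                        std::ptr::null_mut(),
                        self.len,
                        libc::PROT_READ | libc::PROT_WRITE,
                        libc::MAP_SHARED,
                        self.fd.as_raw_fd(),
                        0,
                    )
                };
                assert!(ptr != libc::MAP_FAILED, "mmap failed");
                self.ptr = ptr;
                self.mapped = true;
            }
        }
    }

The fast path, when the memory is already mapped, is a single boolean check, which is why the pool can afford to call ensure_mapped unconditionally before every draw.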
