diff --git a/libbpf-rs/src/map.rs b/libbpf-rs/src/map.rs
index 57812d37..3b87bb06 100644
--- a/libbpf-rs/src/map.rs
+++ b/libbpf-rs/src/map.rs
@@ -27,6 +27,7 @@ use bitflags::bitflags;
 use libbpf_sys::bpf_map_info;
 use libbpf_sys::bpf_obj_get_info_by_fd;
 
+use crate::error;
 use crate::util;
 use crate::util::parse_ret_i32;
 use crate::util::validate_bpf_ret;
@@ -369,7 +370,7 @@ fn lookup_batch_raw<M>(
     elem_flags: MapFlags,
     flags: MapFlags,
     delete: bool,
-) -> Result<BatchedMapIter<'_>>
+) -> BatchedMapIter<'_>
 where
     M: MapCore + ?Sized,
 {
@@ -382,21 +383,35 @@ where
         ..Default::default()
     };
 
-    let key_size = match map.map_type() {
-        MapType::Hash | MapType::PercpuHash | MapType::LruHash | MapType::LruPercpuHash => {
-            map.key_size().max(4)
-        }
-        _ => map.key_size(),
+    // for maps of type BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH, LRU_PERCPU_HASH}
+    // the key size must be at least 4 bytes
+    let key_size = if map.map_type().is_hash_map() {
+        map.key_size().max(4)
+    } else {
+        map.key_size()
     };
 
-    Ok(BatchedMapIter::new(
-        map.as_fd(),
-        count,
-        key_size,
-        map.value_size(),
-        opts,
-        delete,
-    ))
+    BatchedMapIter::new(map.as_fd(), count, key_size, map.value_size(), opts, delete)
+}
+
+/// Internal function that returns an error for per-cpu and bloom filter maps.
+fn check_not_bloom_or_percpu<M>(map: &M) -> Result<()>
+where
+    M: MapCore + ?Sized,
+{
+    if map.map_type().is_bloom_filter() {
+        return Err(Error::with_invalid_data(
+            "lookup_bloom_filter() must be used for bloom filter maps",
+        ));
+    }
+    if map.map_type().is_percpu() {
+        return Err(Error::with_invalid_data(format!(
+            "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
+            map.map_type(),
+        )));
+    }
+
+    Ok(())
 }
 
 #[allow(clippy::wildcard_imports)]
@@ -446,18 +461,7 @@ pub trait MapCore: Debug + AsFd + private::Sealed {
     /// must be used.
     /// If the map is of type bloom_filter the function [`Self::lookup_bloom_filter()`] must be used
     fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
-        if self.map_type().is_bloom_filter() {
-            return Err(Error::with_invalid_data(
-                "lookup_bloom_filter() must be used for bloom filter maps",
-            ));
-        }
-        if self.map_type().is_percpu() {
-            return Err(Error::with_invalid_data(format!(
-                "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
-                self.map_type(),
-            )));
-        }
-
+        check_not_bloom_or_percpu(self)?;
         let out_size = self.value_size() as usize;
         lookup_raw(self, key, flags, out_size)
     }
@@ -471,19 +475,8 @@ pub trait MapCore: Debug + AsFd + private::Sealed {
         elem_flags: MapFlags,
         flags: MapFlags,
     ) -> Result<BatchedMapIter<'_>> {
-        if self.map_type().is_bloom_filter() {
-            return Err(Error::with_invalid_data(
-                "lookup_bloom_filter() must be used for bloom filter maps",
-            ));
-        }
-        if self.map_type().is_percpu() {
-            return Err(Error::with_invalid_data(format!(
-                "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
-                self.map_type(),
-            )));
-        }
-
-        lookup_batch_raw(self, count, elem_flags, flags, false)
+        check_not_bloom_or_percpu(self)?;
+        Ok(lookup_batch_raw(self, count, elem_flags, flags, false))
     }
 
     /// Returns many elements in batch mode from the map.
@@ -495,19 +488,8 @@ pub trait MapCore: Debug + AsFd + private::Sealed {
         elem_flags: MapFlags,
         flags: MapFlags,
     ) -> Result<BatchedMapIter<'_>> {
-        if self.map_type().is_bloom_filter() {
-            return Err(Error::with_invalid_data(
-                "lookup_bloom_filter() must be used for bloom filter maps",
-            ));
-        }
-        if self.map_type().is_percpu() {
-            return Err(Error::with_invalid_data(format!(
-                "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
-                self.map_type(),
-            )));
-        }
-
-        lookup_batch_raw(self, count, elem_flags, flags, true)
+        check_not_bloom_or_percpu(self)?;
+        Ok(lookup_batch_raw(self, count, elem_flags, flags, true))
     }
 
     /// Returns if the given value is likely present in bloom_filter as `bool`.
@@ -1253,6 +1235,14 @@ impl MapType {
         )
     }
 
+    /// Returns if the map is of one of the hashmap types.
+    pub fn is_hash_map(&self) -> bool {
+        matches!(
+            self,
+            MapType::Hash | MapType::PercpuHash | MapType::LruHash | MapType::LruPercpuHash
+        )
+    }
+
     /// Returns if the map is keyless map type as per documentation of libbpf
     /// Keyless map types are: Queues, Stacks and Bloom Filters
     fn is_keyless(&self) -> bool {
@@ -1370,16 +1360,16 @@ impl Iterator for MapKeyIter<'_> {
 #[derive(Debug)]
 pub struct BatchedMapIter<'map> {
     map_fd: BorrowedFd<'map>,
-    count: u32,
-    key_size: u32,
-    value_size: u32,
+    delete: bool,
+    count: usize,
+    key_size: usize,
+    value_size: usize,
     keys: Vec<u8>,
     values: Vec<u8>,
     prev: Option<Vec<u8>>,
     next: Vec<u8>,
     batch_opts: libbpf_sys::bpf_map_batch_opts,
-    done: bool,
-    delete: bool,
+    index: Option<usize>,
 }
@@ -1393,39 +1383,31 @@ impl<'map> BatchedMapIter<'map> {
     ) -> Self {
         Self {
             map_fd,
-            count,
-            key_size,
-            value_size,
+            delete,
+            count: count as usize,
+            key_size: key_size as usize,
+            value_size: value_size as usize,
             keys: vec![0; (count * key_size) as usize],
             values: vec![0; (count * value_size) as usize],
             prev: None,
             next: vec![0; key_size as usize],
             batch_opts,
-            done: false,
-            delete,
+            index: None,
         }
     }
-}
-
-impl Iterator for BatchedMapIter<'_> {
-    type Item = (Vec<Vec<u8>>, Vec<Vec<u8>>);
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.done {
-            return None;
-        }
 
+    fn lookup_next_batch(&mut self) {
         let prev = self.prev.as_ref().map_or(ptr::null(), |p| p.as_ptr());
-        let mut count = self.count;
+        let mut count = self.count as u32;
 
         let ret = unsafe {
             if self.delete {
                 libbpf_sys::bpf_map_lookup_and_delete_batch(
                     self.map_fd.as_raw_fd(),
                     prev as _,
-                    self.next.as_mut_ptr() as _,
-                    self.keys.as_mut_ptr() as *mut c_void,
-                    self.values.as_mut_ptr() as *mut c_void,
+                    self.next.as_mut_ptr().cast(),
+                    self.keys.as_mut_ptr().cast(),
+                    self.values.as_mut_ptr().cast(),
                     (&mut count) as *mut u32,
                     &self.batch_opts as *const libbpf_sys::bpf_map_batch_opts,
                 )
@@ -1433,42 +1415,67 @@ impl Iterator for BatchedMapIter<'_> {
             } else {
                 libbpf_sys::bpf_map_lookup_batch(
                     self.map_fd.as_raw_fd(),
                     prev as _,
-                    self.next.as_mut_ptr() as _,
-                    self.keys.as_mut_ptr() as *mut c_void,
-                    self.values.as_mut_ptr() as *mut c_void,
+                    self.next.as_mut_ptr().cast(),
+                    self.keys.as_mut_ptr().cast(),
+                    self.values.as_mut_ptr().cast(),
                     (&mut count) as *mut u32,
                     &self.batch_opts as *const libbpf_sys::bpf_map_batch_opts,
                 )
             }
         };
 
-        if ret == -14 || count == 0 {
-            None
-        } else {
-            self.prev = Some(self.next.clone());
-            if ret != 0 {
-                self.done = true;
+        if let Err(e) = util::parse_ret(ret) {
+            match e.kind() {
+                // in this case we can trust the returned count value
+                error::ErrorKind::NotFound => {}
+                // retry with same input arguments
+                error::ErrorKind::Interrupted => {
+                    return self.lookup_next_batch();
+                }
+                _ => {
+                    self.index = None;
+                    return;
+                }
             }
+        }
 
-            unsafe {
-                self.keys.set_len((self.key_size * count) as usize);
-                self.values.set_len((self.value_size * count) as usize);
-            }
+        self.prev = Some(self.next.clone());
+        self.index = Some(0);
+
+        unsafe {
+            self.keys.set_len(self.key_size * count as usize);
+            self.values.set_len(self.value_size * count as usize);
+        }
+    }
+}
 
-            let keys = self
-                .keys
-                .chunks_exact(self.key_size as usize)
-                .map(|c| c.to_vec())
-                .collect();
+impl Iterator for BatchedMapIter<'_> {
+    type Item = (Vec<u8>, Vec<u8>);
 
-            let values = self
-                .values
-                .chunks_exact(self.value_size as usize)
-                .map(|c| c.to_vec())
-                .collect();
+    fn next(&mut self) -> Option<Self::Item> {
+        let load_next_batch = match self.index {
+            Some(index) => {
+                let batch_finished = index * self.key_size >= self.keys.len();
+                let last_batch = self.keys.len() < self.key_size * self.count;
+                batch_finished && !last_batch
+            }
+            None => true,
+        };
 
-            Some((keys, values))
+        if load_next_batch {
+            self.lookup_next_batch();
         }
+
+        let index = self.index?;
+        let key = self.keys.chunks_exact(self.key_size).nth(index)?.to_vec();
+        let val = self
+            .values
+            .chunks_exact(self.value_size)
+            .nth(index)?
+            .to_vec();
+
+        self.index = Some(index + 1);
+        Some((key, val))
     }
 }
diff --git a/libbpf-rs/tests/test.rs b/libbpf-rs/tests/test.rs
index 9c84d5c8..bf9dd65c 100644
--- a/libbpf-rs/tests/test.rs
+++ b/libbpf-rs/tests/test.rs
@@ -57,7 +57,6 @@ use crate::common::get_test_object_path;
 use crate::common::open_test_object;
 use crate::common::with_ringbuffer;
 
-
 #[tag(root)]
 #[test]
 fn test_object_build_and_load() {
@@ -272,74 +271,43 @@ fn test_object_map_lookup_batch() {
             .is_ok());
     }
 
-    let mut iter = start.lookup_batch(2, MapFlags::ANY, MapFlags::ANY)
-        .expect("failed to lookup batch");
-
-    let (keys, vals) = iter.next()
-        .expect("failed to find any values");
-    assert_eq!(keys.len(), 2);
-    assert_eq!(keys.len(), vals.len());
-
-    let key0 = u32::from_ne_bytes(keys[0].to_vec().try_into().unwrap());
-    let val0 = u64::from_ne_bytes(vals[0].to_vec().try_into().unwrap());
-    assert_eq!(val0, data[&key0]);
-
-    let key1 = u32::from_ne_bytes(keys[1].to_vec().try_into().unwrap());
-    let val1 = u64::from_ne_bytes(vals[1].to_vec().try_into().unwrap());
-    assert_eq!(val1, data[&key1]);
-
-    let (keys, vals) = iter.next()
-        .expect("failed to find any values");
-    assert_eq!(keys.len(), 2);
-    assert_eq!(keys.len(), vals.len());
-
-    let key0 = u32::from_ne_bytes(keys[0].to_vec().try_into().unwrap());
-    let val0 = u64::from_ne_bytes(vals[0].to_vec().try_into().unwrap());
-    assert_eq!(val0, data[&key0]);
-
-    let key1 = u32::from_ne_bytes(keys[1].to_vec().try_into().unwrap());
-    let val1 = u64::from_ne_bytes(vals[1].to_vec().try_into().unwrap());
-    assert_eq!(val1, data[&key1]);
+    let elems = start
+        .lookup_batch(2, MapFlags::ANY, MapFlags::ANY)
+        .expect("failed to lookup batch")
+        .collect::<Vec<_>>();
+    assert_eq!(elems.len(), 4);
 
-    assert!(iter.next().is_none());
+    for (key, val) in elems.into_iter() {
+        let key = u32::from_ne_bytes(key.try_into().unwrap());
+        let val = u64::from_ne_bytes(val.try_into().unwrap());
+        assert_eq!(val, data[&key]);
+    }
 
     // test lookup with batch size larger than the number of keys
-    let mut iter = start.lookup_batch(5, MapFlags::ANY, MapFlags::ANY)
-        .expect("failed to lookup batch");
-
-    let (keys, vals) = iter.next()
-        .expect("failed to find any values");
-    assert_eq!(keys.len(), 4);
-    assert_eq!(keys.len(), vals.len());
-
-    let mut iter = start.lookup_and_delete_batch(3, MapFlags::ANY, MapFlags::ANY)
-        .expect("failed to lookup and delete batch");
-
-    let (keys, vals) = iter.next()
-        .expect("failed to find any values");
-    assert_eq!(keys.len(), 3);
-    assert_eq!(keys.len(), vals.len());
-
-    let key0 = u32::from_ne_bytes(keys[0].to_vec().try_into().unwrap());
-    let val0 = u64::from_ne_bytes(vals[0].to_vec().try_into().unwrap());
-    assert_eq!(val0, data[&key0]);
-
-    let key1 = u32::from_ne_bytes(keys[1].to_vec().try_into().unwrap());
-    let val1 = u64::from_ne_bytes(vals[1].to_vec().try_into().unwrap());
-    assert_eq!(val1, data[&key1]);
+    let elems = start
+        .lookup_batch(5, MapFlags::ANY, MapFlags::ANY)
+        .expect("failed to lookup batch")
+        .collect::<Vec<_>>();
+    assert_eq!(elems.len(), 4);
 
-    let key2 = u32::from_ne_bytes(keys[2].to_vec().try_into().unwrap());
-    let val2 = u64::from_ne_bytes(vals[2].to_vec().try_into().unwrap());
-    assert_eq!(val2, data[&key2]);
+    for (key, val) in elems.into_iter() {
+        let key = u32::from_ne_bytes(key.try_into().unwrap());
+        let val = u64::from_ne_bytes(val.try_into().unwrap());
+        assert_eq!(val, data[&key]);
+    }
 
-    let (keys, vals) = iter.next()
-        .expect("failed to find any values");
-    assert_eq!(keys.len(), 1);
-    assert_eq!(keys.len(), vals.len());
+    // test lookup and delete with batch size that does not divide total count
+    let elems = start
+        .lookup_and_delete_batch(3, MapFlags::ANY, MapFlags::ANY)
+        .expect("failed to lookup batch")
+        .collect::<Vec<_>>();
+    assert_eq!(elems.len(), 4);
 
-    let key0 = u32::from_ne_bytes(keys[0].to_vec().try_into().unwrap());
-    let val0 = u64::from_ne_bytes(vals[0].to_vec().try_into().unwrap());
-    assert_eq!(val0, data[&key0]);
+    for (key, val) in elems.into_iter() {
+        let key = u32::from_ne_bytes(key.try_into().unwrap());
+        let val = u64::from_ne_bytes(val.try_into().unwrap());
+        assert_eq!(val, data[&key]);
+    }
 
     // Map should be empty now.
     assert!(start.keys().collect::<Vec<_>>().is_empty())