From de266f18dd00c55f335769a4347d4417db694d69 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 1 Apr 2022 15:52:00 +0200 Subject: [PATCH 001/391] Complete wire protocol --- src/rpc.rs | 314 +++++++++++++++++++++-------------------------------- 1 file changed, 121 insertions(+), 193 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index d795f08c1..1f6d53618 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -80,14 +80,20 @@ pub enum RequestBody { /// The request. request: Vec, }, - /// A REGISTERTOPIC request. + /// A REGTOPIC request. RegisterTopic { + /// The topic we want to advertise at the node receiving this request. topic: Vec, + // Current node record of sender. enr: crate::Enr, + // Ticket content of ticket from a previous registration attempt or empty. ticket: Vec, }, /// A TOPICQUERY request. - TopicQuery { topic: TopicHash }, + TopicQuery { + /// The hashed topic we want NODES response(s) for. + topic: TopicHash + }, } #[derive(Debug, Clone, PartialEq)] @@ -101,7 +107,7 @@ pub enum ResponseBody { /// Our external UDP port as observed by the responder. port: u16, }, - /// A NODES response. + /// A NODES response to a FINDNODE or TOPICQUERY request. Nodes { /// The total number of responses that make up this response. total: u64, @@ -113,12 +119,17 @@ pub enum ResponseBody { /// The response for the talk. response: Vec, }, - Ticket { - ticket: Vec, - wait_time: u64, + /// The TICKET response. + Ticket { + /// The response to a REGTOPIC request. + ticket: Vec, + /// The time in seconds to wait before attempting to register again. + wait_time: u64 }, - RegisterConfirmation { - topic: Vec, + /// The REGCONFIRMATION response. + RegisterConfirmation { + /// The topic of a successful REGTOPIC request. + topic: Vec }, } @@ -549,61 +560,76 @@ impl Message { body: ResponseBody::Talk { response }, }) } + 7 => { + // RegisterTopicRequest + if list_len != 4 { + debug!("RegisterTopic Request has an invalid RLP list length. 
Expected 2, found {}", list_len); + return Err(DecoderError::RlpIncorrectListLen); + } + let topic = rlp.val_at::>(1)?; + let enr_rlp = rlp.at(2)?; + let enr = enr_rlp.as_val::>()?; + let ticket = rlp.val_at::>(3)?; + Message::Request(Request { + id, + body: RequestBody::RegisterTopic { topic, enr, ticket }, + }) + } + 8 => { + // TicketResponse + if list_len != 3 { + debug!("RegisterTopic Response has an invalid RLP list length. Expected 2, found {}", list_len); + return Err(DecoderError::RlpIncorrectListLen); + } + let ticket = rlp.val_at::>(1)?; + let wait_time = rlp.val_at::(2)?; + Message::Response(Response { + id, + body: ResponseBody::Ticket { ticket, wait_time }, + }) + } + 9 => { + // RegisterConfirmationResponse + if list_len != 2 { + debug!( + "TopicQuery Request has an invalid RLP list length. Expected 2, found {}", + list_len + ); + return Err(DecoderError::RlpIncorrectListLen); + } + let topic = rlp.val_at::>(1)?; + Message::Response(Response { + id, + body: ResponseBody::RegisterConfirmation { topic }, + }) + } + 10 => { + // TopicQueryRequest + if list_len != 2 { + debug!( + "TopicQuery Request has an invalid RLP list length. Expected 2, found {}", + list_len + ); + return Err(DecoderError::RlpIncorrectListLen); + } + let topic = { + let topic_bytes = rlp.val_at::>(1)?; + if topic_bytes.len() > 32 { + debug!("TopicQuery Request has a topic greater than 32 bytes"); + return Err(DecoderError::RlpIsTooBig); + } + let mut topic = [0u8; 32]; + topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); + topic + }; + Message::Request(Request { + id, + body: RequestBody::TopicQuery { topic }, + }) + } _ => { return Err(DecoderError::Custom("Unknown RPC message type")); - } /* - * All other RPC messages are currently not supported as per the 5.1 specification. - - 7 => { - // RegisterTopicRequest - if list_len != 2 { - debug!("RegisterTopic Request has an invalid RLP list length. 
Expected 2, found {}", list_len); - return Err(DecoderError::RlpIncorrectListLen); - } - let ticket = rlp.val_at::>(1)?; - Message::Request(Request { - id, - body: RequestBody::RegisterTopic { ticket }, - }) - } - 8 => { - // RegisterTopicResponse - if list_len != 2 { - debug!("RegisterTopic Response has an invalid RLP list length. Expected 2, found {}", list_len); - return Err(DecoderError::RlpIncorrectListLen); - } - Message::Response(Response { - id, - body: ResponseBody::RegisterTopic { - registered: rlp.val_at::(1)?, - }, - }) - } - 9 => { - // TopicQueryRequest - if list_len != 2 { - debug!( - "TopicQuery Request has an invalid RLP list length. Expected 2, found {}", - list_len - ); - return Err(DecoderError::RlpIncorrectListLen); - } - let topic = { - let topic_bytes = rlp.val_at::>(1)?; - if topic_bytes.len() > 32 { - debug!("Ticket Request has a topic greater than 32 bytes"); - return Err(DecoderError::RlpIsTooBig); - } - let mut topic = [0u8; 32]; - topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); - topic - }; - Message::Request(Request { - id, - body: RequestBody::TopicQuery { topic }, - }) - } - */ + } }; Ok(message) @@ -836,7 +862,7 @@ mod tests { } #[test] - fn encode_decode_ticket_request() { + fn encode_decode_talk_request() { let id = RequestId(vec![1]); let request = Message::Request(Request { id, @@ -852,167 +878,69 @@ mod tests { assert_eq!(request, decoded); } - /* - * These RPC messages are not in use yet - * - #[test] - fn ref_test_encode_request_ticket() { - // reference input - let id = 1; - let hash_bytes = - hex::decode("fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - // expected hex output - let expected_output = - hex::decode("05e201a0fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - let mut topic_hash = [0; 32]; - topic_hash.copy_from_slice(&hash_bytes); - - let message = Message::Request(Request { - id, - body: RequestBody::Ticket { topic: topic_hash 
}, - }); - assert_eq!(message.encode(), expected_output); - } - - #[test] - fn ref_test_encode_request_register_topic() { - // reference input - let id = 1; - let ticket = - hex::decode("fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - // expected hex output - let expected_output = - hex::decode("07e201a0fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - let message = Message::Request(Request { - id, - body: RequestBody::RegisterTopic { ticket }, - }); - assert_eq!(message.encode(), expected_output); - } - - #[test] - fn ref_test_encode_request_topic_query() { - // reference input - let id = 1; - let hash_bytes = - hex::decode("fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - // expected hex output - let expected_output = - hex::decode("09e201a0fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - let mut topic_hash = [0; 32]; - topic_hash.copy_from_slice(&hash_bytes); - - let message = Message::Request(Request { - id, - body: RequestBody::TopicQuery { topic: topic_hash }, - }); - assert_eq!(message.encode(), expected_output); - } - - #[test] - fn ref_test_encode_response_register_topic() { - // reference input - let id = 1; - let registered = true; - - // expected hex output - let expected_output = hex::decode("08c20101").unwrap(); - let message = Message::Response(Response { - id, - body: ResponseBody::RegisterTopic { registered }, - }); - assert_eq!(message.encode(), expected_output); - } - #[test] fn encode_decode_register_topic_request() { + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let request = Message::Request(Request { - id: 1, + id: RequestId(vec![1]), body: RequestBody::RegisterTopic { - topic: vec![1,2,3], + topic: vec![1, 2, 3], + enr, ticket: vec![1, 2, 3, 4, 5], }, }); let 
encoded = request.clone().encode(); - let decoded = Message::decode(encoded).unwrap(); + let decoded = Message::decode(&encoded).unwrap(); assert_eq!(request, decoded); } #[test] - fn encode_decode_register_topic_response() { - let request = Message::Response(Response { - id: 0, - body: ResponseBody::RegisterTopic { registered: true }, + fn encode_decode_ticket_response() { + let response = Message::Response(Response { + id: RequestId(vec![1]), + body: ResponseBody::Ticket { + ticket: vec![1, 2, 3], + wait_time: 1u64, + }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(encoded).unwrap(); + let encoded = response.clone().encode(); + let decoded = Message::decode(&encoded).unwrap(); - assert_eq!(request, decoded); + assert_eq!(response, decoded); } #[test] - fn encode_decode_topic_query_request() { - let request = Message::Request(Request { - id: 1, - body: RequestBody::TopicQuery { topic: [17u8; 32] }, + fn encode_decode_register_confirmation_response() { + let response = Message::Response(Response { + id: RequestId(vec![1]), + body: ResponseBody::RegisterConfirmation { + topic: vec![1, 2, 3], + }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(encoded).unwrap(); - - assert_eq!(request, decoded); - } - - #[test] - fn ref_test_encode_response_ticket() { - // reference input - let id = 1; - let ticket = [0; 32].to_vec(); // all 0's - let wait_time = 5; - - // expected hex output - let expected_output = hex::decode( - "06e301a0000000000000000000000000000000000000000000000000000000000000000005", - ) - .unwrap(); + let encoded = response.clone().encode(); + let decoded = Message::decode(&encoded).unwrap(); - let message = Message::Response(Response { - id, - body: ResponseBody::Ticket { ticket, wait_time }, - }); - assert_eq!(message.encode(), expected_output); + assert_eq!(response, decoded); } #[test] - fn encode_decode_ticket_response() { - let request = Message::Response(Response { - id: 0, - body: 
ResponseBody::Ticket { - ticket: vec![1, 2, 3, 4, 5], - wait_time: 5, - }, + fn encode_decode_topic_query_request() { + let request = Message::Request(Request { + id: RequestId(vec![1]), + body: RequestBody::TopicQuery { topic: [0u8; 32] }, }); let encoded = request.clone().encode(); - let decoded = Message::decode(encoded).unwrap(); + let decoded = Message::decode(&encoded).unwrap(); assert_eq!(request, decoded); } - - */ } From 2bf40a1b136c7fb48a893581fbfc01cf58d6d5fc Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 1 Apr 2022 16:25:12 +0200 Subject: [PATCH 002/391] Fix typos --- src/rpc.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 1f6d53618..810047fce 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -73,7 +73,7 @@ pub enum RequestBody { /// The distance(s) of peers we expect to be returned in the response. distances: Vec, }, - /// A Talk request. + /// A TALKREQ request. Talk { /// The protocol requesting. protocol: Vec, @@ -114,9 +114,9 @@ pub enum ResponseBody { /// A list of ENR's returned by the responder. nodes: Vec>, }, - /// The TALK response. + /// The TALKRESP response. Talk { - /// The response for the talk. + /// The response for the TALKREQ request. response: Vec, }, /// The TICKET response. @@ -428,10 +428,10 @@ impl Message { }) } 2 => { - // PingResponse + // PongResponse if list_len != 4 { debug!( - "Ping Response has an invalid RLP list length. Expected 4, found {}", + "Pong Response has an invalid RLP list length. Expected 4, found {}", list_len ); return Err(DecoderError::RlpIncorrectListLen); @@ -455,7 +455,7 @@ impl Message { } } _ => { - debug!("Ping Response has incorrect byte length for IP"); + debug!("Pong Response has incorrect byte length for IP"); return Err(DecoderError::RlpIncorrectListLen); } }; @@ -530,7 +530,7 @@ impl Message { }) } 5 => { - // Talk Request + // TalkRequest if list_len != 3 { debug!( "Talk Request has an invalid RLP list length. 
Expected 3, found {}", @@ -546,7 +546,7 @@ impl Message { }) } 6 => { - // Talk Response + // TalkResponse if list_len != 2 { debug!( "Talk Response has an invalid RLP list length. Expected 2, found {}", From ca36212849e980cd58145640daf8e6555b1e8e95 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 1 Apr 2022 21:29:50 +0200 Subject: [PATCH 003/391] Add logic for ads --- src/advertisements.rs | 157 ++++++++++++++++++++++++++++++++ src/handler/hashmap_delay.rs | 170 ----------------------------------- src/lib.rs | 2 + src/rpc.rs | 12 +-- src/service.rs | 38 ++++++-- src/service/test.rs | 1 + src/ticket/mod.rs | 11 +++ 7 files changed, 210 insertions(+), 181 deletions(-) create mode 100644 src/advertisements.rs delete mode 100644 src/handler/hashmap_delay.rs create mode 100644 src/ticket/mod.rs diff --git a/src/advertisements.rs b/src/advertisements.rs new file mode 100644 index 000000000..5a7ca34dc --- /dev/null +++ b/src/advertisements.rs @@ -0,0 +1,157 @@ +use super::*; +use core::time::Duration; +use enr::NodeId; +use futures::prelude::*; +use std::collections::{HashMap, VecDeque}; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::time::{sleep, Instant, Sleep}; +use tracing::debug; + +pub const MAX_ADS_PER_TOPIC: usize = 100; +pub const MAX_ADS: i32 = 5000; +pub const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); + +type Topic = [u8; 32]; +pub struct Ads { + expirations: VecDeque<(Pin>, Topic)>, + ads: HashMap>, + total_ads: i32, +} + +impl Ads { + pub fn new() -> Self { + Ads { + expirations: VecDeque::new(), + ads: HashMap::new(), + total_ads: 0, + } + } + + pub fn ticket_wait_time(&self, topic: Topic) -> Duration { + let now = Instant::now(); + match self.ads.get(&topic) { + Some(nodes) => { + if nodes.len() < MAX_ADS_PER_TOPIC { + Duration::from_secs(0) + } else { + match nodes.get(0) { + Some((_, insert_time)) => { + let elapsed_time = now.saturating_duration_since(*insert_time); + AD_LIFETIME.saturating_sub(elapsed_time) + } + None 
=> { + #[cfg(debug_assertions)] + panic!("Panic on debug, topic was not removed when empty"); + #[cfg(not(debug_assertions))] + { + error!("Topic was not removed when empty"); + return Poll::Ready(Err("No nodes for topic".into())); + } + } + } + } + } + None => { + if self.total_ads < MAX_ADS { + Duration::from_secs(0) + } else { + match self.expirations.get(0) { + Some((fut, _)) => fut.deadline().saturating_duration_since(now), + None => { + #[cfg(debug_assertions)] + panic!("Panic on debug, mismatched mapping between expiration queue and total ads count"); + #[cfg(not(debug_assertions))] + { + error!("Mismatched mapping between expiration queue and total ads count"); + return Duration::from_secs(0); + } + } + } + } + } + } + } + + pub fn insert(&mut self, node_id: NodeId, topic: Topic) { + let now = Instant::now(); + if let Some(nodes) = self.ads.get_mut(&topic) { + nodes.push_back((node_id, now)); + } else { + let mut nodes = VecDeque::new(); + nodes.push_back((node_id, now)); + self.ads.insert(topic, nodes); + } + self.expirations + .push_back((Box::pin(sleep(Duration::from_secs(60 * 15))), topic)); + self.total_ads += 1; + } + + // Should first be be called after checking if list is empty in + fn next_to_expire(&mut self) -> Result<(&mut Pin>, Topic), String> { + if self.expirations.is_empty() { + return Err("No ads in 'table'".into()); + } + match self.expirations.get_mut(0) { + Some((fut, topic)) => Ok((fut, *topic)), + None => { + #[cfg(debug_assertions)] + panic!( + "Panic on debug, mismatched mapping between expiration queue and entry queue" + ); + #[cfg(not(debug_assertions))] + { + error!("Mismatched mapping between expiration queue and entry queue"); + return Err("Topic doesn't exist".into()); + } + } + } + } +} + +impl Stream for Ads { + // type returned can be unit type but for testing easier to get values, worth the overhead to keep? 
+ type Item = Result<((NodeId, Instant), Topic), String>; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.next_to_expire() { + Ok((fut, topic)) => { + match fut.poll_unpin(cx) { + Poll::Ready(()) => match self.ads.get_mut(&topic) { + Some(topic_ads) => match topic_ads.pop_front() { + Some((node_id, insert_time)) => { + if topic_ads.is_empty() { + self.ads.remove(&topic); + } + self.total_ads -= 1; + Poll::Ready(Some(Ok(((node_id, insert_time), topic)))) + } + None => { + #[cfg(debug_assertions)] + panic!("Panic on debug, mismatched mapping between expiration queue and entry queue"); + #[cfg(not(debug_assertions))] + { + error!("Mismatched mapping between expiration queue and entry queue"); + return Poll::Ready(Err("No nodes for topic".into())); + } + } + }, + None => { + #[cfg(debug_assertions)] + panic!("Panic on debug, mismatched mapping between expiration queue and entry queue"); + #[cfg(not(debug_assertions))] + { + error!("Mismatched mapping between expiration queue and entry queue"); + return Poll::Ready(Err("Topic doesn't exist".into())); + } + } + }, + Poll::Pending => Poll::Pending, + } + }, + Err(e)=> { + debug!("{}", e); + Poll::Pending + } + } + } +} diff --git a/src/handler/hashmap_delay.rs b/src/handler/hashmap_delay.rs deleted file mode 100644 index 3f369d961..000000000 --- a/src/handler/hashmap_delay.rs +++ /dev/null @@ -1,170 +0,0 @@ -//! A simple hashmap object coupled with a `delay_queue` which has entries that expire after a -//! fixed time. -//! -//! A `HashMapDelay` implements `Stream` which removes expired items from the map. - -/// The default delay for entries, in seconds. This is only used when `insert()` is used to add -/// entries. 
-const DEFAULT_DELAY: u64 = 30; - -use futures::prelude::*; -use std::{ - collections::HashMap, - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; -use tokio_util::time::delay_queue::{self, DelayQueue}; - -pub struct HashMapDelay -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - /// The given entries. - entries: HashMap>, - /// A queue holding the timeouts of each entry. - expirations: DelayQueue, - /// The default expiration timeout of an entry. - default_entry_timeout: Duration, -} - -/// A wrapping around entries that adds the link to the entry's expiration, via a `delay_queue` key. -struct MapEntry { - /// The expiration key for the entry. - key: delay_queue::Key, - /// The actual entry. - value: V, -} - -impl Default for HashMapDelay -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - fn default() -> Self { - HashMapDelay::new(Duration::from_secs(DEFAULT_DELAY)) - } -} - -impl HashMapDelay -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - /// Creates a new instance of `HashMapDelay`. - pub fn new(default_entry_timeout: Duration) -> Self { - HashMapDelay { - entries: HashMap::new(), - expirations: DelayQueue::new(), - default_entry_timeout, - } - } - - /// Insert an entry into the mapping. Entries will expire after the `default_entry_timeout`. - pub fn insert(&mut self, key: K, value: V) { - self.insert_at(key, value, self.default_entry_timeout); - } - - /// Inserts an entry that will expire at a given instant. - pub fn insert_at(&mut self, key: K, value: V, entry_duration: Duration) { - if self.contains_key(&key) { - // update the timeout - self.update_timeout(&key, value, entry_duration); - } else { - let delay_key = self.expirations.insert(key.clone(), entry_duration); - let entry = MapEntry { - key: delay_key, - value, - }; - self.entries.insert(key, entry); - } - } - - /// Updates the timeout for a given key. Returns true if the key existed, false otherwise. 
- /// - /// Panics if the duration is too far in the future. - pub fn update_timeout(&mut self, key: &K, value: V, timeout: Duration) -> bool { - if let Some(entry) = self.entries.get_mut(key) { - entry.value = value; - self.expirations.reset(&entry.key, timeout); - true - } else { - false - } - } - - /// Gets a reference to an entry if it exists. - /// - /// Returns None if the entry does not exist. - pub fn get(&self, key: &K) -> Option<&V> { - self.entries.get(key).map(|entry| &entry.value) - } - - /// Gets a mutable reference to an entry if it exists. - /// - /// Returns None if the entry does not exist. - pub fn _get_mut(&mut self, key: &K) -> Option<&mut V> { - self.entries.get_mut(key).map(|entry| &mut entry.value) - } - - /// Returns true if the key exists, false otherwise. - pub fn contains_key(&self, key: &K) -> bool { - self.entries.contains_key(key) - } - - /// Returns the length of the mapping. - pub fn _len(&self) -> usize { - self.entries.len() - } - - /// Removes a key from the map returning the value associated with the key that was in the map. - /// - /// Return None if the key was not in the map. - pub fn remove(&mut self, key: &K) -> Option { - if let Some(entry) = self.entries.remove(key) { - self.expirations.remove(&entry.key); - return Some(entry.value); - } - None - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns false. - pub fn _retain bool>(&mut self, mut f: F) { - let expiration = &mut self.expirations; - self.entries.retain(|key, entry| { - let result = f(key, &mut entry.value); - if !result { - expiration.remove(&entry.key); - } - result - }) - } - - /// Removes all entries from the map. 
- pub fn _clear(&mut self) { - self.entries.clear(); - self.expirations.clear(); - } -} - -impl Stream for HashMapDelay -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, - V: Unpin, -{ - type Item = Result<(K, V), String>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.expirations.poll_expired(cx) { - Poll::Ready(Some(Ok(key))) => match self.entries.remove(key.get_ref()) { - Some(entry) => Poll::Ready(Some(Ok((key.into_inner(), entry.value)))), - None => Poll::Ready(Some(Err("Value no longer exists in expirations".into()))), - }, - Poll::Ready(Some(Err(e))) => { - Poll::Ready(Some(Err(format!("delay queue error: {:?}", e)))) - } - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } -} diff --git a/src/lib.rs b/src/lib.rs index 1aac8130e..7a7302686 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -102,6 +102,7 @@ //! [`Service`]: service/struct.Service.html //! [`Session`]: session/struct.Session.html +mod advertisements; mod config; mod discv5; mod error; @@ -117,6 +118,7 @@ mod query_pool; pub mod rpc; pub mod service; pub mod socket; +pub mod ticket; #[macro_use] extern crate lazy_static; diff --git a/src/rpc.rs b/src/rpc.rs index 810047fce..742ecc6c9 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -92,7 +92,7 @@ pub enum RequestBody { /// A TOPICQUERY request. TopicQuery { /// The hashed topic we want NODES response(s) for. - topic: TopicHash + topic: TopicHash, }, } @@ -120,16 +120,16 @@ pub enum ResponseBody { response: Vec, }, /// The TICKET response. - Ticket { + Ticket { /// The response to a REGTOPIC request. - ticket: Vec, + ticket: Vec, /// The time in seconds to wait before attempting to register again. - wait_time: u64 + wait_time: u64, }, /// The REGCONFIRMATION response. - RegisterConfirmation { + RegisterConfirmation { /// The topic of a successful REGTOPIC request. 
- topic: Vec + topic: Vec, }, } diff --git a/src/service.rs b/src/service.rs index b168cbd76..8c3f427de 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,6 +18,7 @@ use self::{ query_info::{QueryInfo, QueryType}, }; use crate::{ + advertisements::Ads, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -29,7 +30,9 @@ use crate::{ query_pool::{ FindNodeQueryConfig, PredicateQueryConfig, QueryId, QueryPool, QueryPoolState, TargetKey, }, - rpc, Discv5Config, Discv5Event, Enr, + rpc, + ticket::topic_hash, + Discv5Config, Discv5Event, Enr, }; use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; @@ -37,7 +40,9 @@ use fnv::FnvHashMap; use futures::prelude::*; use parking_lot::RwLock; use rpc::*; -use std::{collections::HashMap, net::SocketAddr, sync::Arc, task::Poll, time::Instant}; +use std::{ + collections::HashMap, net::SocketAddr, sync::Arc, task::Poll, time::Duration, time::Instant, +}; use tokio::sync::{mpsc, oneshot}; use tracing::{debug, error, info, trace, warn}; @@ -45,6 +50,9 @@ mod ip_vote; mod query_info; mod test; +pub(crate) const MAX_TABLE_SIZE: usize = 5000; +pub(crate) const MAX_QUEUE_SIZE: usize = 100; + /// Request type for Protocols using `TalkReq` message. /// /// Automatically responds with an empty body on drop if @@ -188,6 +196,9 @@ pub struct Service { /// A channel that the service emits events on. event_stream: Option>, + + /// Ads + ads: Ads, } /// Active RPC request awaiting a response from the handler. @@ -283,6 +294,7 @@ impl Service { peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, + ads: Ads::new(), exit, config: config.clone(), }; @@ -414,6 +426,7 @@ impl Service { self.send_ping(enr); } } + _ = self.ads.next() => {} } } } @@ -573,11 +586,17 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } - RequestBody::RegisterTopic { .. 
} => { + RequestBody::RegisterTopic { topic, enr, ticket } => { + // todo: temp use of unwrap as hash function not properly impl + let topic_hash = topic_hash(topic).unwrap(); + // inspect ticket + let ticket_wait_time = self.ads.ticket_wait_time(topic_hash); + // send ticket, if has previous valid ticket Duration + 10 secs? + // do regconfirmation checks debug!("Received RegisterTopic request which is unimplemented"); } - RequestBody::TopicQuery { .. } => { - debug!("Received TopicQuery request which is unimplemented"); + RequestBody::TopicQuery { topic } => { + self.send_topic_query_response(node_address, id, topic); } } } @@ -900,6 +919,15 @@ impl Service { self.send_rpc_request(active_request); } + fn send_topic_query_response( + &mut self, + node_address: NodeAddress, + rpc_id: RequestId, + topic: [u8; 32], + ) { + unimplemented!() + } + /// Sends a NODES response, given a list of found ENR's. This function splits the nodes up /// into multiple responses to ensure the response stays below the maximum packet size. 
fn send_nodes_response( diff --git a/src/service/test.rs b/src/service/test.rs index 29b21b189..04a0b7f92 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -97,6 +97,7 @@ async fn build_service( peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, + ads: Ads::new(), exit, config, } diff --git a/src/ticket/mod.rs b/src/ticket/mod.rs new file mode 100644 index 000000000..72bf1f6bf --- /dev/null +++ b/src/ticket/mod.rs @@ -0,0 +1,11 @@ +type Topic = [u8; 32]; + +// Temporary, some hash function will probably be used here instead of padding +pub fn topic_hash(topic: Vec) -> Result { + if topic.len() > 32 { + return Err("Topic is greater than 32 bytes".into()); + } + let mut topic_hash = [0u8; 32]; + topic_hash[32 - topic.len()..].copy_from_slice(&topic); + Ok(topic_hash) +} From 11b20dc1e2d76a1009b0766636e8fd74289b25bd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Apr 2022 15:25:05 +0200 Subject: [PATCH 004/391] Set ad lifetime in constructor to facilitate testing --- src/advertisements.rs | 45 ++++++++++++++++++++++--------------------- src/service.rs | 2 +- src/service/test.rs | 2 +- 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/src/advertisements.rs b/src/advertisements.rs index 5a7ca34dc..f5f926898 100644 --- a/src/advertisements.rs +++ b/src/advertisements.rs @@ -10,21 +10,22 @@ use tracing::debug; pub const MAX_ADS_PER_TOPIC: usize = 100; pub const MAX_ADS: i32 = 5000; -pub const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); type Topic = [u8; 32]; pub struct Ads { expirations: VecDeque<(Pin>, Topic)>, ads: HashMap>, total_ads: i32, + ad_lifetime: Duration, } impl Ads { - pub fn new() -> Self { + pub fn new(ad_lifetime: Duration) -> Self { Ads { expirations: VecDeque::new(), ads: HashMap::new(), total_ads: 0, + ad_lifetime, } } @@ -38,7 +39,7 @@ impl Ads { match nodes.get(0) { Some((_, insert_time)) => { let elapsed_time = now.saturating_duration_since(*insert_time); - 
AD_LIFETIME.saturating_sub(elapsed_time) + self.ad_lifetime.saturating_sub(elapsed_time) } None => { #[cfg(debug_assertions)] @@ -83,11 +84,11 @@ impl Ads { self.ads.insert(topic, nodes); } self.expirations - .push_back((Box::pin(sleep(Duration::from_secs(60 * 15))), topic)); + .push_back((Box::pin(sleep(self.ad_lifetime)), topic)); self.total_ads += 1; } - // Should first be be called after checking if list is empty in + // Should first be be called after checking if list is empty in fn next_to_expire(&mut self) -> Result<(&mut Pin>, Topic), String> { if self.expirations.is_empty() { return Err("No ads in 'table'".into()); @@ -114,10 +115,10 @@ impl Stream for Ads { type Item = Result<((NodeId, Instant), Topic), String>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.next_to_expire() { - Ok((fut, topic)) => { - match fut.poll_unpin(cx) { - Poll::Ready(()) => match self.ads.get_mut(&topic) { - Some(topic_ads) => match topic_ads.pop_front() { + Ok((fut, topic)) => match fut.poll_unpin(cx) { + Poll::Ready(()) => match self.ads.get_mut(&topic) { + Some(topic_ads) => { + match topic_ads.pop_front() { Some((node_id, insert_time)) => { if topic_ads.is_empty() { self.ads.remove(&topic); @@ -134,21 +135,21 @@ impl Stream for Ads { return Poll::Ready(Err("No nodes for topic".into())); } } - }, - None => { - #[cfg(debug_assertions)] - panic!("Panic on debug, mismatched mapping between expiration queue and entry queue"); - #[cfg(not(debug_assertions))] - { - error!("Mismatched mapping between expiration queue and entry queue"); - return Poll::Ready(Err("Topic doesn't exist".into())); - } } - }, - Poll::Pending => Poll::Pending, - } + } + None => { + #[cfg(debug_assertions)] + panic!("Panic on debug, mismatched mapping between expiration queue and entry queue"); + #[cfg(not(debug_assertions))] + { + error!("Mismatched mapping between expiration queue and entry queue"); + return Poll::Ready(Err("Topic doesn't exist".into())); + } + } + }, + 
Poll::Pending => Poll::Pending, }, - Err(e)=> { + Err(e) => { debug!("{}", e); Poll::Pending } diff --git a/src/service.rs b/src/service.rs index 8c3f427de..01c2d7527 100644 --- a/src/service.rs +++ b/src/service.rs @@ -294,7 +294,7 @@ impl Service { peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads: Ads::new(), + ads: Ads::new(Duration::from_secs(60 * 15)), exit, config: config.clone(), }; diff --git a/src/service/test.rs b/src/service/test.rs index 04a0b7f92..2649e3052 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -97,7 +97,7 @@ async fn build_service( peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads: Ads::new(), + ads: Ads::new(Duration::from_secs(60)), exit, config, } From c7191ca4314c60236b20b1d23f891fe1d547de2a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Apr 2022 15:42:35 +0200 Subject: [PATCH 005/391] Restructure code --- src/{advertisements.rs => advertisement/mod.rs} | 2 ++ src/advertisement/test.rs | 8 ++++++++ src/{ticket/mod.rs => advertisement/ticket.rs} | 0 src/lib.rs | 3 +-- src/service.rs | 6 +----- 5 files changed, 12 insertions(+), 7 deletions(-) rename src/{advertisements.rs => advertisement/mod.rs} (99%) create mode 100644 src/advertisement/test.rs rename src/{ticket/mod.rs => advertisement/ticket.rs} (100%) diff --git a/src/advertisements.rs b/src/advertisement/mod.rs similarity index 99% rename from src/advertisements.rs rename to src/advertisement/mod.rs index f5f926898..8c29da9a7 100644 --- a/src/advertisements.rs +++ b/src/advertisement/mod.rs @@ -8,6 +8,8 @@ use std::task::{Context, Poll}; use tokio::time::{sleep, Instant, Sleep}; use tracing::debug; +pub mod ticket; + pub const MAX_ADS_PER_TOPIC: usize = 100; pub const MAX_ADS: i32 = 5000; diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs new file mode 100644 index 000000000..252ab24b8 --- /dev/null +++ b/src/advertisement/test.rs @@ -0,0 +1,8 @@ 
+#![cfg(test)] + +use super::*; + +#[test] +fn insert_ad() { + let ads = Ads::new(Duration::from_secs(60)); +} diff --git a/src/ticket/mod.rs b/src/advertisement/ticket.rs similarity index 100% rename from src/ticket/mod.rs rename to src/advertisement/ticket.rs diff --git a/src/lib.rs b/src/lib.rs index 7a7302686..1572c515a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -102,7 +102,7 @@ //! [`Service`]: service/struct.Service.html //! [`Session`]: session/struct.Session.html -mod advertisements; +mod advertisement; mod config; mod discv5; mod error; @@ -118,7 +118,6 @@ mod query_pool; pub mod rpc; pub mod service; pub mod socket; -pub mod ticket; #[macro_use] extern crate lazy_static; diff --git a/src/service.rs b/src/service.rs index 01c2d7527..0ada96388 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,7 +18,7 @@ use self::{ query_info::{QueryInfo, QueryType}, }; use crate::{ - advertisements::Ads, + advertisement::{Ads, ticket::topic_hash}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -31,7 +31,6 @@ use crate::{ FindNodeQueryConfig, PredicateQueryConfig, QueryId, QueryPool, QueryPoolState, TargetKey, }, rpc, - ticket::topic_hash, Discv5Config, Discv5Event, Enr, }; use delay_map::HashSetDelay; @@ -50,9 +49,6 @@ mod ip_vote; mod query_info; mod test; -pub(crate) const MAX_TABLE_SIZE: usize = 5000; -pub(crate) const MAX_QUEUE_SIZE: usize = 100; - /// Request type for Protocols using `TalkReq` message. 
/// /// Automatically responds with an empty body on drop if From aeb483fc6248d2ea713b1a5a03c0b110272d951f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Apr 2022 16:30:43 +0200 Subject: [PATCH 006/391] Split up send nodes method for reuse for TOPICQUERY req --- src/advertisement/mod.rs | 23 +++++++++++++++-------- src/service.rs | 33 +++++++++++++++++++++++++-------- 2 files changed, 40 insertions(+), 16 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 8c29da9a7..404893cec 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -1,6 +1,6 @@ use super::*; use core::time::Duration; -use enr::NodeId; +use enr::{CombinedKey, Enr}; use futures::prelude::*; use std::collections::{HashMap, VecDeque}; use std::pin::Pin; @@ -16,7 +16,7 @@ pub const MAX_ADS: i32 = 5000; type Topic = [u8; 32]; pub struct Ads { expirations: VecDeque<(Pin>, Topic)>, - ads: HashMap>, + ads: HashMap, Instant)>>, total_ads: i32, ad_lifetime: Duration, } @@ -31,6 +31,13 @@ impl Ads { } } + pub fn get_ad_nodes(&self, topic: Topic) -> Result>, String> { + match self.ads.get(&topic) { + Some(topic_ads) => Ok(topic_ads.into_iter().map(|(enr, _)| enr.clone()).collect()), + None => Err("No ads for this topic".into()), + } + } + pub fn ticket_wait_time(&self, topic: Topic) -> Duration { let now = Instant::now(); match self.ads.get(&topic) { @@ -76,13 +83,13 @@ impl Ads { } } - pub fn insert(&mut self, node_id: NodeId, topic: Topic) { + pub fn insert(&mut self, node_record: Enr, topic: Topic) { let now = Instant::now(); if let Some(nodes) = self.ads.get_mut(&topic) { - nodes.push_back((node_id, now)); + nodes.push_back((node_record, now)); } else { let mut nodes = VecDeque::new(); - nodes.push_back((node_id, now)); + nodes.push_back((node_record, now)); self.ads.insert(topic, nodes); } self.expirations @@ -114,19 +121,19 @@ impl Ads { impl Stream for Ads { // type returned can be unit type but for testing easier to get values, worth the overhead 
to keep? - type Item = Result<((NodeId, Instant), Topic), String>; + type Item = Result<((Enr, Instant), Topic), String>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.next_to_expire() { Ok((fut, topic)) => match fut.poll_unpin(cx) { Poll::Ready(()) => match self.ads.get_mut(&topic) { Some(topic_ads) => { match topic_ads.pop_front() { - Some((node_id, insert_time)) => { + Some((node_record, insert_time)) => { if topic_ads.is_empty() { self.ads.remove(&topic); } self.total_ads -= 1; - Poll::Ready(Some(Ok(((node_id, insert_time), topic)))) + Poll::Ready(Some(Ok(((node_record, insert_time), topic)))) } None => { #[cfg(debug_assertions)] diff --git a/src/service.rs b/src/service.rs index 0ada96388..926766bfb 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,7 +18,7 @@ use self::{ query_info::{QueryInfo, QueryType}, }; use crate::{ - advertisement::{Ads, ticket::topic_hash}, + advertisement::{ticket::topic_hash, Ads}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -30,8 +30,7 @@ use crate::{ query_pool::{ FindNodeQueryConfig, PredicateQueryConfig, QueryId, QueryPool, QueryPoolState, TargetKey, }, - rpc, - Discv5Config, Discv5Event, Enr, + rpc, Discv5Config, Discv5Event, Enr, }; use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; @@ -531,7 +530,7 @@ impl Service { let id = req.id; match req.body { RequestBody::FindNode { distances } => { - self.send_nodes_response(node_address, id, distances); + self.send_find_nodes_response(node_address, id, distances); } RequestBody::Ping { enr_seq } => { // check if we need to update the known ENR @@ -921,12 +920,19 @@ impl Service { rpc_id: RequestId, topic: [u8; 32], ) { - unimplemented!() + let nodes_to_send = match self.ads.get_ad_nodes(topic) { + Ok(node_records) => node_records, + Err(e) => { + debug!("{}", e); + Vec::new() + } + }; + self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); } /// Sends a NODES 
response, given a list of found ENR's. This function splits the nodes up /// into multiple responses to ensure the response stays below the maximum packet size. - fn send_nodes_response( + fn send_find_nodes_response( &mut self, node_address: NodeAddress, rpc_id: RequestId, @@ -962,7 +968,16 @@ impl Service { nodes_to_send.push(node); } } + self.send_nodes_response(nodes_to_send, node_address, rpc_id, "FINDNODE"); + } + fn send_nodes_response( + &self, + nodes_to_send: Vec, + node_address: NodeAddress, + rpc_id: RequestId, + query: &str, + ) { // if there are no nodes, send an empty response if nodes_to_send.is_empty() { let response = Response { @@ -973,7 +988,8 @@ impl Service { }, }; trace!( - "Sending empty FINDNODES response to: {}", + "Sending empty {} response to: {}", + query, node_address.node_id ); let _ = self @@ -1029,7 +1045,8 @@ impl Service { for response in responses { trace!( - "Sending FINDNODES response to: {}. Response: {} ", + "Sending {} response to: {}. Response: {} ", + query, node_address, response ); From 47503fd15a73e46153ccac2a41b8cdfdfe800ea8 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Apr 2022 16:42:17 +0200 Subject: [PATCH 007/391] Improve error handling --- src/advertisement/mod.rs | 4 ++-- src/service.rs | 10 ++++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 404893cec..2fb0b9eda 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -137,10 +137,10 @@ impl Stream for Ads { } None => { #[cfg(debug_assertions)] - panic!("Panic on debug, mismatched mapping between expiration queue and entry queue"); + panic!("Panic on debug, topic key should be deleted if no ad nodes queued for it"); #[cfg(not(debug_assertions))] { - error!("Mismatched mapping between expiration queue and entry queue"); + error!("Topic key should be deleted if no ad nodes queued for it"); return Poll::Ready(Err("No nodes for topic".into())); } } diff --git 
a/src/service.rs b/src/service.rs index 926766bfb..8687422a0 100644 --- a/src/service.rs +++ b/src/service.rs @@ -340,6 +340,7 @@ impl Service { error!("Failed to return the event stream channel"); } } + //ServiceRequest::TopicQuery() => {} } } Some(event) = self.handler_recv.recv() => { @@ -582,8 +583,13 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - // todo: temp use of unwrap as hash function not properly impl - let topic_hash = topic_hash(topic).unwrap(); + let topic_hash = match topic_hash(topic) { + Ok(hash) => hash, + Err(e) => { + debug!("{}", e); + [0;32] + } + }; // inspect ticket let ticket_wait_time = self.ads.ticket_wait_time(topic_hash); // send ticket, if has previous valid ticket Duration + 10 secs? From 15cb6f3ed7ca19399ba8618fdebeb8e517e21fab Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Apr 2022 16:58:51 +0200 Subject: [PATCH 008/391] Add test for insert ad --- src/advertisement/mod.rs | 1 + src/advertisement/test.rs | 22 +++++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 2fb0b9eda..331321630 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -9,6 +9,7 @@ use tokio::time::{sleep, Instant, Sleep}; use tracing::debug; pub mod ticket; +mod test; pub const MAX_ADS_PER_TOPIC: usize = 100; pub const MAX_ADS: i32 = 5000; diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 252ab24b8..27868231e 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -1,8 +1,24 @@ #![cfg(test)] use super::*; +use enr::{CombinedKey, EnrBuilder}; +use std::net::IpAddr; -#[test] -fn insert_ad() { - let ads = Ads::new(Duration::from_secs(60)); +#[tokio::test] +async fn insert_ad() { + // Create the test values needed + let port = 6666; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr 
= EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + + let mut ads = Ads::new(Duration::from_secs(60)); + + let topic = [1;32]; + + ads.insert(enr.clone(), topic); + + let nodes = ads.get_ad_nodes(topic).unwrap_or(vec![]); + + assert_eq!(nodes, vec![enr]); } From 4e22caac86e20b0293aba211e7a1585802df687c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Apr 2022 17:03:23 +0200 Subject: [PATCH 009/391] Improve error handling --- src/advertisement/mod.rs | 4 ++-- src/service.rs | 11 ++++------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 331321630..c1095fc81 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -53,10 +53,10 @@ impl Ads { } None => { #[cfg(debug_assertions)] - panic!("Panic on debug, topic was not removed when empty"); + panic!("Panic on debug,topic key should be deleted if no ad nodes queued for it"); #[cfg(not(debug_assertions))] { - error!("Topic was not removed when empty"); + error!("Topic key should be deleted if no ad nodes queued for it"); return Poll::Ready(Err("No nodes for topic".into())); } } diff --git a/src/service.rs b/src/service.rs index 8687422a0..7eb528c58 100644 --- a/src/service.rs +++ b/src/service.rs @@ -926,13 +926,10 @@ impl Service { rpc_id: RequestId, topic: [u8; 32], ) { - let nodes_to_send = match self.ads.get_ad_nodes(topic) { - Ok(node_records) => node_records, - Err(e) => { - debug!("{}", e); - Vec::new() - } - }; + let nodes_to_send = self.ads.get_ad_nodes(topic).unwrap_or_else(|e| { + debug!("{}", e); + Vec::new() + }); self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); } From 58d3f6d7c1b05796e89ece56d048479878cf445c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Apr 2022 08:53:43 +0200 Subject: [PATCH 010/391] Complete test for insert ad --- src/advertisement/mod.rs | 44 ++++++++++++++++++++++++++++----------- src/advertisement/test.rs | 19 ++++++++++++++--- 2 files 
changed, 48 insertions(+), 15 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index c1095fc81..f28ec68e2 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -15,9 +15,20 @@ pub const MAX_ADS_PER_TOPIC: usize = 100; pub const MAX_ADS: i32 = 5000; type Topic = [u8; 32]; + +pub struct Ad { + node_record: Enr, + insert_time: Instant, +} + +impl PartialEq for Ad { + fn eq(&self, other: &Self) -> bool { + self.node_record == other.node_record + } +} pub struct Ads { expirations: VecDeque<(Pin>, Topic)>, - ads: HashMap, Instant)>>, + ads: HashMap>, total_ads: i32, ad_lifetime: Duration, } @@ -34,7 +45,7 @@ impl Ads { pub fn get_ad_nodes(&self, topic: Topic) -> Result>, String> { match self.ads.get(&topic) { - Some(topic_ads) => Ok(topic_ads.into_iter().map(|(enr, _)| enr.clone()).collect()), + Some(topic_ads) => Ok(topic_ads.into_iter().map(|ad| ad.node_record.clone()).collect()), None => Err("No ads for this topic".into()), } } @@ -47,8 +58,8 @@ impl Ads { Duration::from_secs(0) } else { match nodes.get(0) { - Some((_, insert_time)) => { - let elapsed_time = now.saturating_duration_since(*insert_time); + Some(ad) => { + let elapsed_time = now.saturating_duration_since(ad.insert_time); self.ad_lifetime.saturating_sub(elapsed_time) } None => { @@ -84,22 +95,31 @@ impl Ads { } } - pub fn insert(&mut self, node_record: Enr, topic: Topic) { + /*pub fn regconfirmation(&self, node_record: Enr, topic: Topic, ticket: Vec) -> Result<(), String> { + // check if ticket is valid + self.insert(node_record, topic); + }*/ + + fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), String> { let now = Instant::now(); if let Some(nodes) = self.ads.get_mut(&topic) { - nodes.push_back((node_record, now)); + if nodes.contains(&Ad { node_record: node_record.clone(), insert_time: now }) { + debug!("This node {} is already advertising this topic", node_record.node_id()); + return Err("Node already advertising this topic".into()); + } 
+ nodes.push_back(Ad { node_record, insert_time: now }); } else { let mut nodes = VecDeque::new(); - nodes.push_back((node_record, now)); + nodes.push_back(Ad { node_record, insert_time: now }); self.ads.insert(topic, nodes); } self.expirations .push_back((Box::pin(sleep(self.ad_lifetime)), topic)); self.total_ads += 1; + Ok(()) } - // Should first be be called after checking if list is empty in - fn next_to_expire(&mut self) -> Result<(&mut Pin>, Topic), String> { + fn next_to_expire_table(&mut self) -> Result<(&mut Pin>, Topic), String> { if self.expirations.is_empty() { return Err("No ads in 'table'".into()); } @@ -124,17 +144,17 @@ impl Stream for Ads { // type returned can be unit type but for testing easier to get values, worth the overhead to keep? type Item = Result<((Enr, Instant), Topic), String>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.next_to_expire() { + match self.next_to_expire_table() { Ok((fut, topic)) => match fut.poll_unpin(cx) { Poll::Ready(()) => match self.ads.get_mut(&topic) { Some(topic_ads) => { match topic_ads.pop_front() { - Some((node_record, insert_time)) => { + Some(ad) => { if topic_ads.is_empty() { self.ads.remove(&topic); } self.total_ads -= 1; - Poll::Ready(Some(Ok(((node_record, insert_time), topic)))) + Poll::Ready(Some(Ok(((ad.node_record, ad.insert_time), topic)))) } None => { #[cfg(debug_assertions)] diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 27868231e..c86885759 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -5,20 +5,33 @@ use enr::{CombinedKey, EnrBuilder}; use std::net::IpAddr; #[tokio::test] -async fn insert_ad() { +async fn insert_ads() { // Create the test values needed let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let 
key = CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let mut ads = Ads::new(Duration::from_secs(60)); let topic = [1;32]; + let topic_2 = [2;32]; + + ads.insert(enr.clone(), topic).unwrap(); + + assert_eq!(ads.insert(enr.clone(), topic).map_err(|e| e), Err("Node already advertising this topic".into())); - ads.insert(enr.clone(), topic); + ads.insert(enr_2.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic_2).unwrap(); let nodes = ads.get_ad_nodes(topic).unwrap_or(vec![]); + let nodes_topic_2 = ads.get_ad_nodes(topic_2).unwrap_or(vec![]); - assert_eq!(nodes, vec![enr]); + assert_eq!(nodes, vec![enr.clone(), enr_2]); + assert_eq!(nodes_topic_2, vec![enr]); } From 1b1bca67ea2deae84c68dd39852eb4e854bcf198 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Apr 2022 09:40:57 +0200 Subject: [PATCH 011/391] Test poll_next for ads --- src/advertisement/mod.rs | 36 ++++++++------------------------ src/advertisement/test.rs | 43 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 51 insertions(+), 28 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index f28ec68e2..226628146 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -118,34 +118,20 @@ impl Ads { self.total_ads += 1; Ok(()) } - - fn next_to_expire_table(&mut self) -> Result<(&mut Pin>, Topic), String> { - if self.expirations.is_empty() { - return Err("No ads in 'table'".into()); - } - match self.expirations.get_mut(0) { - Some((fut, topic)) => Ok((fut, *topic)), - None => { - #[cfg(debug_assertions)] - panic!( - "Panic on debug, mismatched mapping between expiration queue and entry queue" - ); - #[cfg(not(debug_assertions))] - { - error!("Mismatched mapping between expiration queue and entry queue"); - return Err("Topic doesn't exist".into()); - } - } - } - } } impl Stream for Ads { // type returned can be unit type but for testing easier to get values, worth the overhead to keep? 
type Item = Result<((Enr, Instant), Topic), String>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.next_to_expire_table() { - Ok((fut, topic)) => match fut.poll_unpin(cx) { + let (fut, topic) = match self.expirations.get_mut(0) { + Some((fut, topic)) => (fut, *topic), + None => { + debug!("No ads in 'table'"); + return Poll::Pending; + }, + }; + match fut.poll_unpin(cx) { Poll::Ready(()) => match self.ads.get_mut(&topic) { Some(topic_ads) => { match topic_ads.pop_front() { @@ -154,6 +140,7 @@ impl Stream for Ads { self.ads.remove(&topic); } self.total_ads -= 1; + self.expirations.remove(0); Poll::Ready(Some(Ok(((ad.node_record, ad.insert_time), topic)))) } None => { @@ -178,11 +165,6 @@ impl Stream for Ads { } }, Poll::Pending => Poll::Pending, - }, - Err(e) => { - debug!("{}", e); - Poll::Pending } - } } } diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index c86885759..d419dcf40 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -5,7 +5,7 @@ use enr::{CombinedKey, EnrBuilder}; use std::net::IpAddr; #[tokio::test] -async fn insert_ads() { +async fn insert_ad() { // Create the test values needed let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); @@ -35,3 +35,44 @@ async fn insert_ads() { assert_eq!(nodes, vec![enr.clone(), enr_2]); assert_eq!(nodes_topic_2, vec![enr]); } + +#[tokio::test] +async fn poll_ads() { + // Create the test values needed + let port = 6666; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + + let mut ads = Ads::new(Duration::from_secs(1)); + + let topic = [1;32]; + let topic_2 = [2;32]; + let topic_3= [3;32]; + + ads.insert(enr.clone(), topic).unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + ads.insert(enr.clone(), topic_2).unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + ads.insert(enr.clone(), 
topic_3).unwrap(); + + let mut expired_ads = Vec::new(); + + tokio::select! { + Some(Ok((_, topic))) = ads.next() => { + expired_ads.push(topic); + } + } + tokio::select! { + Some(Ok((_, topic))) = ads.next() => { + expired_ads.push(topic); + } + } + tokio::select! { + Some(Ok((_, topic))) = ads.next() => { + expired_ads.push(topic); + } + } + assert_eq!(expired_ads, vec![topic, topic_2, topic_3]) +} + From e4bfdd8dc2c3ea0cabb416386117c1f34964f962 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Apr 2022 10:35:47 +0200 Subject: [PATCH 012/391] Improve poll_ads test --- src/advertisement/mod.rs | 1 + src/advertisement/test.rs | 43 ++++++++++++++++++++------------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 226628146..2b0a4235b 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -16,6 +16,7 @@ pub const MAX_ADS: i32 = 5000; type Topic = [u8; 32]; +#[derive(Debug)] pub struct Ad { node_record: Enr, insert_time: Instant, diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index d419dcf40..52ce6c20f 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -5,7 +5,7 @@ use enr::{CombinedKey, EnrBuilder}; use std::net::IpAddr; #[tokio::test] -async fn insert_ad() { +async fn insert_ad_and_get_nodes() { // Create the test values needed let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); @@ -24,6 +24,7 @@ async fn insert_ad() { ads.insert(enr.clone(), topic).unwrap(); + // Since 60 seconds haven't passed assert_eq!(ads.insert(enr.clone(), topic).map_err(|e| e), Err("Node already advertising this topic".into())); ads.insert(enr_2.clone(), topic).unwrap(); @@ -44,35 +45,35 @@ async fn poll_ads() { let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = 
CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let mut ads = Ads::new(Duration::from_secs(1)); - let topic = [1;32]; + let topic_1 = [1;32]; let topic_2 = [2;32]; - let topic_3= [3;32]; - ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic_1).unwrap(); + ads.insert(enr_2, topic_1).unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; ads.insert(enr.clone(), topic_2).unwrap(); - tokio::time::sleep(Duration::from_secs(1)).await; - ads.insert(enr.clone(), topic_3).unwrap(); let mut expired_ads = Vec::new(); - tokio::select! { - Some(Ok((_, topic))) = ads.next() => { - expired_ads.push(topic); - } - } - tokio::select! { - Some(Ok((_, topic))) = ads.next() => { - expired_ads.push(topic); + for _ in 0..4 { + tokio::select! { + Some(Ok((_, topic))) = ads.next() => { + expired_ads.push(topic); + if topic == topic_2 { + // Since (enr, topic_1) should have expired, inserting it anew should be possible + ads.insert(enr.clone(), topic_1).unwrap(); + } + } } } - tokio::select! 
{ - Some(Ok((_, topic))) = ads.next() => { - expired_ads.push(topic); - } - } - assert_eq!(expired_ads, vec![topic, topic_2, topic_3]) -} + assert_eq!(expired_ads, vec![topic_1, topic_1, topic_2, topic_1]) +} From 9ad68059cf6cb42de22f717ef7c5f42b3294c076 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Apr 2022 15:40:29 +0200 Subject: [PATCH 013/391] Set max ads in constructor to allow for easy testing of ticket wait time --- Cargo.toml | 1 + src/advertisement/mod.rs | 13 +++++++------ src/advertisement/test.rs | 41 +++++++++++++++++++++++++++++++++++++-- src/lib.rs | 1 + src/service.rs | 2 +- src/service/test.rs | 2 +- 6 files changed, 50 insertions(+), 10 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 76468aa1b..bc5b7d077 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,6 +38,7 @@ tracing-subscriber = { version = "0.3.3", features = ["env-filter"] } lru = "0.7.1" hashlink = "0.7.0" delay_map = "0.1.1" +more-asserts = "0.2.2" [dev-dependencies] rand_07 = { package = "rand", version = "0.7" } diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 2b0a4235b..33d3397bc 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -11,9 +11,6 @@ use tracing::debug; pub mod ticket; mod test; -pub const MAX_ADS_PER_TOPIC: usize = 100; -pub const MAX_ADS: i32 = 5000; - type Topic = [u8; 32]; #[derive(Debug)] @@ -32,15 +29,19 @@ pub struct Ads { ads: HashMap>, total_ads: i32, ad_lifetime: Duration, + max_ads_per_topic: usize, + max_ads: i32, } impl Ads { - pub fn new(ad_lifetime: Duration) -> Self { + pub fn new(ad_lifetime: Duration, max_ads_per_topic: usize, max_ads: i32) -> Self { Ads { expirations: VecDeque::new(), ads: HashMap::new(), total_ads: 0, ad_lifetime, + max_ads_per_topic, + max_ads, } } @@ -55,7 +56,7 @@ impl Ads { let now = Instant::now(); match self.ads.get(&topic) { Some(nodes) => { - if nodes.len() < MAX_ADS_PER_TOPIC { + if nodes.len() < self.max_ads_per_topic { Duration::from_secs(0) } else { match nodes.get(0) 
{ @@ -76,7 +77,7 @@ impl Ads { } } None => { - if self.total_ads < MAX_ADS { + if self.total_ads < self.max_ads { Duration::from_secs(0) } else { match self.expirations.get(0) { diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 52ce6c20f..5eb3910ac 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -3,6 +3,7 @@ use super::*; use enr::{CombinedKey, EnrBuilder}; use std::net::IpAddr; +use more_asserts::{assert_gt, assert_lt}; #[tokio::test] async fn insert_ad_and_get_nodes() { @@ -17,7 +18,7 @@ async fn insert_ad_and_get_nodes() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(60)); + let mut ads = Ads::new(Duration::from_secs(60), 10, 50); let topic = [1;32]; let topic_2 = [2;32]; @@ -37,6 +38,42 @@ async fn insert_ad_and_get_nodes() { assert_eq!(nodes_topic_2, vec![enr]); } +#[tokio::test] +async fn ticket_wait_time_no_wait_time() { + let ads = Ads::new(Duration::from_secs(1), 10, 50); + let topic = [1;32]; + let wait_time = ads.ticket_wait_time(topic); + assert_eq!(wait_time, Duration::from_secs(0)) +} + +#[tokio::test] +async fn ticket_wait_time() { + // Create the test values needed + let port = 6666; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + + let mut ads = Ads::new(Duration::from_secs(2), 2, 50); + + let topic = [1;32]; + + ads.insert(enr, topic).unwrap(); + assert_eq!(ads.ticket_wait_time(topic), Duration::from_secs(0)); + + ads.insert(enr_2, topic).unwrap(); + assert_gt!(ads.ticket_wait_time(topic), Duration::from_secs(1)); + assert_lt!(ads.ticket_wait_time(topic), 
Duration::from_secs(2)); + + tokio::time::sleep(Duration::from_secs(2)).await; + assert_eq!(ads.ticket_wait_time(topic), Duration::from_secs(0)); +} + #[tokio::test] async fn poll_ads() { // Create the test values needed @@ -50,7 +87,7 @@ async fn poll_ads() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(1)); + let mut ads = Ads::new(Duration::from_secs(1), 10, 50); let topic_1 = [1;32]; let topic_2 = [2;32]; diff --git a/src/lib.rs b/src/lib.rs index 1572c515a..0905bd4fa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -121,6 +121,7 @@ pub mod socket; #[macro_use] extern crate lazy_static; +extern crate more_asserts; pub type Enr = enr::Enr; diff --git a/src/service.rs b/src/service.rs index 7eb528c58..484468675 100644 --- a/src/service.rs +++ b/src/service.rs @@ -289,7 +289,7 @@ impl Service { peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads: Ads::new(Duration::from_secs(60 * 15)), + ads: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), exit, config: config.clone(), }; diff --git a/src/service/test.rs b/src/service/test.rs index 2649e3052..833e150e8 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -97,7 +97,7 @@ async fn build_service( peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads: Ads::new(Duration::from_secs(60)), + ads: Ads::new(Duration::from_secs(60*15), 100 as usize, 50000), exit, config, } From f3f9a1a4ed50220c5d30fc1fa6ca22acbfeb96b0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Apr 2022 16:05:19 +0200 Subject: [PATCH 014/391] Add skeleton for tickets --- src/advertisement/mod.rs | 9 +++++---- src/advertisement/ticket.rs | 16 ++++++++++++++++ src/service.rs | 28 +++++++++++++++++++++++----- 3 files changed, 44 insertions(+), 9 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 
33d3397bc..573a9f208 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -1,4 +1,5 @@ use super::*; +use ticket::Ticket; use core::time::Duration; use enr::{CombinedKey, Enr}; use futures::prelude::*; @@ -97,10 +98,10 @@ impl Ads { } } - /*pub fn regconfirmation(&self, node_record: Enr, topic: Topic, ticket: Vec) -> Result<(), String> { - // check if ticket is valid - self.insert(node_record, topic); - }*/ + pub fn regconfirmation(&mut self, node_record: Enr, topic: Topic, ticket: Ticket) -> Result<(), String> { + // chose which ad to insert from some pool of registrants-within-10-seconds-from-x + self.insert(node_record, topic) + } fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), String> { let now = Instant::now(); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 72bf1f6bf..471f897d0 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -9,3 +9,19 @@ pub fn topic_hash(topic: Vec) -> Result { topic_hash[32 - topic.len()..].copy_from_slice(&topic); Ok(topic_hash) } + +pub struct Ticket {} + +impl Ticket { + pub fn new() -> Self { + Ticket{} + } + + pub fn decode(ticket_bytes: Vec) -> Result { + if ticket_bytes.is_empty() { + return Err("Ticket has wrong format".into()); + } + Ok(Ticket{}) + } +} + diff --git a/src/service.rs b/src/service.rs index 484468675..b0ff3c16e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,7 +18,7 @@ use self::{ query_info::{QueryInfo, QueryType}, }; use crate::{ - advertisement::{ticket::topic_hash, Ads}, + advertisement::{ticket::{topic_hash, Ticket}, Ads}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -590,10 +590,15 @@ impl Service { [0;32] } }; - // inspect ticket - let ticket_wait_time = self.ads.ticket_wait_time(topic_hash); - // send ticket, if has previous valid ticket Duration + 10 secs? 
- // do regconfirmation checks + let wait_time = self.ads.ticket_wait_time(topic_hash); + self.send_ticket_response(wait_time); + match Ticket::decode(ticket) { + Ok(ticket) => match self.ads.regconfirmation(enr, topic_hash, ticket) { + Ok(()) => self.send_regconfirmation_response(), + Err(e) => debug!("{}", e), + }, + Err(e) => debug!("{}", e), + } debug!("Received RegisterTopic request which is unimplemented"); } RequestBody::TopicQuery { topic } => { @@ -920,6 +925,19 @@ impl Service { self.send_rpc_request(active_request); } + fn send_ticket_response( + &mut self, + wait_time: Duration, + ) { + unimplemented!() + } + + fn send_regconfirmation_response( + &mut self, + ) { + unimplemented!() + } + fn send_topic_query_response( &mut self, node_address: NodeAddress, From 2af4d0f0be13074afde35e47e7d8710205055b5d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Apr 2022 19:12:26 +0200 Subject: [PATCH 015/391] Add automatic re-regtopic upon end of wait-time ticket & add topics --- src/advertisement/mod.rs | 2 +- src/advertisement/ticket.rs | 64 ++++++++++++++++++++++++++++++++++--- src/service.rs | 52 ++++++++++++++++++++++++------ src/service/test.rs | 2 ++ 4 files changed, 105 insertions(+), 15 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 573a9f208..ac3bfa4fa 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -12,7 +12,7 @@ use tracing::debug; pub mod ticket; mod test; -type Topic = [u8; 32]; +pub type Topic = [u8; 32]; #[derive(Debug)] pub struct Ad { diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 471f897d0..b2f61dce0 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,4 +1,6 @@ -type Topic = [u8; 32]; +use super::*; +use crate::node_info::NodeAddress; +use delay_map::HashMapDelay; // Temporary, some hash function will probably be used here instead of padding pub fn topic_hash(topic: Vec) -> Result { @@ -10,18 +12,70 @@ pub fn 
topic_hash(topic: Vec) -> Result { Ok(topic_hash) } -pub struct Ticket {} +pub struct Ticket { + //nonce: u64, + //src_node_id: NodeId, + //src_ip: IpAddr, + topic: Topic, + //req_time: Instant, + //wait_time: Duration, + //cum_wait: Duration,*/ +} impl Ticket { - pub fn new() -> Self { - Ticket{} + pub fn new( + //nonce: u64, + //src_node_id: NodeId, + //src_ip: IpAddr, + topic: Topic, + //req_time: Instant, + //wait_time: Duration,*/ + ) -> Self { + Ticket{ + //nonce, + //src_node_id, + //src_ip, + topic, + //req_time, + //wait_time, + } } pub fn decode(ticket_bytes: Vec) -> Result { if ticket_bytes.is_empty() { return Err("Ticket has wrong format".into()); } - Ok(Ticket{}) + Ok(Ticket{topic: [0u8; 32]}) } } +pub struct Tickets { + tickets: HashMapDelay<(NodeAddress, Topic), Ticket> +} + +impl Tickets { + pub fn new() -> Self { + Tickets{ + tickets: HashMapDelay::new(Duration::default()), + } + } + + pub fn insert(&mut self, node_address: NodeAddress, ticket: Ticket, wait_time: Duration) { + self.tickets.insert_at((node_address, ticket.topic), ticket, wait_time); + } +} + +impl Stream for Tickets { + type Item = Result<(NodeAddress, Ticket), String>; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.tickets.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(((node_address, _), ticket)))) => Poll::Ready(Some(Ok((node_address, ticket)))), + Poll::Ready(Some(Err(e))) => { + debug!("{}", e); + Poll::Pending + }, + Poll::Ready(None) => Poll::Pending, + Poll::Pending => Poll::Pending, + } + } +} \ No newline at end of file diff --git a/src/service.rs b/src/service.rs index b0ff3c16e..465bf38ab 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,7 +18,7 @@ use self::{ query_info::{QueryInfo, QueryType}, }; use crate::{ - advertisement::{ticket::{topic_hash, Ticket}, Ads}, + advertisement::{ticket::{topic_hash, Ticket, Tickets}, Ads, Topic}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ 
-42,6 +42,7 @@ use std::{ collections::HashMap, net::SocketAddr, sync::Arc, task::Poll, time::Duration, time::Instant, }; use tokio::sync::{mpsc, oneshot}; +use tokio_util::time::DelayQueue; use tracing::{debug, error, info, trace, warn}; mod ip_vote; @@ -192,8 +193,14 @@ pub struct Service { /// A channel that the service emits events on. event_stream: Option>, - /// Ads + /// Ads advertised for other nodes. ads: Ads, + + /// Tickets received by other nodes. + tickets: Tickets, + + /// Topics advertised on other nodes. + topics: DelayQueue<(NodeAddress, Topic)>, } /// Active RPC request awaiting a response from the handler. @@ -290,6 +297,7 @@ impl Service { discv5_recv, event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), + tickets: Tickets::new(), exit, config: config.clone(), }; @@ -340,7 +348,13 @@ impl Service { error!("Failed to return the event stream channel"); } } - //ServiceRequest::TopicQuery() => {} + /*ServiceRequest::TopicQuery(topic) => { + // to which nodes? + self.send_topic_query(topic); + }*/ + /*ServiceRequest::RegTopic(topic) => { + // to which nodes? 
+ }*/ } } Some(event) = self.handler_recv.recv() => { @@ -423,6 +437,9 @@ impl Service { } } _ = self.ads.next() => {} + Some(Ok((node_address, ticket))) = self.tickets.next() => { + self.send_reg_topic_request(node_address, ticket); + } } } } @@ -591,15 +608,17 @@ impl Service { } }; let wait_time = self.ads.ticket_wait_time(topic_hash); - self.send_ticket_response(wait_time); + let new_ticket = Ticket::new(topic_hash); + self.send_ticket_response(node_address, new_ticket, wait_time); + match Ticket::decode(ticket) { Ok(ticket) => match self.ads.regconfirmation(enr, topic_hash, ticket) { - Ok(()) => self.send_regconfirmation_response(), + Ok(()) => self.send_regconfirmation_response(topic_hash), Err(e) => debug!("{}", e), }, Err(e) => debug!("{}", e), } - debug!("Received RegisterTopic request which is unimplemented"); + debug!("Received RegisterTopic request which is not fully implemented"); } RequestBody::TopicQuery { topic } => { self.send_topic_query_response(node_address, id, topic); @@ -837,10 +856,14 @@ impl Service { _ => error!("Invalid callback for response"), } } - ResponseBody::Ticket { .. } => { - error!("Received a TICKET response. This is unimplemented and should be unreachable."); + ResponseBody::Ticket { ticket, wait_time } => { + // todo(emhane): What should max wait_time be so insert_at in Tickets doesn't panic? + match Ticket::decode(ticket) { + Ok(ticket) => self.tickets.insert(node_address, ticket, Duration::from_secs(wait_time)), + Err(e) => debug!("{}", e), + } } - ResponseBody::RegisterConfirmation { .. } => { + ResponseBody::RegisterConfirmation { topic } => { error!("Received a RegisterConfirmation response. 
This is unimplemented and should be unreachable."); } } @@ -925,8 +948,18 @@ impl Service { self.send_rpc_request(active_request); } + fn send_reg_topic_request( + &mut self, + node_address: NodeAddress, + ticket: Ticket, + ) { + unimplemented!() + } + fn send_ticket_response( &mut self, + node_address: NodeAddress, + ticket: Ticket, wait_time: Duration, ) { unimplemented!() @@ -934,6 +967,7 @@ impl Service { fn send_regconfirmation_response( &mut self, + topic: Topic, ) { unimplemented!() } diff --git a/src/service/test.rs b/src/service/test.rs index 833e150e8..c9b22847d 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -98,6 +98,8 @@ async fn build_service( discv5_recv, event_stream: None, ads: Ads::new(Duration::from_secs(60*15), 100 as usize, 50000), + tickets: Tickets::new(), + topics: DelayQueue::new(), exit, config, } From b5d75d78a79c92fb01500858fec81cf17d45f0b4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Apr 2022 20:04:33 +0200 Subject: [PATCH 016/391] Add automatic re-regtopic for topics we are advertising and clean up ads --- src/advertisement/mod.rs | 91 +++++++++++++++++++++---------------- src/advertisement/test.rs | 23 ++++++---- src/advertisement/ticket.rs | 51 +++++++++++++++++---- src/service.rs | 81 +++++++++++++++++++-------------- src/service/test.rs | 2 +- 5 files changed, 154 insertions(+), 94 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index ac3bfa4fa..5dd3219cc 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -1,5 +1,4 @@ use super::*; -use ticket::Ticket; use core::time::Duration; use enr::{CombinedKey, Enr}; use futures::prelude::*; @@ -9,8 +8,8 @@ use std::task::{Context, Poll}; use tokio::time::{sleep, Instant, Sleep}; use tracing::debug; -pub mod ticket; mod test; +pub mod ticket; pub type Topic = [u8; 32]; @@ -20,6 +19,15 @@ pub struct Ad { insert_time: Instant, } +impl Ad { + fn new(node_record: Enr, insert_time: Instant) -> Self { + Ad { + node_record, 
+ insert_time, + } + } +} + impl PartialEq for Ad { fn eq(&self, other: &Self) -> bool { self.node_record == other.node_record @@ -48,7 +56,10 @@ impl Ads { pub fn get_ad_nodes(&self, topic: Topic) -> Result>, String> { match self.ads.get(&topic) { - Some(topic_ads) => Ok(topic_ads.into_iter().map(|ad| ad.node_record.clone()).collect()), + Some(topic_ads) => Ok(topic_ads + .into_iter() + .map(|ad| ad.node_record.clone()) + .collect()), None => Err("No ads for this topic".into()), } } @@ -98,22 +109,26 @@ impl Ads { } } - pub fn regconfirmation(&mut self, node_record: Enr, topic: Topic, ticket: Ticket) -> Result<(), String> { - // chose which ad to insert from some pool of registrants-within-10-seconds-from-x - self.insert(node_record, topic) - } - - fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), String> { + pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), String> { let now = Instant::now(); if let Some(nodes) = self.ads.get_mut(&topic) { - if nodes.contains(&Ad { node_record: node_record.clone(), insert_time: now }) { - debug!("This node {} is already advertising this topic", node_record.node_id()); + if nodes.contains(&Ad::new(node_record.clone(), now)) { + debug!( + "This node {} is already advertising this topic", + node_record.node_id() + ); return Err("Node already advertising this topic".into()); } - nodes.push_back(Ad { node_record, insert_time: now }); + nodes.push_back(Ad { + node_record, + insert_time: now, + }); } else { let mut nodes = VecDeque::new(); - nodes.push_back(Ad { node_record, insert_time: now }); + nodes.push_back(Ad { + node_record, + insert_time: now, + }); self.ads.insert(topic, nodes); } self.expirations @@ -132,42 +147,40 @@ impl Stream for Ads { None => { debug!("No ads in 'table'"); return Poll::Pending; - }, + } }; match fut.poll_unpin(cx) { - Poll::Ready(()) => match self.ads.get_mut(&topic) { - Some(topic_ads) => { - match topic_ads.pop_front() { - Some(ad) => { - if topic_ads.is_empty() { 
- self.ads.remove(&topic); - } - self.total_ads -= 1; - self.expirations.remove(0); - Poll::Ready(Some(Ok(((ad.node_record, ad.insert_time), topic)))) - } - None => { - #[cfg(debug_assertions)] - panic!("Panic on debug, topic key should be deleted if no ad nodes queued for it"); - #[cfg(not(debug_assertions))] - { - error!("Topic key should be deleted if no ad nodes queued for it"); - return Poll::Ready(Err("No nodes for topic".into())); - } - } + Poll::Ready(()) => match self.ads.get_mut(&topic) { + Some(topic_ads) => match topic_ads.pop_front() { + Some(ad) => { + if topic_ads.is_empty() { + self.ads.remove(&topic); } + self.total_ads -= 1; + self.expirations.remove(0); + Poll::Ready(Some(Ok(((ad.node_record, ad.insert_time), topic)))) } None => { #[cfg(debug_assertions)] - panic!("Panic on debug, mismatched mapping between expiration queue and entry queue"); + panic!("Panic on debug, topic key should be deleted if no ad nodes queued for it"); #[cfg(not(debug_assertions))] { - error!("Mismatched mapping between expiration queue and entry queue"); - return Poll::Ready(Err("Topic doesn't exist".into())); + error!("Topic key should be deleted if no ad nodes queued for it"); + return Poll::Ready(Some(Err("No nodes for topic".into()))); } } }, - Poll::Pending => Poll::Pending, - } + None => { + #[cfg(debug_assertions)] + panic!("Panic on debug, mismatched mapping between expiration queue and entry queue"); + #[cfg(not(debug_assertions))] + { + error!("Mismatched mapping between expiration queue and entry queue"); + return Poll::Ready(Some(Err("Topic doesn't exist".into()))); + } + } + }, + Poll::Pending => Poll::Pending, + } } } diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 5eb3910ac..30cc2483e 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -2,8 +2,8 @@ use super::*; use enr::{CombinedKey, EnrBuilder}; -use std::net::IpAddr; use more_asserts::{assert_gt, assert_lt}; +use std::net::IpAddr; #[tokio::test] async fn 
insert_ad_and_get_nodes() { @@ -20,13 +20,16 @@ async fn insert_ad_and_get_nodes() { let mut ads = Ads::new(Duration::from_secs(60), 10, 50); - let topic = [1;32]; - let topic_2 = [2;32]; + let topic = [1; 32]; + let topic_2 = [2; 32]; ads.insert(enr.clone(), topic).unwrap(); // Since 60 seconds haven't passed - assert_eq!(ads.insert(enr.clone(), topic).map_err(|e| e), Err("Node already advertising this topic".into())); + assert_eq!( + ads.insert(enr.clone(), topic).map_err(|e| e), + Err("Node already advertising this topic".into()) + ); ads.insert(enr_2.clone(), topic).unwrap(); ads.insert(enr.clone(), topic_2).unwrap(); @@ -38,15 +41,15 @@ async fn insert_ad_and_get_nodes() { assert_eq!(nodes_topic_2, vec![enr]); } -#[tokio::test] +#[tokio::test] async fn ticket_wait_time_no_wait_time() { let ads = Ads::new(Duration::from_secs(1), 10, 50); - let topic = [1;32]; + let topic = [1; 32]; let wait_time = ads.ticket_wait_time(topic); assert_eq!(wait_time, Duration::from_secs(0)) } -#[tokio::test] +#[tokio::test] async fn ticket_wait_time() { // Create the test values needed let port = 6666; @@ -61,7 +64,7 @@ async fn ticket_wait_time() { let mut ads = Ads::new(Duration::from_secs(2), 2, 50); - let topic = [1;32]; + let topic = [1; 32]; ads.insert(enr, topic).unwrap(); assert_eq!(ads.ticket_wait_time(topic), Duration::from_secs(0)); @@ -89,8 +92,8 @@ async fn poll_ads() { let mut ads = Ads::new(Duration::from_secs(1), 10, 50); - let topic_1 = [1;32]; - let topic_2 = [2;32]; + let topic_1 = [1; 32]; + let topic_2 = [2; 32]; ads.insert(enr.clone(), topic_1).unwrap(); ads.insert(enr_2, topic_1).unwrap(); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index b2f61dce0..58360f104 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,8 +1,8 @@ use super::*; use crate::node_info::NodeAddress; use delay_map::HashMapDelay; +use std::cmp::Eq; -// Temporary, some hash function will probably be used here instead of padding pub fn 
topic_hash(topic: Vec) -> Result { if topic.len() > 32 { return Err("Topic is greater than 32 bytes".into()); @@ -12,6 +12,26 @@ pub fn topic_hash(topic: Vec) -> Result { Ok(topic_hash) } +#[derive(PartialEq, Eq, Hash, Clone)] +pub struct ActiveTopic { + node_address: NodeAddress, + topic: Topic, +} + +impl ActiveTopic { + pub fn new(node_address: NodeAddress, topic: Topic) -> Self { + ActiveTopic { + node_address, + topic, + } + } + + pub fn node_address(&self) -> NodeAddress { + self.node_address.clone() + } +} + +#[derive(Default)] pub struct Ticket { //nonce: u64, //src_node_id: NodeId, @@ -31,7 +51,7 @@ impl Ticket { //req_time: Instant, //wait_time: Duration,*/ ) -> Self { - Ticket{ + Ticket { //nonce, //src_node_id, //src_ip, @@ -45,23 +65,32 @@ impl Ticket { if ticket_bytes.is_empty() { return Err("Ticket has wrong format".into()); } - Ok(Ticket{topic: [0u8; 32]}) + Ok(Ticket { topic: [0u8; 32] }) } + + /*pub fn regconfirmation(&mut self, node_record: Enr, topic: Topic, ticket: Ticket) -> Result<(), String> { + // chose which ad to insert from some pool of registrants-within-10-seconds-from-x + Ok(()) + }*/ } pub struct Tickets { - tickets: HashMapDelay<(NodeAddress, Topic), Ticket> + tickets: HashMapDelay, } impl Tickets { pub fn new() -> Self { - Tickets{ + Tickets { tickets: HashMapDelay::new(Duration::default()), } } pub fn insert(&mut self, node_address: NodeAddress, ticket: Ticket, wait_time: Duration) { - self.tickets.insert_at((node_address, ticket.topic), ticket, wait_time); + self.tickets.insert_at( + ActiveTopic::new(node_address, ticket.topic), + ticket, + wait_time, + ); } } @@ -69,13 +98,15 @@ impl Stream for Tickets { type Item = Result<(NodeAddress, Ticket), String>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.tickets.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(((node_address, _), ticket)))) => Poll::Ready(Some(Ok((node_address, ticket)))), + Poll::Ready(Some(Ok((active_topic, ticket)))) => { + 
Poll::Ready(Some(Ok((active_topic.node_address, ticket)))) + } Poll::Ready(Some(Err(e))) => { debug!("{}", e); Poll::Pending - }, + } Poll::Ready(None) => Poll::Pending, Poll::Pending => Poll::Pending, - } + } } -} \ No newline at end of file +} diff --git a/src/service.rs b/src/service.rs index 465bf38ab..a32f46daa 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,7 +18,10 @@ use self::{ query_info::{QueryInfo, QueryType}, }; use crate::{ - advertisement::{ticket::{topic_hash, Ticket, Tickets}, Ads, Topic}, + advertisement::{ + ticket::{topic_hash, ActiveTopic, Ticket, Tickets}, + Ads, Topic, + }, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -200,7 +203,7 @@ pub struct Service { tickets: Tickets, /// Topics advertised on other nodes. - topics: DelayQueue<(NodeAddress, Topic)>, + topics: DelayQueue, } /// Active RPC request awaiting a response from the handler. @@ -298,6 +301,7 @@ impl Service { event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), tickets: Tickets::new(), + topics: DelayQueue::new(), exit, config: config.clone(), }; @@ -436,10 +440,13 @@ impl Service { self.send_ping(enr); } } - _ = self.ads.next() => {} Some(Ok((node_address, ticket))) = self.tickets.next() => { self.send_reg_topic_request(node_address, ticket); } + Some(Ok(expired)) = self.topics.next() => { + self.send_reg_topic_request(expired.into_inner().node_address(), Ticket::default()); + } + _ = self.ads.next() => {} } } } @@ -600,24 +607,28 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - let topic_hash = match topic_hash(topic) { - Ok(hash) => hash, - Err(e) => { - debug!("{}", e); - [0;32] + match topic_hash(topic) { + Ok(topic_hash) => { + let wait_time = self.ads.ticket_wait_time(topic_hash); + let new_ticket = Ticket::new(topic_hash); + self.send_ticket_response(node_address, new_ticket, wait_time); + + let ticket = 
match Ticket::decode(ticket) { + Ok(ticket) => ticket, + Err(e) => { + debug!("{}", e); + Ticket::default() + } + }; + // choose which ad to reg based on ticket, for example if some node has empty ticket + // or is coming back, and possibly other stuff + match self.ads.insert(enr, topic_hash) { + Ok(()) => self.send_regconfirmation_response(topic_hash), + Err(e) => debug!("{}", e), + } } - }; - let wait_time = self.ads.ticket_wait_time(topic_hash); - let new_ticket = Ticket::new(topic_hash); - self.send_ticket_response(node_address, new_ticket, wait_time); - - match Ticket::decode(ticket) { - Ok(ticket) => match self.ads.regconfirmation(enr, topic_hash, ticket) { - Ok(()) => self.send_regconfirmation_response(topic_hash), - Err(e) => debug!("{}", e), - }, Err(e) => debug!("{}", e), - } + }; debug!("Received RegisterTopic request which is not fully implemented"); } RequestBody::TopicQuery { topic } => { @@ -857,14 +868,23 @@ impl Service { } } ResponseBody::Ticket { ticket, wait_time } => { - // todo(emhane): What should max wait_time be so insert_at in Tickets doesn't panic? - match Ticket::decode(ticket) { - Ok(ticket) => self.tickets.insert(node_address, ticket, Duration::from_secs(wait_time)), - Err(e) => debug!("{}", e), - } + // todo(emhane): What should max wait_time be so insert_at in Tickets doesn't panic? + match Ticket::decode(ticket) { + Ok(ticket) => self.tickets.insert( + node_address, + ticket, + Duration::from_secs(wait_time), + ), + Err(e) => debug!("{}", e), + } } ResponseBody::RegisterConfirmation { topic } => { - error!("Received a RegisterConfirmation response. 
This is unimplemented and should be unreachable."); + match topic_hash(topic) { + Ok(topic_hash) => { + self.topics.insert(ActiveTopic::new(node_address, topic_hash), Duration::from_secs(60*15)); + }, + Err(e) => debug!("{}", e), + } } } } else { @@ -948,11 +968,7 @@ impl Service { self.send_rpc_request(active_request); } - fn send_reg_topic_request( - &mut self, - node_address: NodeAddress, - ticket: Ticket, - ) { + fn send_reg_topic_request(&mut self, node_address: NodeAddress, ticket: Ticket) { unimplemented!() } @@ -965,10 +981,7 @@ impl Service { unimplemented!() } - fn send_regconfirmation_response( - &mut self, - topic: Topic, - ) { + fn send_regconfirmation_response(&mut self, topic: Topic) { unimplemented!() } diff --git a/src/service/test.rs b/src/service/test.rs index c9b22847d..cb3aa6a7a 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -97,7 +97,7 @@ async fn build_service( peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads: Ads::new(Duration::from_secs(60*15), 100 as usize, 50000), + ads: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), tickets: Tickets::new(), topics: DelayQueue::new(), exit, From f1ba9ad86d3009e17982f9db1f669a936ea0ef7d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Apr 2022 23:01:27 +0200 Subject: [PATCH 017/391] Implement send methods for various ad related responses --- src/advertisement/mod.rs | 1 + src/advertisement/ticket.rs | 15 ++++--- src/handler/mod.rs | 2 +- src/service.rs | 88 ++++++++++++++++++++++++++++++------- 4 files changed, 84 insertions(+), 22 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 5dd3219cc..2d3e6d6c7 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -13,6 +13,7 @@ pub mod ticket; pub type Topic = [u8; 32]; +/// An ad we are adevrtising for another node #[derive(Debug)] pub struct Ad { node_record: Enr, diff --git a/src/advertisement/ticket.rs 
b/src/advertisement/ticket.rs index 58360f104..97672c9e9 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -3,6 +3,7 @@ use crate::node_info::NodeAddress; use delay_map::HashMapDelay; use std::cmp::Eq; + pub fn topic_hash(topic: Vec) -> Result { if topic.len() > 32 { return Err("Topic is greater than 32 bytes".into()); @@ -25,13 +26,17 @@ impl ActiveTopic { topic, } } - + pub fn node_address(&self) -> NodeAddress { self.node_address.clone() } + + pub fn topic(&self) -> Topic { + self.topic + } } -#[derive(Default)] +#[derive(Default, Debug)] pub struct Ticket { //nonce: u64, //src_node_id: NodeId, @@ -95,12 +100,10 @@ impl Tickets { } impl Stream for Tickets { - type Item = Result<(NodeAddress, Ticket), String>; + type Item = Result<(ActiveTopic, Ticket), String>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.tickets.poll_next_unpin(cx) { - Poll::Ready(Some(Ok((active_topic, ticket)))) => { - Poll::Ready(Some(Ok((active_topic.node_address, ticket)))) - } + Poll::Ready(Some(Ok((active_topic, ticket)))) => Poll::Ready(Some(Ok((active_topic, ticket)))), Poll::Ready(Some(Err(e))) => { debug!("{}", e); Poll::Pending diff --git a/src/handler/mod.rs b/src/handler/mod.rs index af69d0840..600a4ef16 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -606,7 +606,7 @@ impl Handler { ); // We do not allow multiple WHOAREYOU packets for a single challenge request. If we have - // already sent a WHOAREYOU ourselves, we drop sessions who send us a WHOAREYOU in + // already sent a WHOAREYOU ourselves, we drop sessions which send us a WHOAREYOU in // response. 
if request_call.handshake_sent { warn!( diff --git a/src/service.rs b/src/service.rs index a32f46daa..71f0674a7 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, ActiveTopic, Ticket, Tickets}, + ticket::{topic_hash, Ticket, Tickets, ActiveTopic}, Ads, Topic, }, error::{RequestError, ResponseError}, @@ -353,11 +353,10 @@ impl Service { } } /*ServiceRequest::TopicQuery(topic) => { - // to which nodes? self.send_topic_query(topic); - }*/ - /*ServiceRequest::RegTopic(topic) => { - // to which nodes? + } + ServiceRequest::RegisterTopic(topic) => { + self.reg_topic_request(topic, self.local_enr(), Ticket::default()); }*/ } } @@ -440,11 +439,12 @@ impl Service { self.send_ping(enr); } } - Some(Ok((node_address, ticket))) = self.tickets.next() => { - self.send_reg_topic_request(node_address, ticket); + Some(Ok((active_topic, ticket))) = self.tickets.next() => { + self.reg_topic_request(active_topic.topic(), self.local_enr(), ticket); } Some(Ok(expired)) = self.topics.next() => { - self.send_reg_topic_request(expired.into_inner().node_address(), Ticket::default()); + let expired = expired.into_inner(); + self.reg_topic_request(expired.topic(), self.local_enr(), Ticket::default()); } _ = self.ads.next() => {} } @@ -611,7 +611,7 @@ impl Service { Ok(topic_hash) => { let wait_time = self.ads.ticket_wait_time(topic_hash); let new_ticket = Ticket::new(topic_hash); - self.send_ticket_response(node_address, new_ticket, wait_time); + self.send_ticket_response(node_address.clone(), id.clone(), new_ticket, wait_time); let ticket = match Ticket::decode(ticket) { Ok(ticket) => ticket, @@ -620,10 +620,12 @@ impl Service { Ticket::default() } }; + // choose which ad to reg based on ticket, for example if some node has empty ticket // or is coming back, and possibly other stuff + match self.ads.insert(enr, topic_hash) { - Ok(()) => self.send_regconfirmation_response(topic_hash), + Ok(()) => 
self.send_regconfirmation_response(node_address, id, topic_hash), Err(e) => debug!("{}", e), } } @@ -968,21 +970,73 @@ impl Service { self.send_rpc_request(active_request); } - fn send_reg_topic_request(&mut self, node_address: NodeAddress, ticket: Ticket) { - unimplemented!() + fn reg_topic_request( + &mut self, + topic: Topic, + enr: Enr, + ticket: Ticket + ) { + /*let request_body = RequestBody::RegisterTopic { + topic: topic.to_vec(), + enr, + ticket: format!("{:?}", ticket).as_bytes().to_vec(), + }; + + let active_request = ActiveRequest { + contact, + request_body, + query_id: None, + callback: None, + }; + self.send_rpc_request(active_request);*/ } fn send_ticket_response( &mut self, node_address: NodeAddress, + rpc_id: RequestId, ticket: Ticket, wait_time: Duration, ) { - unimplemented!() + let response = Response { + id: rpc_id, + body: ResponseBody::Ticket { + ticket: format!("{:?}", ticket).as_bytes().to_vec(), + wait_time: wait_time.as_secs(), + }, + }; + trace!( + "Sending TICKET response to: {}. Response: {} ", + node_address, + response + ); + let _ = self.handler_send.send(HandlerIn::Response( + node_address.clone(), + Box::new(response), + )); } - fn send_regconfirmation_response(&mut self, topic: Topic) { - unimplemented!() + fn send_regconfirmation_response( + &mut self, + node_address: NodeAddress, + rpc_id: RequestId, + topic: Topic + ) { + let response = Response { + id: rpc_id, + body: ResponseBody::RegisterConfirmation { + topic: topic.to_vec(), + }, + }; + trace!( + "Sending REGCONFIRMATION response to: {}. 
Response: {} ", + node_address, + response + ); + let _ = self.handler_send.send(HandlerIn::Response( + node_address.clone(), + Box::new(response), + )); } fn send_topic_query_response( @@ -1505,6 +1559,10 @@ impl Service { }) .await } + + fn local_enr(&self) -> Enr { + self.local_enr.read().clone() + } } /// The result of the `query_event_poll` indicating an action is required to further progress an From 783b46073b142de618810af178f668fff569be58 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 6 Apr 2022 07:49:49 +0200 Subject: [PATCH 018/391] Run cargo fmt --- src/advertisement/mod.rs | 8 +++--- src/advertisement/ticket.rs | 7 ++--- src/service.rs | 53 +++++++++++++++++++++---------------- 3 files changed, 39 insertions(+), 29 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 2d3e6d6c7..eb621cada 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -2,9 +2,11 @@ use super::*; use core::time::Duration; use enr::{CombinedKey, Enr}; use futures::prelude::*; -use std::collections::{HashMap, VecDeque}; -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + collections::{HashMap, VecDeque}, + pin::Pin, + task::{Context, Poll}, +}; use tokio::time::{sleep, Instant, Sleep}; use tracing::debug; diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 97672c9e9..7f6e17fee 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -3,7 +3,6 @@ use crate::node_info::NodeAddress; use delay_map::HashMapDelay; use std::cmp::Eq; - pub fn topic_hash(topic: Vec) -> Result { if topic.len() > 32 { return Err("Topic is greater than 32 bytes".into()); @@ -26,7 +25,7 @@ impl ActiveTopic { topic, } } - + pub fn node_address(&self) -> NodeAddress { self.node_address.clone() } @@ -103,7 +102,9 @@ impl Stream for Tickets { type Item = Result<(ActiveTopic, Ticket), String>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.tickets.poll_next_unpin(cx) { 
- Poll::Ready(Some(Ok((active_topic, ticket)))) => Poll::Ready(Some(Ok((active_topic, ticket)))), + Poll::Ready(Some(Ok((active_topic, ticket)))) => { + Poll::Ready(Some(Ok((active_topic, ticket)))) + } Poll::Ready(Some(Err(e))) => { debug!("{}", e); Poll::Pending diff --git a/src/service.rs b/src/service.rs index 71f0674a7..747801a56 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, Ticket, Tickets, ActiveTopic}, + ticket::{topic_hash, ActiveTopic, Ticket, Tickets}, Ads, Topic, }, error::{RequestError, ResponseError}, @@ -42,7 +42,11 @@ use futures::prelude::*; use parking_lot::RwLock; use rpc::*; use std::{ - collections::HashMap, net::SocketAddr, sync::Arc, task::Poll, time::Duration, time::Instant, + collections::HashMap, + net::SocketAddr, + sync::Arc, + task::Poll, + time::{Duration, Instant}, }; use tokio::sync::{mpsc, oneshot}; use tokio_util::time::DelayQueue; @@ -611,7 +615,12 @@ impl Service { Ok(topic_hash) => { let wait_time = self.ads.ticket_wait_time(topic_hash); let new_ticket = Ticket::new(topic_hash); - self.send_ticket_response(node_address.clone(), id.clone(), new_ticket, wait_time); + self.send_ticket_response( + node_address.clone(), + id.clone(), + new_ticket, + wait_time, + ); let ticket = match Ticket::decode(ticket) { Ok(ticket) => ticket, @@ -625,7 +634,9 @@ impl Service { // or is coming back, and possibly other stuff match self.ads.insert(enr, topic_hash) { - Ok(()) => self.send_regconfirmation_response(node_address, id, topic_hash), + Ok(()) => { + self.send_regconfirmation_response(node_address, id, topic_hash) + } Err(e) => debug!("{}", e), } } @@ -880,14 +891,15 @@ impl Service { Err(e) => debug!("{}", e), } } - ResponseBody::RegisterConfirmation { topic } => { - match topic_hash(topic) { - Ok(topic_hash) => { - self.topics.insert(ActiveTopic::new(node_address, topic_hash), Duration::from_secs(60*15)); - }, - Err(e) => debug!("{}", e), + 
ResponseBody::RegisterConfirmation { topic } => match topic_hash(topic) { + Ok(topic_hash) => { + self.topics.insert( + ActiveTopic::new(node_address, topic_hash), + Duration::from_secs(60 * 15), + ); } - } + Err(e) => debug!("{}", e), + }, } } else { warn!( @@ -970,15 +982,10 @@ impl Service { self.send_rpc_request(active_request); } - fn reg_topic_request( - &mut self, - topic: Topic, - enr: Enr, - ticket: Ticket - ) { - /*let request_body = RequestBody::RegisterTopic { - topic: topic.to_vec(), - enr, + fn reg_topic_request(&mut self, topic: Topic, enr: Enr, ticket: Ticket) { + /*let request_body = RequestBody::RegisterTopic { + topic: topic.to_vec(), + enr, ticket: format!("{:?}", ticket).as_bytes().to_vec(), }; @@ -1017,10 +1024,10 @@ impl Service { } fn send_regconfirmation_response( - &mut self, + &mut self, node_address: NodeAddress, rpc_id: RequestId, - topic: Topic + topic: Topic, ) { let response = Response { id: rpc_id, @@ -1562,7 +1569,7 @@ impl Service { fn local_enr(&self) -> Enr { self.local_enr.read().clone() - } + } } /// The result of the `query_event_poll` indicating an action is required to further progress an From 52210de835f36a5a334bb19f0c302f3a770e4345 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 6 Apr 2022 12:48:12 +0200 Subject: [PATCH 019/391] Fix CI --- src/advertisement/mod.rs | 16 ++++++++-------- src/advertisement/test.rs | 10 +++++----- src/advertisement/ticket.rs | 6 +----- src/lib.rs | 1 - src/service.rs | 37 ++++++++++++++----------------------- 5 files changed, 28 insertions(+), 42 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index eb621cada..6c1271207 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -8,7 +8,7 @@ use std::{ task::{Context, Poll}, }; use tokio::time::{sleep, Instant, Sleep}; -use tracing::debug; +use tracing::{debug, error}; mod test; pub mod ticket; @@ -67,17 +67,17 @@ impl Ads { } } - pub fn ticket_wait_time(&self, topic: Topic) -> Duration { + pub 
fn ticket_wait_time(&self, topic: Topic) -> Result { let now = Instant::now(); match self.ads.get(&topic) { Some(nodes) => { if nodes.len() < self.max_ads_per_topic { - Duration::from_secs(0) + Ok(Duration::from_secs(0)) } else { match nodes.get(0) { Some(ad) => { let elapsed_time = now.saturating_duration_since(ad.insert_time); - self.ad_lifetime.saturating_sub(elapsed_time) + Ok(self.ad_lifetime.saturating_sub(elapsed_time)) } None => { #[cfg(debug_assertions)] @@ -85,7 +85,7 @@ impl Ads { #[cfg(not(debug_assertions))] { error!("Topic key should be deleted if no ad nodes queued for it"); - return Poll::Ready(Err("No nodes for topic".into())); + return Err("No nodes for topic".into()); } } } @@ -93,17 +93,17 @@ impl Ads { } None => { if self.total_ads < self.max_ads { - Duration::from_secs(0) + Ok(Duration::from_secs(0)) } else { match self.expirations.get(0) { - Some((fut, _)) => fut.deadline().saturating_duration_since(now), + Some((fut, _)) => Ok(fut.deadline().saturating_duration_since(now)), None => { #[cfg(debug_assertions)] panic!("Panic on debug, mismatched mapping between expiration queue and total ads count"); #[cfg(not(debug_assertions))] { error!("Mismatched mapping between expiration queue and total ads count"); - return Duration::from_secs(0); + return Err("No nodes in table".into()); } } } diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 30cc2483e..76420e283 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -46,7 +46,7 @@ async fn ticket_wait_time_no_wait_time() { let ads = Ads::new(Duration::from_secs(1), 10, 50); let topic = [1; 32]; let wait_time = ads.ticket_wait_time(topic); - assert_eq!(wait_time, Duration::from_secs(0)) + assert_eq!(wait_time, Ok(Duration::from_secs(0))) } #[tokio::test] @@ -67,14 +67,14 @@ async fn ticket_wait_time() { let topic = [1; 32]; ads.insert(enr, topic).unwrap(); - assert_eq!(ads.ticket_wait_time(topic), Duration::from_secs(0)); + 
assert_eq!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(0))); ads.insert(enr_2, topic).unwrap(); - assert_gt!(ads.ticket_wait_time(topic), Duration::from_secs(1)); - assert_lt!(ads.ticket_wait_time(topic), Duration::from_secs(2)); + assert_gt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(1))); + assert_lt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(2))); tokio::time::sleep(Duration::from_secs(2)).await; - assert_eq!(ads.ticket_wait_time(topic), Duration::from_secs(0)); + assert_eq!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(0))); } #[tokio::test] diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 7f6e17fee..1fcdaef99 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -26,10 +26,6 @@ impl ActiveTopic { } } - pub fn node_address(&self) -> NodeAddress { - self.node_address.clone() - } - pub fn topic(&self) -> Topic { self.topic } @@ -43,7 +39,7 @@ pub struct Ticket { topic: Topic, //req_time: Instant, //wait_time: Duration, - //cum_wait: Duration,*/ + //cum_wait: Option,*/ } impl Ticket { diff --git a/src/lib.rs b/src/lib.rs index 0905bd4fa..1572c515a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -121,7 +121,6 @@ pub mod socket; #[macro_use] extern crate lazy_static; -extern crate more_asserts; pub type Enr = enr::Enr; diff --git a/src/service.rs b/src/service.rs index 747801a56..b085ebd90 100644 --- a/src/service.rs +++ b/src/service.rs @@ -611,10 +611,9 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - match topic_hash(topic) { - Ok(topic_hash) => { - let wait_time = self.ads.ticket_wait_time(topic_hash); - let new_ticket = Ticket::new(topic_hash); + topic_hash(topic).map(|topic| { + self.ads.ticket_wait_time(topic).map(|wait_time| { + let new_ticket = Ticket::new(topic); self.send_ticket_response( node_address.clone(), id.clone(), @@ -622,26 +621,18 @@ impl Service { wait_time, ); - let ticket = match 
Ticket::decode(ticket) { - Ok(ticket) => ticket, - Err(e) => { - debug!("{}", e); - Ticket::default() - } - }; + let ticket = Ticket::decode(ticket) + .map(|ticket| ticket) + .unwrap_or(Ticket::default()); // choose which ad to reg based on ticket, for example if some node has empty ticket // or is coming back, and possibly other stuff - match self.ads.insert(enr, topic_hash) { - Ok(()) => { - self.send_regconfirmation_response(node_address, id, topic_hash) - } - Err(e) => debug!("{}", e), - } - } - Err(e) => debug!("{}", e), - }; + self.ads + .insert(enr, topic) + .map(|e| self.send_regconfirmation_response(node_address, id, topic)); + }); + }); debug!("Received RegisterTopic request which is not fully implemented"); } RequestBody::TopicQuery { topic } => { @@ -888,7 +879,7 @@ impl Service { ticket, Duration::from_secs(wait_time), ), - Err(e) => debug!("{}", e), + Err(e) => error!("{}", e), } } ResponseBody::RegisterConfirmation { topic } => match topic_hash(topic) { @@ -898,7 +889,7 @@ impl Service { Duration::from_secs(60 * 15), ); } - Err(e) => debug!("{}", e), + Err(e) => error!("{}", e), }, } } else { @@ -1053,7 +1044,7 @@ impl Service { topic: [u8; 32], ) { let nodes_to_send = self.ads.get_ad_nodes(topic).unwrap_or_else(|e| { - debug!("{}", e); + error!("{}", e); Vec::new() }); self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); From 79e7faef8f048f0d3874d8d7637d59ac99a5e440 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 6 Apr 2022 13:28:36 +0200 Subject: [PATCH 020/391] Use map instead of match --- src/advertisement/mod.rs | 2 +- src/advertisement/ticket.rs | 2 ++ src/service.rs | 3 +-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 6c1271207..ff3d64c59 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -116,7 +116,7 @@ impl Ads { let now = Instant::now(); if let Some(nodes) = self.ads.get_mut(&topic) { if 
nodes.contains(&Ad::new(node_record.clone(), now)) { - debug!( + error!( "This node {} is already advertising this topic", node_record.node_id() ); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 1fcdaef99..0cf665954 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -2,9 +2,11 @@ use super::*; use crate::node_info::NodeAddress; use delay_map::HashMapDelay; use std::cmp::Eq; +use tracing::error; pub fn topic_hash(topic: Vec) -> Result { if topic.len() > 32 { + error!("Topic is greater than 32 bytes"); return Err("Topic is greater than 32 bytes".into()); } let mut topic_hash = [0u8; 32]; diff --git a/src/service.rs b/src/service.rs index b085ebd90..e60ceaff6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -622,7 +622,6 @@ impl Service { ); let ticket = Ticket::decode(ticket) - .map(|ticket| ticket) .unwrap_or(Ticket::default()); // choose which ad to reg based on ticket, for example if some node has empty ticket @@ -630,7 +629,7 @@ impl Service { self.ads .insert(enr, topic) - .map(|e| self.send_regconfirmation_response(node_address, id, topic)); + .map(|_| self.send_regconfirmation_response(node_address, id, topic)); }); }); debug!("Received RegisterTopic request which is not fully implemented"); From e6752191028de08d64e0b07b21a06553461d751e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 6 Apr 2022 17:22:32 +0200 Subject: [PATCH 021/391] Implement re-publishing of topics every 15 minutes --- src/advertisement/ticket.rs | 4 ++ src/config.rs | 9 +++ src/service.rs | 139 +++++++++++++++++++++++------------- src/service/query_info.rs | 2 +- src/service/test.rs | 3 +- 5 files changed, 104 insertions(+), 53 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 0cf665954..ba116e9ba 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -28,6 +28,10 @@ impl ActiveTopic { } } + pub fn node_address(&self) -> NodeAddress { + self.node_address.clone() + 
} + pub fn topic(&self) -> Topic { self.topic } diff --git a/src/config.rs b/src/config.rs index 264e8852e..989e73f04 100644 --- a/src/config.rs +++ b/src/config.rs @@ -91,6 +91,8 @@ pub struct Discv5Config { /// will last indefinitely. Default is 1 hour. pub ban_duration: Option, + pub topic_radius: Option, + /// A custom executor which can spawn the discv5 tasks. This must be a tokio runtime, with /// timing support. By default, the executor that created the discv5 struct will be used. pub executor: Option>, @@ -131,6 +133,7 @@ impl Default for Discv5Config { filter_max_bans_per_ip: Some(5), permit_ban_list: PermitBanList::default(), ban_duration: Some(Duration::from_secs(3600)), // 1 hour + topic_radius: Some(3), executor: None, } } @@ -294,6 +297,11 @@ impl Discv5ConfigBuilder { self } + pub fn topic_radius(&mut self, topic_radius: Option) -> &mut Self { + self.config.topic_radius = topic_radius; + self + } + /// A custom executor which can spawn the discv5 tasks. This must be a tokio runtime, with /// timing support. 
pub fn executor(&mut self, executor: Box) -> &mut Self { @@ -334,6 +342,7 @@ impl std::fmt::Debug for Discv5Config { let _ = builder.field("incoming_bucket_limit", &self.incoming_bucket_limit); let _ = builder.field("ping_interval", &self.ping_interval); let _ = builder.field("ban_duration", &self.ban_duration); + let _ = builder.field("topic_radius", &self.topic_radius); builder.finish() } } diff --git a/src/service.rs b/src/service.rs index e60ceaff6..988690cd5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, ActiveTopic, Ticket, Tickets}, + ticket::{topic_hash, Ticket, Tickets}, Ads, Topic, }, error::{RequestError, ResponseError}, @@ -42,14 +42,16 @@ use futures::prelude::*; use parking_lot::RwLock; use rpc::*; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, net::SocketAddr, sync::Arc, task::Poll, time::{Duration, Instant}, }; -use tokio::sync::{mpsc, oneshot}; -use tokio_util::time::DelayQueue; +use tokio::{ + sync::{mpsc, oneshot}, + time::interval, +}; use tracing::{debug, error, info, trace, warn}; mod ip_vote; @@ -206,8 +208,11 @@ pub struct Service { /// Tickets received by other nodes. tickets: Tickets, - /// Topics advertised on other nodes. - topics: DelayQueue, + /// Topics to advertise on other nodes. + topics: HashSet, + + /// Ads currently advertised on other nodes. + active_topics: Ads, } /// Active RPC request awaiting a response from the handler. @@ -305,7 +310,8 @@ impl Service { event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), tickets: Tickets::new(), - topics: DelayQueue::new(), + topics: HashSet::new(), + active_topics: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), exit, config: config.clone(), }; @@ -319,6 +325,8 @@ impl Service { /// The main execution loop of the discv5 serviced. 
async fn start(&mut self) { + let mut interval = interval(Duration::from_secs(60 * 15)); + loop { tokio::select! { _ = &mut self.exit => { @@ -333,10 +341,10 @@ impl Service { ServiceRequest::StartQuery(query, callback) => { match query { QueryKind::FindNode { target_node } => { - self.start_findnode_query(target_node, callback); + self.start_findnode_query(target_node, Some(callback)); } QueryKind::Predicate { target_node, target_peer_no, predicate } => { - self.start_predicate_query(target_node, target_peer_no, predicate, callback); + self.start_predicate_query(target_node, target_peer_no, predicate, Some(callback)); } } } @@ -422,8 +430,24 @@ impl Service { warn!("ENR not present in queries results"); } } - if result.target.callback.send(found_enrs).is_err() { - warn!("Callback dropped for query {}. Results dropped", *id); + + let node_id = match result.target.query_type { + QueryType::FindNode(node_id) => node_id, + }; + + let topic = match self.topics.get(&node_id.raw()) { + Some(topic) => Some(*topic), + None => None, + }; + + if let Some(topic) = topic { + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, self.local_enr(), Ticket::default())); + } else { + if let Some(callback) = result.target.callback { + if callback.send(found_enrs).is_err() { + warn!("Callback dropped for query {}. 
Results dropped", *id); + } + } } } } @@ -443,20 +467,22 @@ impl Service { self.send_ping(enr); } } - Some(Ok((active_topic, ticket))) = self.tickets.next() => { - self.reg_topic_request(active_topic.topic(), self.local_enr(), ticket); - } - Some(Ok(expired)) = self.topics.next() => { - let expired = expired.into_inner(); - self.reg_topic_request(expired.topic(), self.local_enr(), Ticket::default()); - } + Some(Ok((active_topic, ticket))) = self.tickets.next() => {} _ = self.ads.next() => {} + _ = self.active_topics.next() => {} + _ = interval.tick() => { + self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); + } } } } /// Internal function that starts a query. - fn start_findnode_query(&mut self, target_node: NodeId, callback: oneshot::Sender>) { + fn start_findnode_query( + &mut self, + target_node: NodeId, + callback: Option>>, + ) { let mut target = QueryInfo { query_type: QueryType::FindNode(target_node), untrusted_enrs: Default::default(), @@ -478,8 +504,10 @@ impl Service { if known_closest_peers.is_empty() { warn!("No known_closest_peers found. Return empty result without sending query."); - if target.callback.send(vec![]).is_err() { - warn!("Failed to callback"); + if let Some(callback) = target.callback { + if callback.send(vec![]).is_err() { + warn!("Failed to callback"); + } } } else { let query_config = FindNodeQueryConfig::new_from_config(&self.config); @@ -494,7 +522,7 @@ impl Service { target_node: NodeId, num_nodes: usize, predicate: Box bool + Send>, - callback: oneshot::Sender>, + callback: Option>>, ) { let mut target = QueryInfo { query_type: QueryType::FindNode(target_node), @@ -521,8 +549,10 @@ impl Service { if known_closest_peers.is_empty() { warn!("No known_closest_peers found. 
Return empty result without sending query."); - if target.callback.send(vec![]).is_err() { - warn!("Failed to callback"); + if let Some(callback) = target.callback { + if callback.send(vec![]).is_err() { + warn!("Failed to callback"); + } } } else { let mut query_config = PredicateQueryConfig::new_from_config(&self.config); @@ -611,27 +641,34 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - topic_hash(topic).map(|topic| { - self.ads.ticket_wait_time(topic).map(|wait_time| { - let new_ticket = Ticket::new(topic); - self.send_ticket_response( - node_address.clone(), - id.clone(), - new_ticket, - wait_time, - ); - - let ticket = Ticket::decode(ticket) - .unwrap_or(Ticket::default()); - - // choose which ad to reg based on ticket, for example if some node has empty ticket - // or is coming back, and possibly other stuff - + topic_hash(topic) + .map(|topic| { self.ads - .insert(enr, topic) - .map(|_| self.send_regconfirmation_response(node_address, id, topic)); - }); - }); + .ticket_wait_time(topic) + .map(|wait_time| { + let new_ticket = Ticket::new(topic); + self.send_ticket_response( + node_address.clone(), + id.clone(), + new_ticket, + wait_time, + ); + + let _ticket = Ticket::decode(ticket).unwrap_or(Ticket::default()); + + // choose which ad to reg based on ticket, for example if some node has empty ticket + // or is coming back, and possibly other stuff + + self.ads + .insert(enr, topic) + .map(|_| { + self.send_regconfirmation_response(node_address, id, topic) + }) + .ok(); + }) + .ok(); + }) + .ok(); debug!("Received RegisterTopic request which is not fully implemented"); } RequestBody::TopicQuery { topic } => { @@ -881,15 +918,15 @@ impl Service { Err(e) => error!("{}", e), } } - ResponseBody::RegisterConfirmation { topic } => match topic_hash(topic) { + ResponseBody::RegisterConfirmation { topic } => /* match topic_hash(topic) { Ok(topic_hash) => { - self.topics.insert( - 
ActiveTopic::new(node_address, topic_hash), + self.active_topics.insert( + topic, Duration::from_secs(60 * 15), ); } Err(e) => error!("{}", e), - }, + }*/{}, } } else { warn!( @@ -972,8 +1009,8 @@ impl Service { self.send_rpc_request(active_request); } - fn reg_topic_request(&mut self, topic: Topic, enr: Enr, ticket: Ticket) { - /*let request_body = RequestBody::RegisterTopic { + fn reg_topic_request(&mut self, contact: NodeContact, topic: Topic, enr: Enr, ticket: Ticket) { + let request_body = RequestBody::RegisterTopic { topic: topic.to_vec(), enr, ticket: format!("{:?}", ticket).as_bytes().to_vec(), @@ -985,7 +1022,7 @@ impl Service { query_id: None, callback: None, }; - self.send_rpc_request(active_request);*/ + self.send_rpc_request(active_request); } fn send_ticket_response( diff --git a/src/service/query_info.rs b/src/service/query_info.rs index 980474efd..5e6a139d5 100644 --- a/src/service/query_info.rs +++ b/src/service/query_info.rs @@ -14,7 +14,7 @@ pub struct QueryInfo { pub untrusted_enrs: SmallVec<[Enr; 16]>, /// A callback channel for the service that requested the query. - pub callback: oneshot::Sender>, + pub callback: Option>>, /// The number of distances we request for each peer. 
pub distances_to_request: usize, diff --git a/src/service/test.rs b/src/service/test.rs index cb3aa6a7a..cc5d3479d 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -99,7 +99,8 @@ async fn build_service( event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), tickets: Tickets::new(), - topics: DelayQueue::new(), + topics: HashSet::new(), + active_topics: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), exit, config, } From 727197eb227166b50406c9c33e7aeec048e688e4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 7 Apr 2022 09:46:49 +0200 Subject: [PATCH 022/391] Add auto resend of REGTOPIC when ticket wait time expires --- src/handler/mod.rs | 188 +++++++++++++++++++++++++++++++------------ src/handler/tests.rs | 13 ++- src/service.rs | 56 +++++++++---- src/service/test.rs | 2 +- 4 files changed, 187 insertions(+), 72 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 600a4ef16..8d0e7bc03 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -85,7 +85,7 @@ pub enum HandlerIn { /// /// Note: To update an ENR for an unknown node, we request a FINDNODE with distance 0 to the /// `NodeContact` we know of. - Request(NodeContact, Box), + Request(RequestContact, Box), /// A Response to send to a particular node to answer a HandlerOut::Request has been /// received from the application layer. 
@@ -149,10 +149,41 @@ pub struct Challenge { remote_enr: Option, } +#[derive(Debug, Clone, PartialEq)] +pub enum RequestContact { + Auto(NodeAddress), + Initiated(NodeContact), +} + +impl RequestContact { + pub fn node_address(&self) -> Result { + match self { + RequestContact::Auto(node_address) => Ok(node_address.clone()), + RequestContact::Initiated(contact) => contact.node_address(), + } + } + + pub fn node_id(&self) -> NodeId { + match self { + RequestContact::Auto(node_address) => node_address.node_id, + RequestContact::Initiated(contact) => contact.node_id(), + } + } +} + +impl std::fmt::Display for RequestContact { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RequestContact::Auto(node_address) => write!(f, "{}", node_address), + RequestContact::Initiated(contact) => write!(f, "{}", contact), + } + } +} + /// A request to a node that we are waiting for a response. #[derive(Debug)] pub(crate) struct RequestCall { - contact: NodeContact, + contact: RequestContact, /// The raw discv5 packet sent. packet: Packet, /// The unencrypted message. Required if need to re-encrypt and re-send. @@ -171,7 +202,7 @@ pub(crate) struct RequestCall { impl RequestCall { fn new( - contact: NodeContact, + contact: RequestContact, packet: Packet, request: Request, initiating_session: bool, @@ -208,7 +239,7 @@ pub struct Handler { /// The expected responses by SocketAddr which allows packets to pass the underlying filter. filter_expected_responses: Arc>>, /// Requests awaiting a handshake completion. - pending_requests: HashMap>, + pending_requests: HashMap>, /// Currently in-progress handshakes with peers. active_challenges: LruTimeCache, /// Established sessions with peers. @@ -447,55 +478,96 @@ impl Handler { /// Sends a `Request` to a node. 
async fn send_request( &mut self, - contact: NodeContact, + contact: RequestContact, request: Request, ) -> Result<(), RequestError> { - let node_address = contact - .node_address() - .map_err(|e| RequestError::InvalidEnr(e.into()))?; + match contact.clone() { + RequestContact::Auto(_) => self.regtopic_with_ticket(contact, request).await, + RequestContact::Initiated(node_contact) => { + let node_address = node_contact + .node_address() + .map_err(|e| RequestError::InvalidEnr(e.into()))?; + + if node_address.socket_addr == self.listen_socket { + debug!("Filtered request to self"); + return Err(RequestError::SelfRequest); + } - if node_address.socket_addr == self.listen_socket { - debug!("Filtered request to self"); - return Err(RequestError::SelfRequest); - } + // If there is already an active request for this node, add to pending requests + if self.active_requests.get(&node_address).is_some() { + trace!("Request queued for node: {}", node_address); + self.pending_requests + .entry(node_address) + .or_insert_with(Vec::new) + .push((contact, request)); + return Ok(()); + } + + let (packet, initiating_session) = { + if let Some(session) = self.sessions.get_mut(&node_address) { + // Encrypt the message and send + let packet = session + .encrypt_message(self.node_id, &request.clone().encode()) + .map_err(|e| RequestError::EncryptionFailed(format!("{:?}", e)))?; + (packet, false) + } else { + // No session exists, start a new handshake + trace!( + "Starting session. 
Sending random packet to: {}", + node_address + ); + let packet = Packet::new_random(&self.node_id) + .map_err(RequestError::EntropyFailure)?; + // We are initiating a new session + (packet, true) + } + }; - // If there is already an active request for this node, add to pending requests - if self.active_requests.get(&node_address).is_some() { - trace!("Request queued for node: {}", node_address); - self.pending_requests - .entry(node_address) - .or_insert_with(Vec::new) - .push((contact, request)); - return Ok(()); + let call = RequestCall::new(contact, packet.clone(), request, initiating_session); + // let the filter know we are expecting a response + self.add_expected_response(node_address.socket_addr); + self.send(node_address.clone(), packet).await; + self.active_requests.insert(node_address, call); + Ok(()) + } } + } - let (packet, initiating_session) = { + async fn regtopic_with_ticket( + &mut self, + contact: RequestContact, + request: Request, + ) -> Result<(), RequestError> { + Ok(if let Ok(node_address) = contact.node_address() { if let Some(session) = self.sessions.get_mut(&node_address) { - // Encrypt the message and send + // If there is already an active request for this node, add to pending requests + if self.active_requests.get(&node_address).is_some() { + trace!("Request queued for node: {}", node_address); + self.pending_requests + .entry(node_address) + .or_insert_with(Vec::new) + .push((contact, request)); + return Ok(()); + } + let packet = session .encrypt_message(self.node_id, &request.clone().encode()) .map_err(|e| RequestError::EncryptionFailed(format!("{:?}", e)))?; - (packet, false) + + let call = RequestCall::new(contact, packet.clone(), request, false); + // let the filter know we are expecting a response + self.add_expected_response(node_address.socket_addr); + self.send(node_address.clone(), packet).await; + self.active_requests.insert(node_address, call); } else { - // No session exists, start a new handshake - trace!( - "Starting 
session. Sending random packet to: {}", - node_address + // Either the session is being established or has expired. We simply drop the + // response in this case. + warn!( + "Session is not established. Dropping request {} for node: {}", + request, node_address.node_id ); - let packet = - Packet::new_random(&self.node_id).map_err(RequestError::EntropyFailure)?; - // We are initiating a new session - (packet, true) } - }; - - let call = RequestCall::new(contact, packet.clone(), request, initiating_session); - // let the filter know we are expecting a response - self.add_expected_response(node_address.socket_addr); - self.send(node_address.clone(), packet).await; - - self.active_requests.insert(node_address, call); - Ok(()) + }) } /// Sends an RPC Response. @@ -605,13 +677,28 @@ impl Handler { request_call.contact ); + // Drop session for REGTOPIC requests automatically re-sent on ticket wait_time + // expiration. These packets are only sent to active sessions. + let contact = match request_call.contact.clone() { + RequestContact::Auto(_) => { + warn!( + "REGTOPIC reuqest automatically initiated upon ticket wait time expiration are only set to active sessions. Dropping session. Node: {}", + request_call.contact + ); + self.fail_request(request_call, RequestError::InvalidRemotePacket, true) + .await; + return; + } + RequestContact::Initiated(contact) => contact, + }; + // We do not allow multiple WHOAREYOU packets for a single challenge request. If we have // already sent a WHOAREYOU ourselves, we drop sessions which send us a WHOAREYOU in // response. if request_call.handshake_sent { warn!( "Authentication response already sent. Dropping session. 
Node: {}", - request_call.contact + contact ); self.fail_request(request_call, RequestError::InvalidRemotePacket, true) .await; @@ -629,7 +716,7 @@ impl Handler { // Generate a new session and authentication packet let (auth_packet, mut session) = match Session::encrypt_with_header( - &request_call.contact, + &contact, self.key.clone(), updated_enr, &self.node_id, @@ -659,11 +746,10 @@ impl Handler { // // All sent requests must have an associated node_id. Therefore the following // must not panic. - let node_address = request_call - .contact + let node_address = contact .node_address() .expect("All sent requests must have a node address"); - match request_call.contact.clone() { + match contact.clone() { NodeContact::Enr(enr) => { // NOTE: Here we decide if the session is outgoing or ingoing. The condition for an // outgoing session is that we originally sent a RANDOM packet (signifying we did @@ -697,13 +783,10 @@ impl Handler { // Don't know the ENR. Establish the session, but request an ENR also // Send the Auth response - let contact = request_call.contact.clone(); + let contact = contact.clone(); trace!( "Sending Authentication response to node: {}", - request_call - .contact - .node_address() - .expect("Sanitized contact") + contact.node_address().expect("Sanitized contact") ); request_call.packet = auth_packet.clone(); request_call.handshake_sent = true; @@ -718,7 +801,9 @@ impl Handler { }; session.awaiting_enr = Some(id); - let _ = self.send_request(contact, request).await; + let _ = self + .send_request(RequestContact::Initiated(contact), request) + .await; } } self.new_session(node_address, session); @@ -974,7 +1059,6 @@ impl Handler { *remaining_responses -= 1; if remaining_responses != &0 { // more responses remaining, add back the request and send the response - // add back the request and send the response self.active_requests .insert(node_address.clone(), request_call); let _ = self diff --git a/src/handler/tests.rs b/src/handler/tests.rs index 
5f18525f3..90ef31b69 100644 --- a/src/handler/tests.rs +++ b/src/handler/tests.rs @@ -71,7 +71,7 @@ async fn simple_session_message() { }); let _ = sender_send.send(HandlerIn::Request( - receiver_enr.into(), + RequestContact::Initiated(receiver_enr.into()), send_message.clone(), )); @@ -148,7 +148,7 @@ async fn multiple_messages() { // sender to send the first message then await for the session to be established let _ = sender_handler.send(HandlerIn::Request( - receiver_enr.clone().into(), + RequestContact::Initiated(receiver_enr.clone().into()), send_message.clone(), )); @@ -173,7 +173,7 @@ async fn multiple_messages() { // now the session is established, send the rest of the messages for _ in 0..messages_to_send - 1 { let _ = sender_handler.send(HandlerIn::Request( - receiver_enr.clone().into(), + RequestContact::Initiated(receiver_enr.clone().into()), send_message.clone(), )); } @@ -240,7 +240,12 @@ async fn test_active_requests_insert() { body: RequestBody::Ping { enr_seq: 1 }, }; let initiating_session = true; - let request_call = RequestCall::new(contact, packet, request, initiating_session); + let request_call = RequestCall::new( + RequestContact::Initiated(contact), + packet, + request, + initiating_session, + ); // insert the pair and verify the mapping remains in sync let nonce = request_call.packet.message_nonce().clone(); diff --git a/src/service.rs b/src/service.rs index 988690cd5..5729ec0d5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -23,7 +23,7 @@ use crate::{ Ads, Topic, }, error::{RequestError, ResponseError}, - handler::{Handler, HandlerIn, HandlerOut}, + handler::{Handler, HandlerIn, HandlerOut, RequestContact}, kbucket::{ self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable, NodeStatus, UpdateResult, @@ -218,7 +218,7 @@ pub struct Service { /// Active RPC request awaiting a response from the handler. struct ActiveRequest { /// The address the request was sent to. 
- pub contact: NodeContact, + pub contact: RequestContact, /// The request that was sent. pub request_body: RequestBody, /// The query ID if the request was related to a query. @@ -441,7 +441,10 @@ impl Service { }; if let Some(topic) = topic { - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, self.local_enr(), Ticket::default())); + let local_enr = match self.local_enr.read().clone() { + enr => enr, + }; + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), Ticket::default())); } else { if let Some(callback) = result.target.callback { if callback.send(found_enrs).is_err() { @@ -467,12 +470,17 @@ impl Service { self.send_ping(enr); } } - Some(Ok((active_topic, ticket))) = self.tickets.next() => {} - _ = self.ads.next() => {} - _ = self.active_topics.next() => {} + Some(Ok((active_topic, ticket))) = self.tickets.next() => { + let enr = match self.local_enr.read().clone() { + enr => enr, + }; + self.auto_reattempt_reg_topic_request(active_topic.node_address(), active_topic.topic(), enr, ticket); + } _ = interval.tick() => { self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); } + _ = self.ads.next() => {} + _ = self.active_topics.next() => {} } } } @@ -944,7 +952,7 @@ impl Service { enr_seq: self.local_enr.read().seq(), }; let active_request = ActiveRequest { - contact: enr.into(), + contact: RequestContact::Initiated(enr.into()), request_body, query_id: None, callback: None, @@ -982,7 +990,7 @@ impl Service { ) { let request_body = RequestBody::FindNode { distances: vec![0] }; let active_request = ActiveRequest { - contact, + contact: RequestContact::Initiated(contact), request_body, query_id: None, callback: callback.map(CallbackResponse::Enr), @@ -1001,7 +1009,7 @@ impl Service { let request_body = RequestBody::Talk { protocol, request }; let active_request = ActiveRequest { - contact, + contact: 
RequestContact::Initiated(contact), request_body, query_id: None, callback: Some(CallbackResponse::Talk(callback)), @@ -1017,7 +1025,29 @@ impl Service { }; let active_request = ActiveRequest { - contact, + contact: RequestContact::Initiated(contact), + request_body, + query_id: None, + callback: None, + }; + self.send_rpc_request(active_request); + } + + fn auto_reattempt_reg_topic_request( + &mut self, + node_address: NodeAddress, + topic: Topic, + enr: Enr, + ticket: Ticket, + ) { + let request_body = RequestBody::RegisterTopic { + topic: topic.to_vec(), + enr, + ticket: format!("{:?}", ticket).as_bytes().to_vec(), + }; + + let active_request = ActiveRequest { + contact: RequestContact::Auto(node_address), request_body, query_id: None, callback: None, @@ -1224,7 +1254,7 @@ impl Service { // find the ENR associated with the query if let Some(enr) = self.find_enr(&return_peer) { let active_request = ActiveRequest { - contact: enr.into(), + contact: RequestContact::Initiated(enr.into()), request_body, query_id: Some(query_id), callback: None, @@ -1593,10 +1623,6 @@ impl Service { }) .await } - - fn local_enr(&self) -> Enr { - self.local_enr.read().clone() - } } /// The result of the `query_event_poll` indicating an action is required to further progress an diff --git a/src/service/test.rs b/src/service/test.rs index cc5d3479d..85bb3e1c1 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -162,7 +162,7 @@ async fn test_updating_connection_on_ping() { service.active_requests.insert( RequestId(vec![1]), ActiveRequest { - contact: node_contact, + contact: RequestContact::Initiated(node_contact), request_body: rpc::RequestBody::Ping { enr_seq: 2 }, query_id: Some(QueryId(1)), callback: None, From cf419b8166edd8cfacb8fddf55f79f28fc90be65 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 7 Apr 2022 16:33:40 +0200 Subject: [PATCH 023/391] Remove unnecessary async layer in ads --- src/advertisement/mod.rs | 54 ++++++++++++++++++++++----------------- 
src/advertisement/test.rs | 5 +++- src/service.rs | 4 +-- 3 files changed, 37 insertions(+), 26 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index ff3d64c59..e0c87ad9c 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -7,7 +7,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; -use tokio::time::{sleep, Instant, Sleep}; +use tokio::time::Instant; use tracing::{debug, error}; mod test; @@ -37,7 +37,7 @@ impl PartialEq for Ad { } } pub struct Ads { - expirations: VecDeque<(Pin>, Topic)>, + expirations: VecDeque<(Instant, Topic)>, ads: HashMap>, total_ads: i32, ad_lifetime: Duration, @@ -96,7 +96,10 @@ impl Ads { Ok(Duration::from_secs(0)) } else { match self.expirations.get(0) { - Some((fut, _)) => Ok(fut.deadline().saturating_duration_since(now)), + Some((insert_time, _)) => { + let elapsed_time = now.saturating_duration_since(*insert_time); + Ok(self.ad_lifetime.saturating_sub(elapsed_time)) + } None => { #[cfg(debug_assertions)] panic!("Panic on debug, mismatched mapping between expiration queue and total ads count"); @@ -134,8 +137,7 @@ impl Ads { }); self.ads.insert(topic, nodes); } - self.expirations - .push_back((Box::pin(sleep(self.ad_lifetime)), topic)); + self.expirations.push_back((now, topic)); self.total_ads += 1; Ok(()) } @@ -144,24 +146,29 @@ impl Ads { impl Stream for Ads { // type returned can be unit type but for testing easier to get values, worth the overhead to keep? 
type Item = Result<((Enr, Instant), Topic), String>; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let (fut, topic) = match self.expirations.get_mut(0) { - Some((fut, topic)) => (fut, *topic), + fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + let (insert_time, topic) = match self.expirations.get_mut(0) { + Some((insert_time, topic)) => (insert_time, *topic), None => { debug!("No ads in 'table'"); return Poll::Pending; } }; - match fut.poll_unpin(cx) { - Poll::Ready(()) => match self.ads.get_mut(&topic) { - Some(topic_ads) => match topic_ads.pop_front() { + + if insert_time.elapsed() < self.ad_lifetime { + return Poll::Pending; + } + + match self.ads.get_mut(&topic) { + Some(topic_ads) => { + match topic_ads.pop_front() { Some(ad) => { if topic_ads.is_empty() { self.ads.remove(&topic); } self.total_ads -= 1; self.expirations.remove(0); - Poll::Ready(Some(Ok(((ad.node_record, ad.insert_time), topic)))) + return Poll::Ready(Some(Ok(((ad.node_record, ad.insert_time), topic)))); } None => { #[cfg(debug_assertions)] @@ -172,18 +179,19 @@ impl Stream for Ads { return Poll::Ready(Some(Err("No nodes for topic".into()))); } } - }, - None => { - #[cfg(debug_assertions)] - panic!("Panic on debug, mismatched mapping between expiration queue and entry queue"); - #[cfg(not(debug_assertions))] - { - error!("Mismatched mapping between expiration queue and entry queue"); - return Poll::Ready(Some(Err("Topic doesn't exist".into()))); - } } - }, - Poll::Pending => Poll::Pending, + } + None => { + #[cfg(debug_assertions)] + panic!( + "Panic on debug, mismatched mapping between expiration queue and entry queue" + ); + #[cfg(not(debug_assertions))] + { + error!("Mismatched mapping between expiration queue and entry queue"); + return Poll::Ready(Some(Err("Topic doesn't exist".into()))); + } + } } } } diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 76420e283..68f2ed721 100644 --- a/src/advertisement/test.rs 
+++ b/src/advertisement/test.rs @@ -103,7 +103,9 @@ async fn poll_ads() { let mut expired_ads = Vec::new(); - for _ in 0..4 { + let mut interval = tokio::time::interval(Duration::from_secs(1)); + + for _ in 0..10 { tokio::select! { Some(Ok((_, topic))) = ads.next() => { expired_ads.push(topic); @@ -112,6 +114,7 @@ async fn poll_ads() { ads.insert(enr.clone(), topic_1).unwrap(); } } + _ = interval.tick() => {} } } diff --git a/src/service.rs b/src/service.rs index 5729ec0d5..bdc287952 100644 --- a/src/service.rs +++ b/src/service.rs @@ -325,7 +325,7 @@ impl Service { /// The main execution loop of the discv5 serviced. async fn start(&mut self) { - let mut interval = interval(Duration::from_secs(60 * 15)); + let mut publish_topics = interval(Duration::from_secs(60 * 15)); loop { tokio::select! { @@ -476,7 +476,7 @@ impl Service { }; self.auto_reattempt_reg_topic_request(active_topic.node_address(), active_topic.topic(), enr, ticket); } - _ = interval.tick() => { + _ = publish_topics.tick() => { self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); } _ = self.ads.next() => {} From 2e20624550831c7ef5c161339a9705c707ab6ce6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 7 Apr 2022 17:06:00 +0200 Subject: [PATCH 024/391] Fix ticket wait time logic --- src/advertisement/mod.rs | 70 +++++++++++++++++++-------------------- src/advertisement/test.rs | 14 ++++++-- 2 files changed, 46 insertions(+), 38 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index e0c87ad9c..8108fa2bd 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -69,46 +69,46 @@ impl Ads { pub fn ticket_wait_time(&self, topic: Topic) -> Result { let now = Instant::now(); - match self.ads.get(&topic) { - Some(nodes) => { - if nodes.len() < self.max_ads_per_topic { - Ok(Duration::from_secs(0)) - } else { - match nodes.get(0) { - Some(ad) => { - let elapsed_time = now.saturating_duration_since(ad.insert_time); 
- Ok(self.ad_lifetime.saturating_sub(elapsed_time)) - } - None => { - #[cfg(debug_assertions)] - panic!("Panic on debug,topic key should be deleted if no ad nodes queued for it"); - #[cfg(not(debug_assertions))] - { - error!("Topic key should be deleted if no ad nodes queued for it"); - return Err("No nodes for topic".into()); + if self.total_ads < self.max_ads { + match self.ads.get(&topic) { + Some(nodes) => { + if nodes.len() < self.max_ads_per_topic { + Ok(Duration::from_secs(0)) + } else { + match nodes.get(0) { + Some(ad) => { + let elapsed_time = now.saturating_duration_since(ad.insert_time); + Ok(self.ad_lifetime.saturating_sub(elapsed_time)) + } + None => { + #[cfg(debug_assertions)] + panic!("Panic on debug,topic key should be deleted if no ad nodes queued for it"); + #[cfg(not(debug_assertions))] + { + error!( + "Topic key should be deleted if no ad nodes queued for it" + ); + return Err("No nodes for topic".into()); + } } } } } + None => Ok(Duration::from_secs(0)), } - None => { - if self.total_ads < self.max_ads { - Ok(Duration::from_secs(0)) - } else { - match self.expirations.get(0) { - Some((insert_time, _)) => { - let elapsed_time = now.saturating_duration_since(*insert_time); - Ok(self.ad_lifetime.saturating_sub(elapsed_time)) - } - None => { - #[cfg(debug_assertions)] - panic!("Panic on debug, mismatched mapping between expiration queue and total ads count"); - #[cfg(not(debug_assertions))] - { - error!("Mismatched mapping between expiration queue and total ads count"); - return Err("No nodes in table".into()); - } - } + } else { + match self.expirations.get(0) { + Some((insert_time, _)) => { + let elapsed_time = now.saturating_duration_since(*insert_time); + Ok(self.ad_lifetime.saturating_sub(elapsed_time)) + } + None => { + #[cfg(debug_assertions)] + panic!("Panic on debug, mismatched mapping between expiration queue and total ads count"); + #[cfg(not(debug_assertions))] + { + error!("Mismatched mapping between expiration queue and total ads 
count"); + return Err("No nodes in table".into()); } } } diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 68f2ed721..046748e81 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -62,18 +62,26 @@ async fn ticket_wait_time() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 2, 50); + let mut ads = Ads::new(Duration::from_secs(2), 2, 3); let topic = [1; 32]; + let topic_2 = [2; 32]; - ads.insert(enr, topic).unwrap(); + ads.insert(enr.clone(), topic).unwrap(); assert_eq!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(0))); - ads.insert(enr_2, topic).unwrap(); + ads.insert(enr_2.clone(), topic).unwrap(); + // Now max_ads_per_topic is reached for topic + assert_gt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(1))); + assert_lt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(2))); + + ads.insert(enr, topic_2).unwrap(); + // Now max_ads in table is reched assert_gt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(1))); assert_lt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(2))); tokio::time::sleep(Duration::from_secs(2)).await; + // The first ads for topic have expired assert_eq!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(0))); } From 51d472c7bcdc2c3bca4d1e372a9161f2d4f1b0c0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 7 Apr 2022 17:28:14 +0200 Subject: [PATCH 025/391] Set adequate datatypes --- src/advertisement/mod.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 8108fa2bd..5c2ef1845 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -15,6 +15,9 @@ pub mod ticket; pub type Topic = [u8; 32]; +const MAX_ADS_PER_TOPIC_DEFAULT: usize = 100; +const MAX_ADS_DEFAULT: usize = 50000; + /// An ad we are adevrtising for another node #[derive(Debug)] pub 
struct Ad { @@ -39,14 +42,20 @@ impl PartialEq for Ad { pub struct Ads { expirations: VecDeque<(Instant, Topic)>, ads: HashMap>, - total_ads: i32, + total_ads: usize, ad_lifetime: Duration, max_ads_per_topic: usize, - max_ads: i32, + max_ads: usize, } impl Ads { - pub fn new(ad_lifetime: Duration, max_ads_per_topic: usize, max_ads: i32) -> Self { + pub fn new(ad_lifetime: Duration, max_ads_per_topic: usize, max_ads: usize) -> Self { + let (max_ads_per_topic, max_ads) = if max_ads_per_topic <= max_ads { + (max_ads_per_topic, max_ads) + } else { + (MAX_ADS_PER_TOPIC_DEFAULT, MAX_ADS_DEFAULT) + }; + Ads { expirations: VecDeque::new(), ads: HashMap::new(), From 50c528c030eb2f34c2a80aad0198ccd4a37868c4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 7 Apr 2022 18:33:37 +0200 Subject: [PATCH 026/391] Improve return values --- src/advertisement/mod.rs | 46 ++++++++++++++++++------------------- src/advertisement/test.rs | 26 +++++++++++++-------- src/advertisement/ticket.rs | 6 ++--- src/service.rs | 29 ++++++++++++----------- 4 files changed, 58 insertions(+), 49 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 5c2ef1845..24edccf3b 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -3,7 +3,7 @@ use core::time::Duration; use enr::{CombinedKey, Enr}; use futures::prelude::*; use std::{ - collections::{HashMap, VecDeque}, + collections::{vec_deque::Iter, HashMap, VecDeque}, pin::Pin, task::{Context, Poll}, }; @@ -15,9 +15,6 @@ pub mod ticket; pub type Topic = [u8; 32]; -const MAX_ADS_PER_TOPIC_DEFAULT: usize = 100; -const MAX_ADS_DEFAULT: usize = 50000; - /// An ad we are adevrtising for another node #[derive(Debug)] pub struct Ad { @@ -26,12 +23,16 @@ pub struct Ad { } impl Ad { - fn new(node_record: Enr, insert_time: Instant) -> Self { + pub fn new(node_record: Enr, insert_time: Instant) -> Self { Ad { node_record, insert_time, } } + + pub fn node_record(&self) -> &Enr { + &self.node_record + } } impl PartialEq 
for Ad { @@ -49,45 +50,42 @@ pub struct Ads { } impl Ads { - pub fn new(ad_lifetime: Duration, max_ads_per_topic: usize, max_ads: usize) -> Self { + pub fn new(ad_lifetime: Duration, max_ads_per_topic: usize, max_ads: usize) -> Result { let (max_ads_per_topic, max_ads) = if max_ads_per_topic <= max_ads { (max_ads_per_topic, max_ads) } else { - (MAX_ADS_PER_TOPIC_DEFAULT, MAX_ADS_DEFAULT) + return Err("Values passed to max_ads_per_topic and max_ads don't make sense"); }; - Ads { + Ok(Ads { expirations: VecDeque::new(), ads: HashMap::new(), total_ads: 0, ad_lifetime, max_ads_per_topic, max_ads, - } + }) } - pub fn get_ad_nodes(&self, topic: Topic) -> Result>, String> { + pub fn get_ad_nodes(&self, topic: Topic) -> Result, &str> { match self.ads.get(&topic) { - Some(topic_ads) => Ok(topic_ads - .into_iter() - .map(|ad| ad.node_record.clone()) - .collect()), - None => Err("No ads for this topic".into()), + Some(topic_ads) => Ok(topic_ads.into_iter()), + None => Err("No ads for this topic"), } } - pub fn ticket_wait_time(&self, topic: Topic) -> Result { + pub fn ticket_wait_time(&self, topic: Topic) -> Option { let now = Instant::now(); if self.total_ads < self.max_ads { match self.ads.get(&topic) { Some(nodes) => { if nodes.len() < self.max_ads_per_topic { - Ok(Duration::from_secs(0)) + Some(Duration::from_secs(0)) } else { match nodes.get(0) { Some(ad) => { let elapsed_time = now.saturating_duration_since(ad.insert_time); - Ok(self.ad_lifetime.saturating_sub(elapsed_time)) + Some(self.ad_lifetime.saturating_sub(elapsed_time)) } None => { #[cfg(debug_assertions)] @@ -97,19 +95,19 @@ impl Ads { error!( "Topic key should be deleted if no ad nodes queued for it" ); - return Err("No nodes for topic".into()); + return None; } } } } } - None => Ok(Duration::from_secs(0)), + None => Some(Duration::from_secs(0)), } } else { match self.expirations.get(0) { Some((insert_time, _)) => { let elapsed_time = now.saturating_duration_since(*insert_time); - 
Ok(self.ad_lifetime.saturating_sub(elapsed_time)) + Some(self.ad_lifetime.saturating_sub(elapsed_time)) } None => { #[cfg(debug_assertions)] @@ -117,14 +115,14 @@ impl Ads { #[cfg(not(debug_assertions))] { error!("Mismatched mapping between expiration queue and total ads count"); - return Err("No nodes in table".into()); + return None; } } } } } - pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), String> { + pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { let now = Instant::now(); if let Some(nodes) = self.ads.get_mut(&topic) { if nodes.contains(&Ad::new(node_record.clone(), now)) { @@ -132,7 +130,7 @@ impl Ads { "This node {} is already advertising this topic", node_record.node_id() ); - return Err("Node already advertising this topic".into()); + return Err("Node already advertising this topic"); } nodes.push_back(Ad { node_record, diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 046748e81..4f719707c 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -34,8 +34,16 @@ async fn insert_ad_and_get_nodes() { ads.insert(enr_2.clone(), topic).unwrap(); ads.insert(enr.clone(), topic_2).unwrap(); - let nodes = ads.get_ad_nodes(topic).unwrap_or(vec![]); - let nodes_topic_2 = ads.get_ad_nodes(topic_2).unwrap_or(vec![]); + let nodes: Vec> = ads + .get_ad_nodes(topic) + .unwrap() + .map(|ad| ad.node_record().clone()) + .collect(); + let nodes_topic_2: Vec> = ads + .get_ad_nodes(topic_2) + .unwrap() + .map(|ad| ad.node_record().clone()) + .collect(); assert_eq!(nodes, vec![enr.clone(), enr_2]); assert_eq!(nodes_topic_2, vec![enr]); @@ -46,7 +54,7 @@ async fn ticket_wait_time_no_wait_time() { let ads = Ads::new(Duration::from_secs(1), 10, 50); let topic = [1; 32]; let wait_time = ads.ticket_wait_time(topic); - assert_eq!(wait_time, Ok(Duration::from_secs(0))) + assert_eq!(wait_time, Some(Duration::from_secs(0))) } #[tokio::test] @@ -68,21 +76,21 @@ async fn ticket_wait_time() { let 
topic_2 = [2; 32]; ads.insert(enr.clone(), topic).unwrap(); - assert_eq!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(0))); + assert_eq!(ads.ticket_wait_time(topic), Some(Duration::from_secs(0))); ads.insert(enr_2.clone(), topic).unwrap(); // Now max_ads_per_topic is reached for topic - assert_gt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(1))); - assert_lt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(2))); + assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(1))); + assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); ads.insert(enr, topic_2).unwrap(); // Now max_ads in table is reched - assert_gt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(1))); - assert_lt!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(2))); + assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(1))); + assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); tokio::time::sleep(Duration::from_secs(2)).await; // The first ads for topic have expired - assert_eq!(ads.ticket_wait_time(topic), Ok(Duration::from_secs(0))); + assert_eq!(ads.ticket_wait_time(topic), Some(Duration::from_secs(0))); } #[tokio::test] diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index ba116e9ba..b72a8bdcb 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -4,14 +4,14 @@ use delay_map::HashMapDelay; use std::cmp::Eq; use tracing::error; -pub fn topic_hash(topic: Vec) -> Result { +pub fn topic_hash(topic: Vec) -> Option { if topic.len() > 32 { error!("Topic is greater than 32 bytes"); - return Err("Topic is greater than 32 bytes".into()); + return None; } let mut topic_hash = [0u8; 32]; topic_hash[32 - topic.len()..].copy_from_slice(&topic); - Ok(topic_hash) + Some(topic_hash) } #[derive(PartialEq, Eq, Hash, Clone)] diff --git a/src/service.rs b/src/service.rs index bdc287952..a9eaeacb6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -664,19 +664,19 @@ impl Service { let 
_ticket = Ticket::decode(ticket).unwrap_or(Ticket::default()); + // use wait time to see if there is any point at doing ticket validation + // choose which ad to reg based on ticket, for example if some node has empty ticket // or is coming back, and possibly other stuff - self.ads - .insert(enr, topic) - .map(|_| { + match self.ads.insert(enr, topic) { + Ok(()) => { self.send_regconfirmation_response(node_address, id, topic) - }) - .ok(); - }) - .ok(); - }) - .ok(); + } + Err(e) => error!("{}", e), + } + }); + }); debug!("Received RegisterTopic request which is not fully implemented"); } RequestBody::TopicQuery { topic } => { @@ -1109,10 +1109,13 @@ impl Service { rpc_id: RequestId, topic: [u8; 32], ) { - let nodes_to_send = self.ads.get_ad_nodes(topic).unwrap_or_else(|e| { - error!("{}", e); - Vec::new() - }); + let nodes_to_send = match self.ads.get_ad_nodes(topic) { + Ok(iter) => iter.map(|ad| ad.node_record().clone()).collect(), + Err(e) => { + error!("{}", e); + Vec::new() + } + }; self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); } From 7dff9a6a953df0c1cb3db90dff975fd87300ca94 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 7 Apr 2022 18:58:09 +0200 Subject: [PATCH 027/391] Use state enums --- src/advertisement/mod.rs | 48 +++++++++++++++------------ src/advertisement/test.rs | 8 ++--- src/service.rs | 69 ++++++++++++++++++++++----------------- src/service/test.rs | 4 +-- 4 files changed, 73 insertions(+), 56 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 24edccf3b..c9dcf17b2 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -3,7 +3,7 @@ use core::time::Duration; use enr::{CombinedKey, Enr}; use futures::prelude::*; use std::{ - collections::{vec_deque::Iter, HashMap, VecDeque}, + collections::{hash_map::Entry, vec_deque::Iter, HashMap, VecDeque}, pin::Pin, task::{Context, Poll}, }; @@ -50,7 +50,11 @@ pub struct Ads { } impl Ads { - pub fn new(ad_lifetime: Duration, 
max_ads_per_topic: usize, max_ads: usize) -> Result { + pub fn new( + ad_lifetime: Duration, + max_ads_per_topic: usize, + max_ads: usize, + ) -> Result { let (max_ads_per_topic, max_ads) = if max_ads_per_topic <= max_ads { (max_ads_per_topic, max_ads) } else { @@ -124,25 +128,29 @@ impl Ads { pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { let now = Instant::now(); - if let Some(nodes) = self.ads.get_mut(&topic) { - if nodes.contains(&Ad::new(node_record.clone(), now)) { - error!( - "This node {} is already advertising this topic", - node_record.node_id() - ); - return Err("Node already advertising this topic"); + match self.ads.entry(topic) { + Entry::Occupied(ref mut entry) => { + let nodes = entry.get_mut(); + if nodes.contains(&Ad::new(node_record.clone(), now)) { + error!( + "This node {} is already advertising this topic", + node_record.node_id() + ); + return Err("Node already advertising this topic"); + } + nodes.push_back(Ad { + node_record, + insert_time: now, + }); + } + Entry::Vacant(_) => { + let mut nodes = VecDeque::new(); + nodes.push_back(Ad { + node_record, + insert_time: now, + }); + self.ads.insert(topic, nodes); } - nodes.push_back(Ad { - node_record, - insert_time: now, - }); - } else { - let mut nodes = VecDeque::new(); - nodes.push_back(Ad { - node_record, - insert_time: now, - }); - self.ads.insert(topic, nodes); } self.expirations.push_back((now, topic)); self.total_ads += 1; diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 4f719707c..4161d5b3a 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -18,7 +18,7 @@ async fn insert_ad_and_get_nodes() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(60), 10, 50); + let mut ads = Ads::new(Duration::from_secs(60), 10, 50).unwrap(); let topic = [1; 32]; let topic_2 = [2; 32]; @@ -51,7 +51,7 @@ async fn 
insert_ad_and_get_nodes() { #[tokio::test] async fn ticket_wait_time_no_wait_time() { - let ads = Ads::new(Duration::from_secs(1), 10, 50); + let ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); let topic = [1; 32]; let wait_time = ads.ticket_wait_time(topic); assert_eq!(wait_time, Some(Duration::from_secs(0))) @@ -70,7 +70,7 @@ async fn ticket_wait_time() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 2, 3); + let mut ads = Ads::new(Duration::from_secs(2), 2, 3).unwrap(); let topic = [1; 32]; let topic_2 = [2; 32]; @@ -106,7 +106,7 @@ async fn poll_ads() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(1), 10, 50); + let mut ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); let topic_1 = [1; 32]; let topic_2 = [2; 32]; diff --git a/src/service.rs b/src/service.rs index a9eaeacb6..88a8cbd96 100644 --- a/src/service.rs +++ b/src/service.rs @@ -43,6 +43,7 @@ use parking_lot::RwLock; use rpc::*; use std::{ collections::{HashMap, HashSet}, + io::{Error, ErrorKind}, net::SocketAddr, sync::Arc, task::Poll, @@ -265,7 +266,7 @@ impl Service { kbuckets: Arc>>, config: Discv5Config, listen_socket: SocketAddr, - ) -> Result<(oneshot::Sender<()>, mpsc::Sender), std::io::Error> { + ) -> Result<(oneshot::Sender<()>, mpsc::Sender), Error> { // process behaviour-level configuration parameters let ip_votes = if config.enr_update { Some(IpVote::new( @@ -289,6 +290,19 @@ impl Service { let (discv5_send, discv5_recv) = mpsc::channel(30); let (exit_send, exit) = oneshot::channel(); + let ads = match Ads::new(Duration::from_secs(60 * 15), 100, 50000) { + Ok(ads) => ads, + Err(e) => { + return Err(Error::new(ErrorKind::InvalidInput, e)); + } + }; + let active_topics = match Ads::new(Duration::from_secs(60 * 15), 100, 50000) { + Ok(ads) => 
ads, + Err(e) => { + return Err(Error::new(ErrorKind::InvalidInput, e)); + } + }; + config .executor .clone() @@ -308,10 +322,10 @@ impl Service { peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), + ads, tickets: Tickets::new(), topics: HashSet::new(), - active_topics: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), + active_topics, exit, config: config.clone(), }; @@ -649,34 +663,29 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - topic_hash(topic) - .map(|topic| { - self.ads - .ticket_wait_time(topic) - .map(|wait_time| { - let new_ticket = Ticket::new(topic); - self.send_ticket_response( - node_address.clone(), - id.clone(), - new_ticket, - wait_time, - ); - - let _ticket = Ticket::decode(ticket).unwrap_or(Ticket::default()); - - // use wait time to see if there is any point at doing ticket validation - - // choose which ad to reg based on ticket, for example if some node has empty ticket - // or is coming back, and possibly other stuff - - match self.ads.insert(enr, topic) { - Ok(()) => { - self.send_regconfirmation_response(node_address, id, topic) - } - Err(e) => error!("{}", e), - } - }); + topic_hash(topic).map(|topic| { + self.ads.ticket_wait_time(topic).map(|wait_time| { + let new_ticket = Ticket::new(topic); + self.send_ticket_response( + node_address.clone(), + id.clone(), + new_ticket, + wait_time, + ); + + let _ticket = Ticket::decode(ticket).unwrap_or(Ticket::default()); + + // use wait time to see if there is any point at doing ticket validation + + // choose which ad to reg based on ticket, for example if some node has empty ticket + // or is coming back, and possibly other stuff + + match self.ads.insert(enr, topic) { + Ok(()) => self.send_regconfirmation_response(node_address, id, topic), + Err(e) => error!("{}", e), + } }); + }); debug!("Received 
RegisterTopic request which is not fully implemented"); } RequestBody::TopicQuery { topic } => { diff --git a/src/service/test.rs b/src/service/test.rs index 85bb3e1c1..23f20e18a 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -97,10 +97,10 @@ async fn build_service( peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), + ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), tickets: Tickets::new(), topics: HashSet::new(), - active_topics: Ads::new(Duration::from_secs(60 * 15), 100 as usize, 50000), + active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), exit, config, } From ce77ae2ab04e1bd366b87a804cbde1743b3f3c71 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 7 Apr 2022 20:58:12 +0200 Subject: [PATCH 028/391] Make code more compact --- src/advertisement/mod.rs | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index c9dcf17b2..feddd877e 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -128,30 +128,18 @@ impl Ads { pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { let now = Instant::now(); - match self.ads.entry(topic) { - Entry::Occupied(ref mut entry) => { - let nodes = entry.get_mut(); - if nodes.contains(&Ad::new(node_record.clone(), now)) { - error!( - "This node {} is already advertising this topic", - node_record.node_id() - ); - return Err("Node already advertising this topic"); - } - nodes.push_back(Ad { - node_record, - insert_time: now, - }); - } - Entry::Vacant(_) => { - let mut nodes = VecDeque::new(); - nodes.push_back(Ad { - node_record, - insert_time: now, - }); - self.ads.insert(topic, nodes); - } + let nodes = self.ads.entry(topic).or_default(); + if nodes.contains(&Ad::new(node_record.clone(), now)) { + error!( + "This node {} is already 
advertising this topic", + node_record.node_id() + ); + return Err("Node already advertising this topic"); } + nodes.push_back(Ad { + node_record, + insert_time: now, + }); self.expirations.push_back((now, topic)); self.total_ads += 1; Ok(()) From 2332c01963fd40230be282a9c51eb6955eb7d6ca Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 09:51:07 +0200 Subject: [PATCH 029/391] Make better use of included crate --- src/advertisement/mod.rs | 45 +++++++++++----------------------- src/handler/active_requests.rs | 23 +++++++---------- 2 files changed, 23 insertions(+), 45 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index feddd877e..04169feeb 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -2,8 +2,9 @@ use super::*; use core::time::Duration; use enr::{CombinedKey, Enr}; use futures::prelude::*; +use more_asserts::debug_unreachable; use std::{ - collections::{hash_map::Entry, vec_deque::Iter, HashMap, VecDeque}, + collections::{vec_deque::Iter, HashMap, VecDeque}, pin::Pin, task::{Context, Poll}, }; @@ -92,15 +93,9 @@ impl Ads { Some(self.ad_lifetime.saturating_sub(elapsed_time)) } None => { - #[cfg(debug_assertions)] - panic!("Panic on debug,topic key should be deleted if no ad nodes queued for it"); - #[cfg(not(debug_assertions))] - { - error!( - "Topic key should be deleted if no ad nodes queued for it" - ); - return None; - } + debug_unreachable!("Panic on debug,topic key should be deleted if no ad nodes queued for it"); + error!("Topic key should be deleted if no ad nodes queued for it"); + return None; } } } @@ -114,13 +109,9 @@ impl Ads { Some(self.ad_lifetime.saturating_sub(elapsed_time)) } None => { - #[cfg(debug_assertions)] - panic!("Panic on debug, mismatched mapping between expiration queue and total ads count"); - #[cfg(not(debug_assertions))] - { - error!("Mismatched mapping between expiration queue and total ads count"); - return None; - } + debug_unreachable!("Panic on debug, 
mismatched mapping between expiration queue and total ads count"); + error!("Mismatched mapping between expiration queue and total ads count"); + return None; } } } @@ -174,26 +165,18 @@ impl Stream for Ads { return Poll::Ready(Some(Ok(((ad.node_record, ad.insert_time), topic)))); } None => { - #[cfg(debug_assertions)] - panic!("Panic on debug, topic key should be deleted if no ad nodes queued for it"); - #[cfg(not(debug_assertions))] - { - error!("Topic key should be deleted if no ad nodes queued for it"); - return Poll::Ready(Some(Err("No nodes for topic".into()))); - } + debug_unreachable!("Panic on debug, topic key should be deleted if no ad nodes queued for it"); + error!("Topic key should be deleted if no ad nodes queued for it"); + return Poll::Ready(Some(Err("No nodes for topic".into()))); } } } None => { - #[cfg(debug_assertions)] - panic!( + debug_unreachable!( "Panic on debug, mismatched mapping between expiration queue and entry queue" ); - #[cfg(not(debug_assertions))] - { - error!("Mismatched mapping between expiration queue and entry queue"); - return Poll::Ready(Some(Err("Topic doesn't exist".into()))); - } + error!("Mismatched mapping between expiration queue and entry queue"); + return Poll::Ready(Some(Err("Topic doesn't exist".into()))); } } } diff --git a/src/handler/active_requests.rs b/src/handler/active_requests.rs index fbe2abdfe..929d78a32 100644 --- a/src/handler/active_requests.rs +++ b/src/handler/active_requests.rs @@ -1,5 +1,6 @@ use super::*; use delay_map::HashMapDelay; +use more_asserts::debug_unreachable; pub(crate) struct ActiveRequests { /// A list of raw messages we are awaiting a response from the remote. 
@@ -39,13 +40,9 @@ impl ActiveRequests { Some(node_address) => match self.active_requests_mapping.remove(&node_address) { Some(request_call) => Some((node_address, request_call)), None => { - #[cfg(debug_assertions)] - panic!("Panic on debug, a matching request call doesn't exist"); - #[cfg(not(debug_assertions))] - { - error!("A matching request call doesn't exist"); - return None; - } + debug_unreachable!("Panic on debug, a matching request call doesn't exist"); + error!("A matching request call doesn't exist"); + return None; } }, None => None, @@ -62,13 +59,11 @@ impl ActiveRequests { { Some(_) => Some(request_call), None => { - #[cfg(debug_assertions)] - panic!("Panic on debug, a matching nonce mapping doesn't exist"); - #[cfg(not(debug_assertions))] - { - error!("A matching nonce mapping doesn't exist"); - return None; - } + debug_unreachable!( + "Panic on debug, a matching nonce mapping doesn't exist" + ); + error!("A matching nonce mapping doesn't exist"); + return None; } } } From 92d722973963a627d48a4eeb9e0b59c736a0c56c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 11:20:32 +0200 Subject: [PATCH 030/391] Separate service ticket logic from Handler --- src/advertisement/ticket.rs | 50 ++++++---- src/handler/mod.rs | 190 ++++++++++-------------------------- src/handler/tests.rs | 8 +- src/service.rs | 44 +++------ src/service/test.rs | 2 +- 5 files changed, 102 insertions(+), 192 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index b72a8bdcb..273b87801 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,6 +1,7 @@ use super::*; -use crate::node_info::NodeAddress; use delay_map::HashMapDelay; +use enr::NodeId; +use node_info::NodeContact; use std::cmp::Eq; use tracing::error; @@ -16,20 +17,13 @@ pub fn topic_hash(topic: Vec) -> Option { #[derive(PartialEq, Eq, Hash, Clone)] pub struct ActiveTopic { - node_address: NodeAddress, + node_id: NodeId, topic: Topic, } impl 
ActiveTopic { - pub fn new(node_address: NodeAddress, topic: Topic) -> Self { - ActiveTopic { - node_address, - topic, - } - } - - pub fn node_address(&self) -> NodeAddress { - self.node_address.clone() + pub fn new(node_id: NodeId, topic: Topic) -> Self { + ActiveTopic { node_id, topic } } pub fn topic(&self) -> Topic { @@ -37,7 +31,7 @@ impl ActiveTopic { } } -#[derive(Default, Debug)] +#[derive(Default, Debug, Copy, Clone)] pub struct Ticket { //nonce: u64, //src_node_id: NodeId, @@ -80,8 +74,30 @@ impl Ticket { }*/ } +pub struct ActiveTicket { + contact: NodeContact, + ticket: Ticket, +} + +impl ActiveTicket { + pub fn new(contact: NodeContact, ticket: Ticket) -> Self { + ActiveTicket { + contact, + ticket, + } + } + + pub fn contact(&self) -> NodeContact { + self.contact.clone() + } + + pub fn ticket(&self) -> Ticket { + self.ticket + } +} + pub struct Tickets { - tickets: HashMapDelay, + tickets: HashMapDelay, } impl Tickets { @@ -91,17 +107,17 @@ impl Tickets { } } - pub fn insert(&mut self, node_address: NodeAddress, ticket: Ticket, wait_time: Duration) { + pub fn insert(&mut self, contact: NodeContact, ticket: Ticket, wait_time: Duration) { self.tickets.insert_at( - ActiveTopic::new(node_address, ticket.topic), - ticket, + ActiveTopic::new(contact.node_id(), ticket.topic), + ActiveTicket::new(contact, ticket), wait_time, ); } } impl Stream for Tickets { - type Item = Result<(ActiveTopic, Ticket), String>; + type Item = Result<(ActiveTopic, ActiveTicket), String>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.tickets.poll_next_unpin(cx) { Poll::Ready(Some(Ok((active_topic, ticket)))) => { diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 8d0e7bc03..af69d0840 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -85,7 +85,7 @@ pub enum HandlerIn { /// /// Note: To update an ENR for an unknown node, we request a FINDNODE with distance 0 to the /// `NodeContact` we know of. 
- Request(RequestContact, Box), + Request(NodeContact, Box), /// A Response to send to a particular node to answer a HandlerOut::Request has been /// received from the application layer. @@ -149,41 +149,10 @@ pub struct Challenge { remote_enr: Option, } -#[derive(Debug, Clone, PartialEq)] -pub enum RequestContact { - Auto(NodeAddress), - Initiated(NodeContact), -} - -impl RequestContact { - pub fn node_address(&self) -> Result { - match self { - RequestContact::Auto(node_address) => Ok(node_address.clone()), - RequestContact::Initiated(contact) => contact.node_address(), - } - } - - pub fn node_id(&self) -> NodeId { - match self { - RequestContact::Auto(node_address) => node_address.node_id, - RequestContact::Initiated(contact) => contact.node_id(), - } - } -} - -impl std::fmt::Display for RequestContact { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - RequestContact::Auto(node_address) => write!(f, "{}", node_address), - RequestContact::Initiated(contact) => write!(f, "{}", contact), - } - } -} - /// A request to a node that we are waiting for a response. #[derive(Debug)] pub(crate) struct RequestCall { - contact: RequestContact, + contact: NodeContact, /// The raw discv5 packet sent. packet: Packet, /// The unencrypted message. Required if need to re-encrypt and re-send. @@ -202,7 +171,7 @@ pub(crate) struct RequestCall { impl RequestCall { fn new( - contact: RequestContact, + contact: NodeContact, packet: Packet, request: Request, initiating_session: bool, @@ -239,7 +208,7 @@ pub struct Handler { /// The expected responses by SocketAddr which allows packets to pass the underlying filter. filter_expected_responses: Arc>>, /// Requests awaiting a handshake completion. - pending_requests: HashMap>, + pending_requests: HashMap>, /// Currently in-progress handshakes with peers. active_challenges: LruTimeCache, /// Established sessions with peers. @@ -478,96 +447,55 @@ impl Handler { /// Sends a `Request` to a node. 
async fn send_request( &mut self, - contact: RequestContact, + contact: NodeContact, request: Request, ) -> Result<(), RequestError> { - match contact.clone() { - RequestContact::Auto(_) => self.regtopic_with_ticket(contact, request).await, - RequestContact::Initiated(node_contact) => { - let node_address = node_contact - .node_address() - .map_err(|e| RequestError::InvalidEnr(e.into()))?; - - if node_address.socket_addr == self.listen_socket { - debug!("Filtered request to self"); - return Err(RequestError::SelfRequest); - } - - // If there is already an active request for this node, add to pending requests - if self.active_requests.get(&node_address).is_some() { - trace!("Request queued for node: {}", node_address); - self.pending_requests - .entry(node_address) - .or_insert_with(Vec::new) - .push((contact, request)); - return Ok(()); - } + let node_address = contact + .node_address() + .map_err(|e| RequestError::InvalidEnr(e.into()))?; - let (packet, initiating_session) = { - if let Some(session) = self.sessions.get_mut(&node_address) { - // Encrypt the message and send - let packet = session - .encrypt_message(self.node_id, &request.clone().encode()) - .map_err(|e| RequestError::EncryptionFailed(format!("{:?}", e)))?; - (packet, false) - } else { - // No session exists, start a new handshake - trace!( - "Starting session. 
Sending random packet to: {}", - node_address - ); - let packet = Packet::new_random(&self.node_id) - .map_err(RequestError::EntropyFailure)?; - // We are initiating a new session - (packet, true) - } - }; + if node_address.socket_addr == self.listen_socket { + debug!("Filtered request to self"); + return Err(RequestError::SelfRequest); + } - let call = RequestCall::new(contact, packet.clone(), request, initiating_session); - // let the filter know we are expecting a response - self.add_expected_response(node_address.socket_addr); - self.send(node_address.clone(), packet).await; - self.active_requests.insert(node_address, call); - Ok(()) - } + // If there is already an active request for this node, add to pending requests + if self.active_requests.get(&node_address).is_some() { + trace!("Request queued for node: {}", node_address); + self.pending_requests + .entry(node_address) + .or_insert_with(Vec::new) + .push((contact, request)); + return Ok(()); } - } - async fn regtopic_with_ticket( - &mut self, - contact: RequestContact, - request: Request, - ) -> Result<(), RequestError> { - Ok(if let Ok(node_address) = contact.node_address() { + let (packet, initiating_session) = { if let Some(session) = self.sessions.get_mut(&node_address) { - // If there is already an active request for this node, add to pending requests - if self.active_requests.get(&node_address).is_some() { - trace!("Request queued for node: {}", node_address); - self.pending_requests - .entry(node_address) - .or_insert_with(Vec::new) - .push((contact, request)); - return Ok(()); - } - + // Encrypt the message and send let packet = session .encrypt_message(self.node_id, &request.clone().encode()) .map_err(|e| RequestError::EncryptionFailed(format!("{:?}", e)))?; - - let call = RequestCall::new(contact, packet.clone(), request, false); - // let the filter know we are expecting a response - self.add_expected_response(node_address.socket_addr); - self.send(node_address.clone(), packet).await; - 
self.active_requests.insert(node_address, call); + (packet, false) } else { - // Either the session is being established or has expired. We simply drop the - // response in this case. - warn!( - "Session is not established. Dropping request {} for node: {}", - request, node_address.node_id + // No session exists, start a new handshake + trace!( + "Starting session. Sending random packet to: {}", + node_address ); + let packet = + Packet::new_random(&self.node_id).map_err(RequestError::EntropyFailure)?; + // We are initiating a new session + (packet, true) } - }) + }; + + let call = RequestCall::new(contact, packet.clone(), request, initiating_session); + // let the filter know we are expecting a response + self.add_expected_response(node_address.socket_addr); + self.send(node_address.clone(), packet).await; + + self.active_requests.insert(node_address, call); + Ok(()) } /// Sends an RPC Response. @@ -677,28 +605,13 @@ impl Handler { request_call.contact ); - // Drop session for REGTOPIC requests automatically re-sent on ticket wait_time - // expiration. These packets are only sent to active sessions. - let contact = match request_call.contact.clone() { - RequestContact::Auto(_) => { - warn!( - "REGTOPIC reuqest automatically initiated upon ticket wait time expiration are only set to active sessions. Dropping session. Node: {}", - request_call.contact - ); - self.fail_request(request_call, RequestError::InvalidRemotePacket, true) - .await; - return; - } - RequestContact::Initiated(contact) => contact, - }; - // We do not allow multiple WHOAREYOU packets for a single challenge request. If we have - // already sent a WHOAREYOU ourselves, we drop sessions which send us a WHOAREYOU in + // already sent a WHOAREYOU ourselves, we drop sessions who send us a WHOAREYOU in // response. if request_call.handshake_sent { warn!( "Authentication response already sent. Dropping session. 
Node: {}", - contact + request_call.contact ); self.fail_request(request_call, RequestError::InvalidRemotePacket, true) .await; @@ -716,7 +629,7 @@ impl Handler { // Generate a new session and authentication packet let (auth_packet, mut session) = match Session::encrypt_with_header( - &contact, + &request_call.contact, self.key.clone(), updated_enr, &self.node_id, @@ -746,10 +659,11 @@ impl Handler { // // All sent requests must have an associated node_id. Therefore the following // must not panic. - let node_address = contact + let node_address = request_call + .contact .node_address() .expect("All sent requests must have a node address"); - match contact.clone() { + match request_call.contact.clone() { NodeContact::Enr(enr) => { // NOTE: Here we decide if the session is outgoing or ingoing. The condition for an // outgoing session is that we originally sent a RANDOM packet (signifying we did @@ -783,10 +697,13 @@ impl Handler { // Don't know the ENR. Establish the session, but request an ENR also // Send the Auth response - let contact = contact.clone(); + let contact = request_call.contact.clone(); trace!( "Sending Authentication response to node: {}", - contact.node_address().expect("Sanitized contact") + request_call + .contact + .node_address() + .expect("Sanitized contact") ); request_call.packet = auth_packet.clone(); request_call.handshake_sent = true; @@ -801,9 +718,7 @@ impl Handler { }; session.awaiting_enr = Some(id); - let _ = self - .send_request(RequestContact::Initiated(contact), request) - .await; + let _ = self.send_request(contact, request).await; } } self.new_session(node_address, session); @@ -1059,6 +974,7 @@ impl Handler { *remaining_responses -= 1; if remaining_responses != &0 { // more responses remaining, add back the request and send the response + // add back the request and send the response self.active_requests .insert(node_address.clone(), request_call); let _ = self diff --git a/src/handler/tests.rs b/src/handler/tests.rs index 
90ef31b69..95b72042c 100644 --- a/src/handler/tests.rs +++ b/src/handler/tests.rs @@ -71,7 +71,7 @@ async fn simple_session_message() { }); let _ = sender_send.send(HandlerIn::Request( - RequestContact::Initiated(receiver_enr.into()), + receiver_enr.into(), send_message.clone(), )); @@ -148,7 +148,7 @@ async fn multiple_messages() { // sender to send the first message then await for the session to be established let _ = sender_handler.send(HandlerIn::Request( - RequestContact::Initiated(receiver_enr.clone().into()), + receiver_enr.clone().into(), send_message.clone(), )); @@ -173,7 +173,7 @@ async fn multiple_messages() { // now the session is established, send the rest of the messages for _ in 0..messages_to_send - 1 { let _ = sender_handler.send(HandlerIn::Request( - RequestContact::Initiated(receiver_enr.clone().into()), + receiver_enr.clone().into(), send_message.clone(), )); } @@ -241,7 +241,7 @@ async fn test_active_requests_insert() { }; let initiating_session = true; let request_call = RequestCall::new( - RequestContact::Initiated(contact), + contact, packet, request, initiating_session, diff --git a/src/service.rs b/src/service.rs index 88a8cbd96..e66ba9375 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,11 +19,11 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, Ticket, Tickets}, + ticket::{topic_hash, ActiveTicket, ActiveTopic, Ticket, Tickets}, Ads, Topic, }, error::{RequestError, ResponseError}, - handler::{Handler, HandlerIn, HandlerOut, RequestContact}, + handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable, NodeStatus, UpdateResult, @@ -219,7 +219,7 @@ pub struct Service { /// Active RPC request awaiting a response from the handler. struct ActiveRequest { /// The address the request was sent to. - pub contact: RequestContact, + pub contact: NodeContact, /// The request that was sent. 
pub request_body: RequestBody, /// The query ID if the request was related to a query. @@ -484,11 +484,11 @@ impl Service { self.send_ping(enr); } } - Some(Ok((active_topic, ticket))) = self.tickets.next() => { + Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { let enr = match self.local_enr.read().clone() { enr => enr, }; - self.auto_reattempt_reg_topic_request(active_topic.node_address(), active_topic.topic(), enr, ticket); + self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, active_ticket.ticket()); } _ = publish_topics.tick() => { self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); @@ -928,7 +928,7 @@ impl Service { // todo(emhane): What should max wait_time be so insert_at in Tickets doesn't panic? match Ticket::decode(ticket) { Ok(ticket) => self.tickets.insert( - node_address, + active_request.contact, ticket, Duration::from_secs(wait_time), ), @@ -961,7 +961,7 @@ impl Service { enr_seq: self.local_enr.read().seq(), }; let active_request = ActiveRequest { - contact: RequestContact::Initiated(enr.into()), + contact: enr.into(), request_body, query_id: None, callback: None, @@ -999,7 +999,7 @@ impl Service { ) { let request_body = RequestBody::FindNode { distances: vec![0] }; let active_request = ActiveRequest { - contact: RequestContact::Initiated(contact), + contact: contact, request_body, query_id: None, callback: callback.map(CallbackResponse::Enr), @@ -1018,7 +1018,7 @@ impl Service { let request_body = RequestBody::Talk { protocol, request }; let active_request = ActiveRequest { - contact: RequestContact::Initiated(contact), + contact: contact, request_body, query_id: None, callback: Some(CallbackResponse::Talk(callback)), @@ -1034,29 +1034,7 @@ impl Service { }; let active_request = ActiveRequest { - contact: RequestContact::Initiated(contact), - request_body, - query_id: None, - callback: None, - }; - self.send_rpc_request(active_request); - } - - fn 
auto_reattempt_reg_topic_request( - &mut self, - node_address: NodeAddress, - topic: Topic, - enr: Enr, - ticket: Ticket, - ) { - let request_body = RequestBody::RegisterTopic { - topic: topic.to_vec(), - enr, - ticket: format!("{:?}", ticket).as_bytes().to_vec(), - }; - - let active_request = ActiveRequest { - contact: RequestContact::Auto(node_address), + contact: contact, request_body, query_id: None, callback: None, @@ -1266,7 +1244,7 @@ impl Service { // find the ENR associated with the query if let Some(enr) = self.find_enr(&return_peer) { let active_request = ActiveRequest { - contact: RequestContact::Initiated(enr.into()), + contact: enr.into(), request_body, query_id: Some(query_id), callback: None, diff --git a/src/service/test.rs b/src/service/test.rs index 23f20e18a..8520afdfe 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -162,7 +162,7 @@ async fn test_updating_connection_on_ping() { service.active_requests.insert( RequestId(vec![1]), ActiveRequest { - contact: RequestContact::Initiated(node_contact), + contact: node_contact, request_body: rpc::RequestBody::Ping { enr_seq: 2 }, query_id: Some(QueryId(1)), callback: None, From 2ecc61015a4bf48492d1c4d6b50d92265a51502b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 12:39:31 +0200 Subject: [PATCH 031/391] Remove unnecessary typing --- src/advertisement/mod.rs | 12 ++++++------ src/advertisement/test.rs | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 04169feeb..8710e55da 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -1,6 +1,6 @@ use super::*; +use crate::Enr; use core::time::Duration; -use enr::{CombinedKey, Enr}; use futures::prelude::*; use more_asserts::debug_unreachable; use std::{ @@ -19,19 +19,19 @@ pub type Topic = [u8; 32]; /// An ad we are adevrtising for another node #[derive(Debug)] pub struct Ad { - node_record: Enr, + node_record: Enr, insert_time: Instant, 
} impl Ad { - pub fn new(node_record: Enr, insert_time: Instant) -> Self { + pub fn new(node_record: Enr, insert_time: Instant) -> Self { Ad { node_record, insert_time, } } - pub fn node_record(&self) -> &Enr { + pub fn node_record(&self) -> &Enr { &self.node_record } } @@ -117,7 +117,7 @@ impl Ads { } } - pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { + pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { let now = Instant::now(); let nodes = self.ads.entry(topic).or_default(); if nodes.contains(&Ad::new(node_record.clone(), now)) { @@ -139,7 +139,7 @@ impl Ads { impl Stream for Ads { // type returned can be unit type but for testing easier to get values, worth the overhead to keep? - type Item = Result<((Enr, Instant), Topic), String>; + type Item = Result<((Enr, Instant), Topic), String>; fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { let (insert_time, topic) = match self.expirations.get_mut(0) { Some((insert_time, topic)) => (insert_time, *topic), diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 4161d5b3a..96ce56ecf 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -34,12 +34,12 @@ async fn insert_ad_and_get_nodes() { ads.insert(enr_2.clone(), topic).unwrap(); ads.insert(enr.clone(), topic_2).unwrap(); - let nodes: Vec> = ads + let nodes: Vec = ads .get_ad_nodes(topic) .unwrap() .map(|ad| ad.node_record().clone()) .collect(); - let nodes_topic_2: Vec> = ads + let nodes_topic_2: Vec = ads .get_ad_nodes(topic_2) .unwrap() .map(|ad| ad.node_record().clone()) From 99c889981ed683a261fff96a2ebed18e5f384f42 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 16:06:54 +0200 Subject: [PATCH 032/391] Remove stream impl --- src/advertisement/mod.rs | 145 +++++++++++++++++++++--------------- src/advertisement/test.rs | 99 ++++++++++++++---------- src/advertisement/ticket.rs | 20 +---- src/handler/tests.rs | 7 +- src/service.rs | 
57 ++++++-------- 5 files changed, 170 insertions(+), 158 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 8710e55da..787e0577e 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -4,10 +4,12 @@ use core::time::Duration; use futures::prelude::*; use more_asserts::debug_unreachable; use std::{ + cmp::Ordering, collections::{vec_deque::Iter, HashMap, VecDeque}, pin::Pin, task::{Context, Poll}, }; +use ticket::Ticket; use tokio::time::Instant; use tracing::{debug, error}; @@ -18,14 +20,14 @@ pub type Topic = [u8; 32]; /// An ad we are adevrtising for another node #[derive(Debug)] -pub struct Ad { +pub struct AdNode { node_record: Enr, insert_time: Instant, } -impl Ad { +impl AdNode { pub fn new(node_record: Enr, insert_time: Instant) -> Self { - Ad { + AdNode { node_record, insert_time, } @@ -36,14 +38,39 @@ impl Ad { } } -impl PartialEq for Ad { +impl PartialEq for AdNode { fn eq(&self, other: &Self) -> bool { self.node_record == other.node_record } } + +#[derive(Ord, Eq)] +struct AdTopic { + topic: Topic, + insert_time: Instant, +} + +impl AdTopic { + pub fn new(topic: Topic, insert_time: Instant) -> Self { + AdTopic { topic, insert_time } + } +} + +impl PartialEq for AdTopic { + fn eq(&self, other: &Self) -> bool { + self.topic == other.topic + } +} + +impl PartialOrd for AdTopic { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.topic.cmp(&other.topic)) + } +} + pub struct Ads { - expirations: VecDeque<(Instant, Topic)>, - ads: HashMap>, + expirations: VecDeque, + ads: HashMap>, total_ads: usize, ad_lifetime: Duration, max_ads_per_topic: usize, @@ -72,20 +99,21 @@ impl Ads { }) } - pub fn get_ad_nodes(&self, topic: Topic) -> Result, &str> { + pub fn get_ad_nodes(&self, topic: Topic) -> Result, &str> { match self.ads.get(&topic) { Some(topic_ads) => Ok(topic_ads.into_iter()), None => Err("No ads for this topic"), } } - pub fn ticket_wait_time(&self, topic: Topic) -> Option { + pub fn 
ticket_wait_time(&mut self, topic: Topic) -> Option { + self.remove_expired(); let now = Instant::now(); if self.total_ads < self.max_ads { match self.ads.get(&topic) { Some(nodes) => { if nodes.len() < self.max_ads_per_topic { - Some(Duration::from_secs(0)) + None } else { match nodes.get(0) { Some(ad) => { @@ -100,12 +128,12 @@ impl Ads { } } } - None => Some(Duration::from_secs(0)), + None => None, } } else { match self.expirations.get(0) { - Some((insert_time, _)) => { - let elapsed_time = now.saturating_duration_since(*insert_time); + Some(ad) => { + let elapsed_time = now.saturating_duration_since(ad.insert_time); Some(self.ad_lifetime.saturating_sub(elapsed_time)) } None => { @@ -117,67 +145,62 @@ impl Ads { } } - pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { + pub fn remove_expired(&mut self) -> Option<((Enr, Instant), Topic)> { + let mut map: HashMap = HashMap::new(); + + self.expirations + .iter() + .take_while(|ad| ad.insert_time.elapsed() >= self.ad_lifetime) + .for_each(|ad| { + let count = map.entry(ad.topic).or_default(); + *count += 1; + }); + + map.into_iter().for_each(|(topic, index)| { + let entry_ref = self.ads.entry(topic).or_default(); + for _ in 0..index { + entry_ref.pop_front(); + } + if entry_ref.is_empty() { + self.ads.remove(&topic); + } + self.total_ads -= index; + }); + + None + } + + pub fn regconfirmation( + &mut self, + node_record: Enr, + topic: Topic, + wait_time: Duration, + _ticket: Ticket, + ) -> Result<(), &str> { + if wait_time > Duration::from_secs(0) { + return Err("currently no space for this ad"); + } + // do some validation of tiket against other tickets received in registration window + self.insert(node_record, topic) + } + + fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { + self.remove_expired(); let now = Instant::now(); let nodes = self.ads.entry(topic).or_default(); - if nodes.contains(&Ad::new(node_record.clone(), now)) { + if 
nodes.contains(&AdNode::new(node_record.clone(), now)) { error!( "This node {} is already advertising this topic", node_record.node_id() ); return Err("Node already advertising this topic"); } - nodes.push_back(Ad { + nodes.push_back(AdNode { node_record, insert_time: now, }); - self.expirations.push_back((now, topic)); + self.expirations.push_back(AdTopic::new(topic, now)); self.total_ads += 1; Ok(()) } } - -impl Stream for Ads { - // type returned can be unit type but for testing easier to get values, worth the overhead to keep? - type Item = Result<((Enr, Instant), Topic), String>; - fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - let (insert_time, topic) = match self.expirations.get_mut(0) { - Some((insert_time, topic)) => (insert_time, *topic), - None => { - debug!("No ads in 'table'"); - return Poll::Pending; - } - }; - - if insert_time.elapsed() < self.ad_lifetime { - return Poll::Pending; - } - - match self.ads.get_mut(&topic) { - Some(topic_ads) => { - match topic_ads.pop_front() { - Some(ad) => { - if topic_ads.is_empty() { - self.ads.remove(&topic); - } - self.total_ads -= 1; - self.expirations.remove(0); - return Poll::Ready(Some(Ok(((ad.node_record, ad.insert_time), topic)))); - } - None => { - debug_unreachable!("Panic on debug, topic key should be deleted if no ad nodes queued for it"); - error!("Topic key should be deleted if no ad nodes queued for it"); - return Poll::Ready(Some(Err("No nodes for topic".into()))); - } - } - } - None => { - debug_unreachable!( - "Panic on debug, mismatched mapping between expiration queue and entry queue" - ); - error!("Mismatched mapping between expiration queue and entry queue"); - return Poll::Ready(Some(Err("Topic doesn't exist".into()))); - } - } - } -} diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 96ce56ecf..87e58debb 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -5,6 +5,30 @@ use enr::{CombinedKey, EnrBuilder}; use 
more_asserts::{assert_gt, assert_lt}; use std::net::IpAddr; +#[tokio::test] +async fn insert_same_node() { + // Create the test values needed + let port = 6666; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + + let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); + + let topic = [1; 32]; + + ads.insert(enr.clone(), topic).unwrap(); + + // Since 2 seconds haven't passed + assert_eq!( + ads.insert(enr.clone(), topic).map_err(|e| e), + Err("Node already advertising this topic".into()) + ); + + tokio::time::sleep(Duration::from_secs(2)).await; + ads.insert(enr.clone(), topic).unwrap(); +} + #[tokio::test] async fn insert_ad_and_get_nodes() { // Create the test values needed @@ -18,14 +42,14 @@ async fn insert_ad_and_get_nodes() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(60), 10, 50).unwrap(); + let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); let topic = [1; 32]; let topic_2 = [2; 32]; ads.insert(enr.clone(), topic).unwrap(); - // Since 60 seconds haven't passed + // Since 2 seconds haven't passed assert_eq!( ads.insert(enr.clone(), topic).map_err(|e| e), Err("Node already advertising this topic".into()) @@ -51,14 +75,14 @@ async fn insert_ad_and_get_nodes() { #[tokio::test] async fn ticket_wait_time_no_wait_time() { - let ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); + let mut ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); let topic = [1; 32]; let wait_time = ads.ticket_wait_time(topic); assert_eq!(wait_time, Some(Duration::from_secs(0))) } #[tokio::test] -async fn ticket_wait_time() { +async fn ticket_wait_time_full_table() { // Create the test values needed let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); @@ -70,31 +94,33 @@ async fn ticket_wait_time() { let key 
= CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 2, 3).unwrap(); + let mut ads = Ads::new(Duration::from_secs(3), 2, 3).unwrap(); let topic = [1; 32]; let topic_2 = [2; 32]; + // Add 2 ads for topic ads.insert(enr.clone(), topic).unwrap(); - assert_eq!(ads.ticket_wait_time(topic), Some(Duration::from_secs(0))); - ads.insert(enr_2.clone(), topic).unwrap(); - // Now max_ads_per_topic is reached for topic - assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(1))); - assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); - - ads.insert(enr, topic_2).unwrap(); - // Now max_ads in table is reched - assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(1))); - assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); tokio::time::sleep(Duration::from_secs(2)).await; - // The first ads for topic have expired - assert_eq!(ads.ticket_wait_time(topic), Some(Duration::from_secs(0))); + + // Add an ad for topic_2 + ads.insert(enr.clone(), topic_2).unwrap(); + + // Now max_ads in table is reached so the second ad for topic_2 has to wait + assert_ne!(ads.ticket_wait_time(topic_2), None); + + tokio::time::sleep(Duration::from_secs(1)).await; + + // Now the first ads have expired and the table is not full so no neither topic + // or topic_2 ads have to wait + assert_eq!(ads.ticket_wait_time(topic), None); + assert_eq!(ads.ticket_wait_time(topic_2), None); } #[tokio::test] -async fn poll_ads() { +async fn ticket_wait_time_full_topic() { // Create the test values needed let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); @@ -106,33 +132,28 @@ async fn poll_ads() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); + let mut ads = Ads::new(Duration::from_secs(3), 2, 4).unwrap(); - 
let topic_1 = [1; 32]; + let topic = [1; 32]; let topic_2 = [2; 32]; - ads.insert(enr.clone(), topic_1).unwrap(); - ads.insert(enr_2, topic_1).unwrap(); + ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr_2.clone(), topic).unwrap(); - tokio::time::sleep(Duration::from_secs(1)).await; - ads.insert(enr.clone(), topic_2).unwrap(); + // Now max_ads_per_topic is reached for topic + assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); + assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); - let mut expired_ads = Vec::new(); + ads.insert(enr, topic_2).unwrap(); - let mut interval = tokio::time::interval(Duration::from_secs(1)); + // The table isn't full so we can insert more ads for topic_2 + assert_eq!(ads.ticket_wait_time(topic_2), None); - for _ in 0..10 { - tokio::select! { - Some(Ok((_, topic))) = ads.next() => { - expired_ads.push(topic); - if topic == topic_2 { - // Since (enr, topic_1) should have expired, inserting it anew should be possible - ads.insert(enr.clone(), topic_1).unwrap(); - } - } - _ = interval.tick() => {} - } - } + // But not for topic until an ad for topic expires + //assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); + //assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); + assert_ne!(ads.ticket_wait_time(topic), None); - assert_eq!(expired_ads, vec![topic_1, topic_1, topic_2, topic_1]) + tokio::time::sleep(Duration::from_secs(3)).await; + assert_eq!(ads.ticket_wait_time(topic), None); } diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 273b87801..fa01a9046 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -3,16 +3,12 @@ use delay_map::HashMapDelay; use enr::NodeId; use node_info::NodeContact; use std::cmp::Eq; -use tracing::error; -pub fn topic_hash(topic: Vec) -> Option { - if topic.len() > 32 { - error!("Topic is greater than 32 bytes"); - return None; - } +// Placeholder function +pub fn topic_hash(topic: 
Vec) -> Topic { let mut topic_hash = [0u8; 32]; topic_hash[32 - topic.len()..].copy_from_slice(&topic); - Some(topic_hash) + topic_hash } #[derive(PartialEq, Eq, Hash, Clone)] @@ -67,11 +63,6 @@ impl Ticket { } Ok(Ticket { topic: [0u8; 32] }) } - - /*pub fn regconfirmation(&mut self, node_record: Enr, topic: Topic, ticket: Ticket) -> Result<(), String> { - // chose which ad to insert from some pool of registrants-within-10-seconds-from-x - Ok(()) - }*/ } pub struct ActiveTicket { @@ -81,10 +72,7 @@ pub struct ActiveTicket { impl ActiveTicket { pub fn new(contact: NodeContact, ticket: Ticket) -> Self { - ActiveTicket { - contact, - ticket, - } + ActiveTicket { contact, ticket } } pub fn contact(&self) -> NodeContact { diff --git a/src/handler/tests.rs b/src/handler/tests.rs index 95b72042c..5f18525f3 100644 --- a/src/handler/tests.rs +++ b/src/handler/tests.rs @@ -240,12 +240,7 @@ async fn test_active_requests_insert() { body: RequestBody::Ping { enr_seq: 1 }, }; let initiating_session = true; - let request_call = RequestCall::new( - contact, - packet, - request, - initiating_session, - ); + let request_call = RequestCall::new(contact, packet, request, initiating_session); // insert the pair and verify the mapping remains in sync let nonce = request_call.packet.message_nonce().clone(); diff --git a/src/service.rs b/src/service.rs index e66ba9375..9c931f127 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, ActiveTicket, ActiveTopic, Ticket, Tickets}, + ticket::{topic_hash, Ticket, Tickets}, Ads, Topic, }, error::{RequestError, ResponseError}, @@ -340,6 +340,7 @@ impl Service { /// The main execution loop of the discv5 serviced. async fn start(&mut self) { let mut publish_topics = interval(Duration::from_secs(60 * 15)); + let mut prune_active_topics = interval(Duration::from_secs(60 * 13)); loop { tokio::select! 
{ @@ -493,8 +494,9 @@ impl Service { _ = publish_topics.tick() => { self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); } - _ = self.ads.next() => {} - _ = self.active_topics.next() => {} + _ = prune_active_topics.tick() => { + self.active_topics.remove_expired(); + } } } } @@ -663,29 +665,20 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - topic_hash(topic).map(|topic| { - self.ads.ticket_wait_time(topic).map(|wait_time| { - let new_ticket = Ticket::new(topic); - self.send_ticket_response( - node_address.clone(), - id.clone(), - new_ticket, - wait_time, - ); - - let _ticket = Ticket::decode(ticket).unwrap_or(Ticket::default()); - - // use wait time to see if there is any point at doing ticket validation - - // choose which ad to reg based on ticket, for example if some node has empty ticket - // or is coming back, and possibly other stuff - - match self.ads.insert(enr, topic) { - Ok(()) => self.send_regconfirmation_response(node_address, id, topic), - Err(e) => error!("{}", e), - } - }); - }); + let topic = topic_hash(topic); + let wait_time = self + .ads + .ticket_wait_time(topic) + .unwrap_or(Duration::from_secs(0)); + let new_ticket = Ticket::new(topic); + self.send_ticket_response(node_address.clone(), id.clone(), new_ticket, wait_time); + + let ticket = Ticket::decode(ticket).unwrap_or(Ticket::default()); + + match self.ads.regconfirmation(enr, topic, wait_time, ticket) { + Ok(()) => self.send_regconfirmation_response(node_address, id, topic), + Err(e) => error!("{}", e), + } debug!("Received RegisterTopic request which is not fully implemented"); } RequestBody::TopicQuery { topic } => { @@ -928,22 +921,14 @@ impl Service { // todo(emhane): What should max wait_time be so insert_at in Tickets doesn't panic? 
match Ticket::decode(ticket) { Ok(ticket) => self.tickets.insert( - active_request.contact, + active_request.contact, ticket, Duration::from_secs(wait_time), ), Err(e) => error!("{}", e), } } - ResponseBody::RegisterConfirmation { topic } => /* match topic_hash(topic) { - Ok(topic_hash) => { - self.active_topics.insert( - topic, - Duration::from_secs(60 * 15), - ); - } - Err(e) => error!("{}", e), - }*/{}, + ResponseBody::RegisterConfirmation { topic } => {} } } else { warn!( From bd4f6f7017865d29bf08ee5ddda2aa7de67a1db8 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 19:01:50 +0200 Subject: [PATCH 033/391] Fix remove_expired --- src/advertisement/mod.rs | 7 ++----- src/advertisement/test.rs | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 787e0577e..fdc5296ae 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -71,7 +71,6 @@ impl PartialOrd for AdTopic { pub struct Ads { expirations: VecDeque, ads: HashMap>, - total_ads: usize, ad_lifetime: Duration, max_ads_per_topic: usize, max_ads: usize, @@ -92,7 +91,6 @@ impl Ads { Ok(Ads { expirations: VecDeque::new(), ads: HashMap::new(), - total_ads: 0, ad_lifetime, max_ads_per_topic, max_ads, @@ -109,7 +107,7 @@ impl Ads { pub fn ticket_wait_time(&mut self, topic: Topic) -> Option { self.remove_expired(); let now = Instant::now(); - if self.total_ads < self.max_ads { + if self.expirations.len() < self.max_ads { match self.ads.get(&topic) { Some(nodes) => { if nodes.len() < self.max_ads_per_topic { @@ -160,11 +158,11 @@ impl Ads { let entry_ref = self.ads.entry(topic).or_default(); for _ in 0..index { entry_ref.pop_front(); + self.expirations.remove(0); } if entry_ref.is_empty() { self.ads.remove(&topic); } - self.total_ads -= index; }); None @@ -200,7 +198,6 @@ impl Ads { insert_time: now, }); self.expirations.push_back(AdTopic::new(topic, now)); - self.total_ads += 1; Ok(()) } } diff --git 
a/src/advertisement/test.rs b/src/advertisement/test.rs index 87e58debb..253dadb34 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -111,7 +111,7 @@ async fn ticket_wait_time_full_table() { // Now max_ads in table is reached so the second ad for topic_2 has to wait assert_ne!(ads.ticket_wait_time(topic_2), None); - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(3)).await; // Now the first ads have expired and the table is not full so no neither topic // or topic_2 ads have to wait From 6b150a79d8b15cc801991dd6c18d45a5c5716264 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 19:13:17 +0200 Subject: [PATCH 034/391] Fix tests for ads --- src/advertisement/test.rs | 40 ++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 253dadb34..31d864247 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -47,15 +47,19 @@ async fn insert_ad_and_get_nodes() { let topic = [1; 32]; let topic_2 = [2; 32]; + // Add an ad for topic from enr ads.insert(enr.clone(), topic).unwrap(); - // Since 2 seconds haven't passed + // The ad hasn't expired and duplicates are not allowed assert_eq!( ads.insert(enr.clone(), topic).map_err(|e| e), Err("Node already advertising this topic".into()) ); + // Add an ad for topic from enr_2 ads.insert(enr_2.clone(), topic).unwrap(); + + // Add an ad for topic_2 from enr ads.insert(enr.clone(), topic_2).unwrap(); let nodes: Vec = ads @@ -63,6 +67,7 @@ async fn insert_ad_and_get_nodes() { .unwrap() .map(|ad| ad.node_record().clone()) .collect(); + let nodes_topic_2: Vec = ads .get_ad_nodes(topic_2) .unwrap() @@ -77,8 +82,26 @@ async fn insert_ad_and_get_nodes() { async fn ticket_wait_time_no_wait_time() { let mut ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); let topic = [1; 32]; - let wait_time = ads.ticket_wait_time(topic); - 
assert_eq!(wait_time, Some(Duration::from_secs(0))) + assert_eq!(ads.ticket_wait_time(topic), None) +} + +#[tokio::test] +async fn ticket_wait_time_duration() { + // Create the test values needed + let port = 6666; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + + let mut ads = Ads::new(Duration::from_secs(3), 1, 3).unwrap(); + + let topic = [1; 32]; + + // Add an add for topic + ads.insert(enr.clone(), topic).unwrap(); + + assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); + assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); } #[tokio::test] @@ -137,21 +160,20 @@ async fn ticket_wait_time_full_topic() { let topic = [1; 32]; let topic_2 = [2; 32]; + // Add 2 ads for topic ads.insert(enr.clone(), topic).unwrap(); ads.insert(enr_2.clone(), topic).unwrap(); // Now max_ads_per_topic is reached for topic - assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); - assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); + assert_ne!(ads.ticket_wait_time(topic), None); + // Add a topic_2 ad ads.insert(enr, topic_2).unwrap(); - // The table isn't full so we can insert more ads for topic_2 + // The table isn't full so topic_2 ads don't have to wait assert_eq!(ads.ticket_wait_time(topic_2), None); - // But not for topic until an ad for topic expires - //assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); - //assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); + // But for topic they do until the first ads have expired assert_ne!(ads.ticket_wait_time(topic), None); tokio::time::sleep(Duration::from_secs(3)).await; From 2bb7bfef459ac3f18b8942f353eb762ad28bbc02 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 19:54:32 +0200 Subject: [PATCH 035/391] Simplify code --- src/advertisement/mod.rs | 49 ++++++++++++---------------------------- 
1 file changed, 15 insertions(+), 34 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index fdc5296ae..9fd80defb 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -108,42 +108,25 @@ impl Ads { self.remove_expired(); let now = Instant::now(); if self.expirations.len() < self.max_ads { - match self.ads.get(&topic) { - Some(nodes) => { - if nodes.len() < self.max_ads_per_topic { - None - } else { - match nodes.get(0) { - Some(ad) => { - let elapsed_time = now.saturating_duration_since(ad.insert_time); - Some(self.ad_lifetime.saturating_sub(elapsed_time)) - } - None => { - debug_unreachable!("Panic on debug,topic key should be deleted if no ad nodes queued for it"); - error!("Topic key should be deleted if no ad nodes queued for it"); - return None; - } - } - } - } - None => None, - } + self.ads + .get(&topic) + .filter(|nodes| nodes.len() >= self.max_ads_per_topic) + .map(|nodes| { + nodes.get(0).map(|ad| { + let elapsed_time = now.saturating_duration_since(ad.insert_time); + self.ad_lifetime.saturating_sub(elapsed_time) + }) + }) + .unwrap_or_default() } else { - match self.expirations.get(0) { - Some(ad) => { - let elapsed_time = now.saturating_duration_since(ad.insert_time); - Some(self.ad_lifetime.saturating_sub(elapsed_time)) - } - None => { - debug_unreachable!("Panic on debug, mismatched mapping between expiration queue and total ads count"); - error!("Mismatched mapping between expiration queue and total ads count"); - return None; - } - } + self.expirations.get(0).map(|ad| { + let elapsed_time = now.saturating_duration_since(ad.insert_time); + self.ad_lifetime.saturating_sub(elapsed_time) + }) } } - pub fn remove_expired(&mut self) -> Option<((Enr, Instant), Topic)> { + pub fn remove_expired(&mut self) { let mut map: HashMap = HashMap::new(); self.expirations @@ -164,8 +147,6 @@ impl Ads { self.ads.remove(&topic); } }); - - None } pub fn regconfirmation( From eef4db4424b5563dfe85b0effb693125fac814a5 Mon Sep 
17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 19:56:58 +0200 Subject: [PATCH 036/391] Clean up code --- src/advertisement/mod.rs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 9fd80defb..9ad2b41a8 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -2,9 +2,7 @@ use super::*; use crate::Enr; use core::time::Duration; use futures::prelude::*; -use more_asserts::debug_unreachable; use std::{ - cmp::Ordering, collections::{vec_deque::Iter, HashMap, VecDeque}, pin::Pin, task::{Context, Poll}, @@ -44,7 +42,6 @@ impl PartialEq for AdNode { } } -#[derive(Ord, Eq)] struct AdTopic { topic: Topic, insert_time: Instant, @@ -56,18 +53,6 @@ impl AdTopic { } } -impl PartialEq for AdTopic { - fn eq(&self, other: &Self) -> bool { - self.topic == other.topic - } -} - -impl PartialOrd for AdTopic { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.topic.cmp(&other.topic)) - } -} - pub struct Ads { expirations: VecDeque, ads: HashMap>, From 621acfa1e1b4771b671259399911c576413729fe Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 20:25:55 +0200 Subject: [PATCH 037/391] Fix clippy warnings --- src/advertisement/mod.rs | 2 +- src/handler/active_requests.rs | 4 +-- src/handler/crypto/mod.rs | 2 +- src/handler/mod.rs | 2 +- src/packet/mod.rs | 4 +-- src/service.rs | 47 ++++++++++++---------------------- 6 files changed, 24 insertions(+), 37 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 9ad2b41a8..3486c91e9 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -84,7 +84,7 @@ impl Ads { pub fn get_ad_nodes(&self, topic: Topic) -> Result, &str> { match self.ads.get(&topic) { - Some(topic_ads) => Ok(topic_ads.into_iter()), + Some(topic_ads) => Ok(topic_ads.iter()), None => Err("No ads for this topic"), } } diff --git a/src/handler/active_requests.rs b/src/handler/active_requests.rs index 
929d78a32..a55324ce8 100644 --- a/src/handler/active_requests.rs +++ b/src/handler/active_requests.rs @@ -42,7 +42,7 @@ impl ActiveRequests { None => { debug_unreachable!("Panic on debug, a matching request call doesn't exist"); error!("A matching request call doesn't exist"); - return None; + None } }, None => None, @@ -63,7 +63,7 @@ impl ActiveRequests { "Panic on debug, a matching nonce mapping doesn't exist" ); error!("A matching nonce mapping doesn't exist"); - return None; + None } } } diff --git a/src/handler/crypto/mod.rs b/src/handler/crypto/mod.rs index 5a0d764b0..a6c9d2c15 100644 --- a/src/handler/crypto/mod.rs +++ b/src/handler/crypto/mod.rs @@ -179,7 +179,7 @@ fn generate_signing_nonce( let mut data = ID_SIGNATURE_TEXT.as_bytes().to_vec(); data.extend_from_slice(challenge_data.as_ref()); data.extend_from_slice(ephem_pubkey); - data.extend_from_slice(&dst_id.raw().to_vec()); + data.extend_from_slice(dst_id.raw().as_ref()); data } diff --git a/src/handler/mod.rs b/src/handler/mod.rs index af69d0840..1fb371620 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1077,7 +1077,7 @@ impl Handler { for request in self .pending_requests .remove(node_address) - .unwrap_or_else(Vec::new) + .unwrap_or_default() { let _ = self .service_send diff --git a/src/packet/mod.rs b/src/packet/mod.rs index 240618cbf..adf8facae 100644 --- a/src/packet/mod.rs +++ b/src/packet/mod.rs @@ -514,7 +514,7 @@ impl std::fmt::Display for Packet { f, "Packet {{ iv: {}, header: {}, message {} }}", hex::encode(self.iv.to_be_bytes()), - self.header.to_string(), + self.header, hex::encode(&self.message) ) } @@ -526,7 +526,7 @@ impl std::fmt::Display for PacketHeader { f, "PacketHeader {{ message_nonce: {}, kind: {} }}", hex::encode(self.message_nonce), - self.kind.to_string() + self.kind ) } } diff --git a/src/service.rs b/src/service.rs index 9c931f127..0a87fb474 100644 --- a/src/service.rs +++ b/src/service.rs @@ -433,7 +433,7 @@ impl Service { let mut result = 
query.into_result(); // obtain the ENR's for the resulting nodes let mut found_enrs = Vec::new(); - for node_id in result.closest_peers.into_iter() { + for node_id in result.closest_peers { if let Some(position) = result.target.untrusted_enrs.iter().position(|enr| enr.node_id() == node_id) { let enr = result.target.untrusted_enrs.swap_remove(position); found_enrs.push(enr); @@ -446,27 +446,18 @@ impl Service { } } - let node_id = match result.target.query_type { - QueryType::FindNode(node_id) => node_id, - }; + let QueryType::FindNode(node_id) = result.target.query_type; - let topic = match self.topics.get(&node_id.raw()) { - Some(topic) => Some(*topic), - None => None, - }; + let topic = self.topics.get(&node_id.raw()).copied(); if let Some(topic) = topic { - let local_enr = match self.local_enr.read().clone() { - enr => enr, - }; + let local_enr = self.local_enr.read().clone(); found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), Ticket::default())); - } else { - if let Some(callback) = result.target.callback { + } else if let Some(callback) = result.target.callback { if callback.send(found_enrs).is_err() { warn!("Callback dropped for query {}. 
Results dropped", *id); } } - } } } } @@ -486,9 +477,7 @@ impl Service { } } Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { - let enr = match self.local_enr.read().clone() { - enr => enr, - }; + let enr = self.local_enr.read().clone(); self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, active_ticket.ticket()); } _ = publish_topics.tick() => { @@ -673,7 +662,7 @@ impl Service { let new_ticket = Ticket::new(topic); self.send_ticket_response(node_address.clone(), id.clone(), new_ticket, wait_time); - let ticket = Ticket::decode(ticket).unwrap_or(Ticket::default()); + let ticket = Ticket::decode(ticket).unwrap_or_default(); match self.ads.regconfirmation(enr, topic, wait_time, ticket) { Ok(()) => self.send_regconfirmation_response(node_address, id, topic), @@ -928,7 +917,7 @@ impl Service { Err(e) => error!("{}", e), } } - ResponseBody::RegisterConfirmation { topic } => {} + ResponseBody::RegisterConfirmation { .. } => {} } } else { warn!( @@ -984,7 +973,7 @@ impl Service { ) { let request_body = RequestBody::FindNode { distances: vec![0] }; let active_request = ActiveRequest { - contact: contact, + contact, request_body, query_id: None, callback: callback.map(CallbackResponse::Enr), @@ -1003,7 +992,7 @@ impl Service { let request_body = RequestBody::Talk { protocol, request }; let active_request = ActiveRequest { - contact: contact, + contact, request_body, query_id: None, callback: Some(CallbackResponse::Talk(callback)), @@ -1019,7 +1008,7 @@ impl Service { }; let active_request = ActiveRequest { - contact: contact, + contact, request_body, query_id: None, callback: None, @@ -1046,10 +1035,9 @@ impl Service { node_address, response ); - let _ = self.handler_send.send(HandlerIn::Response( - node_address.clone(), - Box::new(response), - )); + let _ = self + .handler_send + .send(HandlerIn::Response(node_address, Box::new(response))); } fn send_regconfirmation_response( @@ -1069,10 +1057,9 @@ impl Service { node_address, 
response ); - let _ = self.handler_send.send(HandlerIn::Response( - node_address.clone(), - Box::new(response), - )); + let _ = self + .handler_send + .send(HandlerIn::Response(node_address, Box::new(response))); } fn send_topic_query_response( From 17acaa11d3930b8b10afad65b2fc9546b60df289 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 20:46:36 +0200 Subject: [PATCH 038/391] Simplify code --- src/advertisement/mod.rs | 18 ++++++++---------- src/advertisement/test.rs | 14 +++----------- src/service.rs | 9 ++------- 3 files changed, 13 insertions(+), 28 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 3486c91e9..b77ba9c25 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -3,7 +3,7 @@ use crate::Enr; use core::time::Duration; use futures::prelude::*; use std::{ - collections::{vec_deque::Iter, HashMap, VecDeque}, + collections::{HashMap, VecDeque}, pin::Pin, task::{Context, Poll}, }; @@ -30,10 +30,6 @@ impl AdNode { insert_time, } } - - pub fn node_record(&self) -> &Enr { - &self.node_record - } } impl PartialEq for AdNode { @@ -82,11 +78,13 @@ impl Ads { }) } - pub fn get_ad_nodes(&self, topic: Topic) -> Result, &str> { - match self.ads.get(&topic) { - Some(topic_ads) => Ok(topic_ads.iter()), - None => Err("No ads for this topic"), - } + pub fn get_ad_nodes(&self, topic: Topic) -> impl Iterator + '_ { + self.ads + .get(&topic) + .into_iter() + .map(|nodes| nodes.into_iter()) + .flatten() + .map(|node| node.node_record.clone()) } pub fn ticket_wait_time(&mut self, topic: Topic) -> Option { diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 31d864247..8a3637c09 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -62,17 +62,9 @@ async fn insert_ad_and_get_nodes() { // Add an ad for topic_2 from enr ads.insert(enr.clone(), topic_2).unwrap(); - let nodes: Vec = ads - .get_ad_nodes(topic) - .unwrap() - .map(|ad| ad.node_record().clone()) - .collect(); - - 
let nodes_topic_2: Vec = ads - .get_ad_nodes(topic_2) - .unwrap() - .map(|ad| ad.node_record().clone()) - .collect(); + let nodes: Vec = ads.get_ad_nodes(topic).collect(); + + let nodes_topic_2: Vec = ads.get_ad_nodes(topic_2).collect(); assert_eq!(nodes, vec![enr.clone(), enr_2]); assert_eq!(nodes_topic_2, vec![enr]); diff --git a/src/service.rs b/src/service.rs index 0a87fb474..f294bafcd 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1068,13 +1068,8 @@ impl Service { rpc_id: RequestId, topic: [u8; 32], ) { - let nodes_to_send = match self.ads.get_ad_nodes(topic) { - Ok(iter) => iter.map(|ad| ad.node_record().clone()).collect(), - Err(e) => { - error!("{}", e); - Vec::new() - } - }; + let nodes_to_send = self.ads.get_ad_nodes(topic).collect(); + self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); } From fc5f87e2ca2a479091660c74e44a586626516d58 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 20:53:08 +0200 Subject: [PATCH 039/391] Fix clippy warnings --- src/advertisement/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index b77ba9c25..e7695c204 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -82,8 +82,7 @@ impl Ads { self.ads .get(&topic) .into_iter() - .map(|nodes| nodes.into_iter()) - .flatten() + .flat_map(|nodes| nodes.iter()) .map(|node| node.node_record.clone()) } From 3a1074e0a094a44730134f78afe24ade45929995 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 8 Apr 2022 20:56:02 +0200 Subject: [PATCH 040/391] Simplify code --- src/advertisement/mod.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index e7695c204..2c7fbb1b8 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -63,11 +63,9 @@ impl Ads { max_ads_per_topic: usize, max_ads: usize, ) -> Result { - let (max_ads_per_topic, max_ads) = if 
max_ads_per_topic <= max_ads { - (max_ads_per_topic, max_ads) - } else { - return Err("Values passed to max_ads_per_topic and max_ads don't make sense"); - }; + if max_ads_per_topic > max_ads { + return Err("Adds per topic cannot be > max_ads"); + } Ok(Ads { expirations: VecDeque::new(), From d1d96f308dbd6a1e7af48efd5956a25a56115903 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Apr 2022 10:44:31 +0200 Subject: [PATCH 041/391] Keep ads we place on other nodes --- src/advertisement/mod.rs | 6 +++--- src/service.rs | 11 ++++++----- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 2c7fbb1b8..7f907764e 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -64,7 +64,7 @@ impl Ads { max_ads: usize, ) -> Result { if max_ads_per_topic > max_ads { - return Err("Adds per topic cannot be > max_ads"); + return Err("Ads per topic cannot be > max_ads"); } Ok(Ads { @@ -106,7 +106,7 @@ impl Ads { } } - pub fn remove_expired(&mut self) { + fn remove_expired(&mut self) { let mut map: HashMap = HashMap::new(); self.expirations @@ -143,7 +143,7 @@ impl Ads { self.insert(node_record, topic) } - fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { + pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { self.remove_expired(); let now = Instant::now(); let nodes = self.ads.entry(topic).or_default(); diff --git a/src/service.rs b/src/service.rs index f294bafcd..d874ae6f4 100644 --- a/src/service.rs +++ b/src/service.rs @@ -340,7 +340,6 @@ impl Service { /// The main execution loop of the discv5 serviced. async fn start(&mut self) { let mut publish_topics = interval(Duration::from_secs(60 * 15)); - let mut prune_active_topics = interval(Duration::from_secs(60 * 13)); loop { tokio::select! 
{ @@ -483,9 +482,6 @@ impl Service { _ = publish_topics.tick() => { self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); } - _ = prune_active_topics.tick() => { - self.active_topics.remove_expired(); - } } } } @@ -917,7 +913,12 @@ impl Service { Err(e) => error!("{}", e), } } - ResponseBody::RegisterConfirmation { .. } => {} + ResponseBody::RegisterConfirmation { topic } => { + if let NodeContact::Enr(enr) = active_request.contact { + let topic = topic_hash(topic); + self.active_topics.insert(*enr, topic).ok(); + } + } } } else { warn!( From 6e91bfbd601e060bf41a0e3f62abf4f4c0a8dd00 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Apr 2022 12:45:23 +0200 Subject: [PATCH 042/391] Limit rate limit ticket acceptance --- src/advertisement/ticket.rs | 108 ++++++++++++++++++++++++++++++------ src/service.rs | 27 +++++---- 2 files changed, 106 insertions(+), 29 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index fa01a9046..ee1aa7325 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -11,7 +11,7 @@ pub fn topic_hash(topic: Vec) -> Topic { topic_hash } -#[derive(PartialEq, Eq, Hash, Clone)] +#[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct ActiveTopic { node_id: NodeId, topic: Topic, @@ -27,41 +27,52 @@ impl ActiveTopic { } } -#[derive(Default, Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone)] pub struct Ticket { //nonce: u64, //src_node_id: NodeId, //src_ip: IpAddr, topic: Topic, - //req_time: Instant, - //wait_time: Duration, + req_time: Instant, + wait_time: Duration, //cum_wait: Option,*/ } +impl Default for Ticket { + fn default() -> Self { + Ticket { + topic: [0u8; 32], + req_time: Instant::now(), + wait_time: Duration::default(), + } + } +} + impl Ticket { pub fn new( //nonce: u64, //src_node_id: NodeId, //src_ip: IpAddr, topic: Topic, - //req_time: Instant, - //wait_time: Duration,*/ + req_time: Instant, + wait_time: Duration, ) -> Self 
{ Ticket { //nonce, //src_node_id, //src_ip, topic, - //req_time, - //wait_time, + req_time, + wait_time, } } - pub fn decode(ticket_bytes: Vec) -> Result { + pub fn decode(ticket_bytes: Vec) -> Option { if ticket_bytes.is_empty() { - return Err("Ticket has wrong format".into()); + debug!("Empty ticket"); + return None; } - Ok(Ticket { topic: [0u8; 32] }) + Some(Ticket::default()) } } @@ -86,21 +97,31 @@ impl ActiveTicket { pub struct Tickets { tickets: HashMapDelay, + ticket_history: TicketHistory, } impl Tickets { - pub fn new() -> Self { + pub fn new(ticket_cache_duration: Duration) -> Self { Tickets { tickets: HashMapDelay::new(Duration::default()), + ticket_history: TicketHistory::new(ticket_cache_duration), } } - pub fn insert(&mut self, contact: NodeContact, ticket: Ticket, wait_time: Duration) { - self.tickets.insert_at( - ActiveTopic::new(contact.node_id(), ticket.topic), - ActiveTicket::new(contact, ticket), - wait_time, - ); + pub fn insert( + &mut self, + contact: NodeContact, + ticket: Ticket, + wait_time: Duration, + ) -> Result<(), &str> { + let active_topic = ActiveTopic::new(contact.node_id(), ticket.topic); + + if let Err(e) = self.ticket_history.insert(active_topic) { + return Err(e); + } + self.tickets + .insert_at(active_topic, ActiveTicket::new(contact, ticket), wait_time); + Ok(()) } } @@ -120,3 +141,54 @@ impl Stream for Tickets { } } } + +struct TicketLimiter { + active_topic: ActiveTopic, + first_seen: Instant, +} + +#[derive(Default)] +struct TicketHistory { + ticket_cache: HashMap, + expirations: VecDeque, + ticket_cache_duration: Duration, +} + +impl TicketHistory { + pub fn new(ticket_cache_duration: Duration) -> Self { + TicketHistory { + ticket_cache: HashMap::new(), + expirations: VecDeque::new(), + ticket_cache_duration, + } + } + + pub fn insert(&mut self, active_topic: ActiveTopic) -> Result<(), &str> { + self.remove_expired(); + let count = self.ticket_cache.entry(active_topic).or_default(); + if *count >= 3 { + error!("Max 3 
tickets per (NodeId, Topic) accepted in 15 minutes"); + return Err("Ticket limit reached"); + } + *count += 1; + Ok(()) + } + + fn remove_expired(&mut self) { + let now = Instant::now(); + let cached_tickets = self + .expirations + .iter() + .take_while(|ticket_limiter| { + now.saturating_duration_since(ticket_limiter.first_seen) + >= self.ticket_cache_duration + }) + .map(|ticket_limiter| ticket_limiter.active_topic) + .collect::>(); + + cached_tickets.iter().for_each(|active_topic| { + self.ticket_cache.remove(active_topic); + self.expirations.pop_front(); + }); + } +} diff --git a/src/service.rs b/src/service.rs index d874ae6f4..96fc9ebce 100644 --- a/src/service.rs +++ b/src/service.rs @@ -133,6 +133,9 @@ impl TalkRequest { /// The number of distances (buckets) we simultaneously request from each peer. pub(crate) const DISTANCES_TO_REQUEST_PER_PEER: usize = 3; +/// The max wait time accpeted for tickets. +const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; + /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { /// A request to start a query. There are two types of queries: @@ -323,7 +326,7 @@ impl Service { discv5_recv, event_stream: None, ads, - tickets: Tickets::new(), + tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashSet::new(), active_topics, exit, @@ -655,7 +658,7 @@ impl Service { .ads .ticket_wait_time(topic) .unwrap_or(Duration::from_secs(0)); - let new_ticket = Ticket::new(topic); + let new_ticket = Ticket::new(topic, tokio::time::Instant::now(), wait_time); self.send_ticket_response(node_address.clone(), id.clone(), new_ticket, wait_time); let ticket = Ticket::decode(ticket).unwrap_or_default(); @@ -903,18 +906,20 @@ impl Service { } } ResponseBody::Ticket { ticket, wait_time } => { - // todo(emhane): What should max wait_time be so insert_at in Tickets doesn't panic? 
- match Ticket::decode(ticket) { - Ok(ticket) => self.tickets.insert( - active_request.contact, - ticket, - Duration::from_secs(wait_time), - ), - Err(e) => error!("{}", e), + if wait_time <= MAX_WAIT_TIME_TICKET { + Ticket::decode(ticket).map(|ticket| { + self.tickets + .insert( + active_request.contact, + ticket, + Duration::from_secs(wait_time), + ) + .ok(); + }); } } ResponseBody::RegisterConfirmation { topic } => { - if let NodeContact::Enr(enr) = active_request.contact { + if let NodeContact::Enr(enr) = active_request.contact { let topic = topic_hash(topic); self.active_topics.insert(*enr, topic).ok(); } From 673f72ba51b99c39b959fc33228c582532a207d7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Apr 2022 12:51:36 +0200 Subject: [PATCH 043/391] Fix broken test --- src/advertisement/ticket.rs | 4 ++-- src/service/test.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index ee1aa7325..de918cccd 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -142,7 +142,7 @@ impl Stream for Tickets { } } -struct TicketLimiter { +struct TicketRateLimiter { active_topic: ActiveTopic, first_seen: Instant, } @@ -150,7 +150,7 @@ struct TicketLimiter { #[derive(Default)] struct TicketHistory { ticket_cache: HashMap, - expirations: VecDeque, + expirations: VecDeque, ticket_cache_duration: Duration, } diff --git a/src/service/test.rs b/src/service/test.rs index 8520afdfe..a3a7e50d4 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -98,7 +98,7 @@ async fn build_service( discv5_recv, event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), - tickets: Tickets::new(), + tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashSet::new(), active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), exit, From ba2d91a762772814799bbf1071242a4beea52ac9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Apr 
2022 18:57:57 +0200 Subject: [PATCH 044/391] Implement ticket pool --- src/advertisement/mod.rs | 15 ----- src/advertisement/ticket.rs | 109 +++++++++++++++++++++++++++++++----- src/service.rs | 72 ++++++++++++++++-------- src/service/test.rs | 1 + 4 files changed, 145 insertions(+), 52 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 7f907764e..c070446e5 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -7,7 +7,6 @@ use std::{ pin::Pin, task::{Context, Poll}, }; -use ticket::Ticket; use tokio::time::Instant; use tracing::{debug, error}; @@ -129,20 +128,6 @@ impl Ads { }); } - pub fn regconfirmation( - &mut self, - node_record: Enr, - topic: Topic, - wait_time: Duration, - _ticket: Ticket, - ) -> Result<(), &str> { - if wait_time > Duration::from_secs(0) { - return Err("currently no space for this ad"); - } - // do some validation of tiket against other tickets received in registration window - self.insert(node_record, topic) - } - pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { self.remove_expired(); let now = Instant::now(); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index de918cccd..31243867c 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,8 +1,12 @@ use super::*; +use crate::{ + enr::{CombinedKey, EnrBuilder}, + rpc::RequestId, +}; use delay_map::HashMapDelay; use enr::NodeId; use node_info::NodeContact; -use std::cmp::Eq; +use std::{cmp::Eq, net::IpAddr}; // Placeholder function pub fn topic_hash(topic: Vec) -> Topic { @@ -30,17 +34,28 @@ impl ActiveTopic { #[derive(Debug, Copy, Clone)] pub struct Ticket { //nonce: u64, - //src_node_id: NodeId, - //src_ip: IpAddr, + src_node_id: NodeId, + src_ip: IpAddr, topic: Topic, req_time: Instant, wait_time: Duration, //cum_wait: Option,*/ } +// DEBUG impl Default for Ticket { fn default() -> Self { + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + + let 
key = CombinedKey::generate_secp256k1(); + + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let node_id = enr.node_id(); + Ticket { + src_node_id: node_id, + src_ip: ip, topic: [0u8; 32], req_time: Instant::now(), wait_time: Duration::default(), @@ -48,31 +63,35 @@ impl Default for Ticket { } } +impl PartialEq for Ticket { + fn eq(&self, other: &Self) -> bool { + self.src_node_id == other.src_node_id + && self.src_ip == other.src_ip + && self.topic == other.topic + } +} + impl Ticket { pub fn new( //nonce: u64, - //src_node_id: NodeId, - //src_ip: IpAddr, + src_node_id: NodeId, + src_ip: IpAddr, topic: Topic, req_time: Instant, wait_time: Duration, ) -> Self { Ticket { //nonce, - //src_node_id, - //src_ip, + src_node_id, + src_ip, topic, req_time, wait_time, } } - pub fn decode(ticket_bytes: Vec) -> Option { - if ticket_bytes.is_empty() { - debug!("Empty ticket"); - return None; - } - Some(Ticket::default()) + pub fn decode(_ticket_bytes: Vec) -> Result { + Ok(Ticket::default()) } } @@ -95,6 +114,7 @@ impl ActiveTicket { } } +/// Tickets received from other nodes as response to REGTOPIC req pub struct Tickets { tickets: HashMapDelay, ticket_history: TicketHistory, @@ -155,7 +175,7 @@ struct TicketHistory { } impl TicketHistory { - pub fn new(ticket_cache_duration: Duration) -> Self { + fn new(ticket_cache_duration: Duration) -> Self { TicketHistory { ticket_cache: HashMap::new(), expirations: VecDeque::new(), @@ -192,3 +212,64 @@ impl TicketHistory { }); } } + +#[derive(Clone, Copy)] +struct RegistrationWindow { + topic: Topic, + open_time: Instant, +} + +pub struct TicketPools { + ticket_pools: HashMap>, + expirations: VecDeque, +} + +impl TicketPools { + pub fn new() -> Self { + TicketPools { + ticket_pools: HashMap::new(), + expirations: VecDeque::new(), + } + } + + pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket) { + let open_time = ticket.req_time.checked_add(ticket.wait_time).unwrap(); + if 
open_time.elapsed() > Duration::from_secs(10) { + return; + } + let pool = self.ticket_pools.entry(ticket.topic).or_default(); + if pool.is_empty() { + self.expirations.push_back(RegistrationWindow { + topic: ticket.topic, + open_time, + }); + } + pool.insert(node_record.node_id(), (node_record, req_id, ticket)); + } +} + +impl Stream for TicketPools { + type Item = Result<(Topic, Enr, RequestId), String>; + fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + self.expirations + .get(0) + .map(|reg_window| *reg_window) + .map(|reg_window| { + if reg_window.open_time.elapsed() >= Duration::from_secs(10) { + self.ticket_pools + .remove_entry(®_window.topic) + .map(|(topic, ticket_pool)| { + // do some proper selection based on node_address and ticket + let (_node_id, (node_record, req_id, _ticket)) = + ticket_pool.into_iter().next().unwrap(); + self.expirations.pop_front(); + Poll::Ready(Some(Ok((topic, node_record, req_id)))) + }) + .unwrap_or(Poll::Ready(Some(Err("Ticket selection failed".into())))) + } else { + Poll::Pending + } + }) + .unwrap_or(Poll::Pending) + } +} diff --git a/src/service.rs b/src/service.rs index 96fc9ebce..1e395ebc8 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, Ticket, Tickets}, + ticket::{topic_hash, Ticket, TicketPools, Tickets}, Ads, Topic, }, error::{RequestError, ResponseError}, @@ -217,6 +217,9 @@ pub struct Service { /// Ads currently advertised on other nodes. active_topics: Ads, + + /// Tickets pending registration + ticket_pools: TicketPools, } /// Active RPC request awaiting a response from the handler. 
@@ -329,6 +332,7 @@ impl Service { tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashSet::new(), active_topics, + ticket_pools: TicketPools::new(), exit, config: config.clone(), }; @@ -485,6 +489,12 @@ impl Service { _ = publish_topics.tick() => { self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); } + Some(Ok((topic, node_record, req_id))) = self.ticket_pools.next() => { + self.ads.insert(node_record.clone(), topic).ok(); + NodeContact::from(node_record).node_address().map(|node_address| { + self.send_regconfirmation_response(node_address, req_id, topic); + }).ok(); + } } } } @@ -654,20 +664,34 @@ impl Service { } RequestBody::RegisterTopic { topic, enr, ticket } => { let topic = topic_hash(topic); - let wait_time = self - .ads - .ticket_wait_time(topic) - .unwrap_or(Duration::from_secs(0)); - let new_ticket = Ticket::new(topic, tokio::time::Instant::now(), wait_time); - self.send_ticket_response(node_address.clone(), id.clone(), new_ticket, wait_time); - - let ticket = Ticket::decode(ticket).unwrap_or_default(); - - match self.ads.regconfirmation(enr, topic, wait_time, ticket) { - Ok(()) => self.send_regconfirmation_response(node_address, id, topic), - Err(e) => error!("{}", e), + let wait_time = self.ads.ticket_wait_time(topic); + + let new_ticket = Ticket::new( + node_address.node_id, + node_address.socket_addr.ip(), + topic, + tokio::time::Instant::now(), + wait_time.unwrap_or(Duration::from_secs(0)), + ); + self.send_ticket_response( + node_address.clone(), + id.clone(), + new_ticket, + wait_time.unwrap_or(Duration::from_secs(0)), + ); + + if ticket.is_empty() { + self.ticket_pools.insert(enr, id, new_ticket); + } else { + Ticket::decode(ticket) + .map(|ticket| { + // Validate ticket + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, ticket); + } + }) + .ok(); } - debug!("Received RegisterTopic request which is not fully implemented"); } RequestBody::TopicQuery { topic } => { 
self.send_topic_query_response(node_address, id, topic); @@ -907,15 +931,17 @@ impl Service { } ResponseBody::Ticket { ticket, wait_time } => { if wait_time <= MAX_WAIT_TIME_TICKET { - Ticket::decode(ticket).map(|ticket| { - self.tickets - .insert( - active_request.contact, - ticket, - Duration::from_secs(wait_time), - ) - .ok(); - }); + Ticket::decode(ticket) + .map(|ticket| { + self.tickets + .insert( + active_request.contact, + ticket, + Duration::from_secs(wait_time), + ) + .ok(); + }) + .ok(); } } ResponseBody::RegisterConfirmation { topic } => { diff --git a/src/service/test.rs b/src/service/test.rs index a3a7e50d4..2aec38aa8 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -101,6 +101,7 @@ async fn build_service( tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashSet::new(), active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), + ticket_pools: TicketPools::new(), exit, config, } From 5489a9fe8047d3b93216a0c44188a9005080e965 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 12 Apr 2022 11:21:47 +0200 Subject: [PATCH 045/391] Move selection of new ad from ticket pool to Service --- src/advertisement/ticket.rs | 34 +++++++++++++++++----------------- src/service.rs | 20 +++++++++++++++----- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 31243867c..a6cf59d8f 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -233,37 +233,37 @@ impl TicketPools { } pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket) { - let open_time = ticket.req_time.checked_add(ticket.wait_time).unwrap(); - if open_time.elapsed() > Duration::from_secs(10) { - return; - } - let pool = self.ticket_pools.entry(ticket.topic).or_default(); - if pool.is_empty() { - self.expirations.push_back(RegistrationWindow { - topic: ticket.topic, - open_time, + ticket + .req_time + .checked_add(ticket.wait_time) + 
.map(|open_time| { + if open_time.elapsed() <= Duration::from_secs(10) { + let pool = self.ticket_pools.entry(ticket.topic).or_default(); + if pool.is_empty() { + self.expirations.push_back(RegistrationWindow { + topic: ticket.topic, + open_time, + }); + } + pool.insert(node_record.node_id(), (node_record, req_id, ticket)); + } }); - } - pool.insert(node_record.node_id(), (node_record, req_id, ticket)); } } impl Stream for TicketPools { - type Item = Result<(Topic, Enr, RequestId), String>; + type Item = Result<(Topic, HashMap), String>; fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { self.expirations .get(0) .map(|reg_window| *reg_window) .map(|reg_window| { - if reg_window.open_time.elapsed() >= Duration::from_secs(10) { + if reg_window.open_time.elapsed() > Duration::from_secs(10) { self.ticket_pools .remove_entry(®_window.topic) .map(|(topic, ticket_pool)| { - // do some proper selection based on node_address and ticket - let (_node_id, (node_record, req_id, _ticket)) = - ticket_pool.into_iter().next().unwrap(); self.expirations.pop_front(); - Poll::Ready(Some(Ok((topic, node_record, req_id)))) + Poll::Ready(Some(Ok((topic, ticket_pool)))) }) .unwrap_or(Poll::Ready(Some(Err("Ticket selection failed".into())))) } else { diff --git a/src/service.rs b/src/service.rs index 1e395ebc8..dcc968b26 100644 --- a/src/service.rs +++ b/src/service.rs @@ -489,11 +489,21 @@ impl Service { _ = publish_topics.tick() => { self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); } - Some(Ok((topic, node_record, req_id))) = self.ticket_pools.next() => { - self.ads.insert(node_record.clone(), topic).ok(); - NodeContact::from(node_record).node_address().map(|node_address| { - self.send_regconfirmation_response(node_address, req_id, topic); - }).ok(); + Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { + // Selection of node for free ad slot + let kbucket_keys = 
self.kbuckets.write().iter().map(|entry| *entry.node.key.preimage()).collect::>(); + let selection = ticket_pool.keys().filter(|node_id| !kbucket_keys.contains(node_id)).collect::>(); + let new_ad: Option<&(Enr, RequestId, Ticket)> = if selection.is_empty() { + ticket_pool.values().next() + } else { + selection.into_iter().next().map(|node_id| ticket_pool.get(node_id)).unwrap_or(None) + }; + new_ad.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)).map(|(node_record, req_id, _ticket)| { + self.ads.insert(node_record.clone(), topic).ok(); + NodeContact::from(node_record).node_address().map(|node_address| { + self.send_regconfirmation_response(node_address, req_id, topic); + }).ok(); + }); } } } From c23161e05745aa1513c1f652675f9d56f74a9558 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Apr 2022 13:15:49 +0200 Subject: [PATCH 046/391] Monitor outgoing REGTOPIC requests --- src/advertisement/mod.rs | 3 +- src/advertisement/ticket.rs | 96 ++++++++++++++++++++++++++++++++++--- src/service.rs | 81 ++++++++++++++++++------------- src/service/test.rs | 1 + 4 files changed, 140 insertions(+), 41 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index c070446e5..f9f9fe9e0 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -15,7 +15,6 @@ pub mod ticket; pub type Topic = [u8; 32]; -/// An ad we are adevrtising for another node #[derive(Debug)] pub struct AdNode { node_record: Enr, @@ -120,7 +119,7 @@ impl Ads { let entry_ref = self.ads.entry(topic).or_default(); for _ in 0..index { entry_ref.pop_front(); - self.expirations.remove(0); + self.expirations.pop_front(); } if entry_ref.is_empty() { self.ads.remove(&topic); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index a6cf59d8f..299e6fba1 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -6,7 +6,7 @@ use crate::{ use delay_map::HashMapDelay; use enr::NodeId; use node_info::NodeContact; 
-use std::{cmp::Eq, net::IpAddr}; +use std::{cmp::Eq, collections::HashSet, net::IpAddr}; // Placeholder function pub fn topic_hash(topic: Vec) -> Topic { @@ -239,13 +239,16 @@ impl TicketPools { .map(|open_time| { if open_time.elapsed() <= Duration::from_secs(10) { let pool = self.ticket_pools.entry(ticket.topic).or_default(); - if pool.is_empty() { - self.expirations.push_back(RegistrationWindow { - topic: ticket.topic, - open_time, - }); + // Drop request if pool contains 50 nodes + if pool.len() < 50 { + if pool.is_empty() { + self.expirations.push_back(RegistrationWindow { + topic: ticket.topic, + open_time, + }); + } + pool.insert(node_record.node_id(), (node_record, req_id, ticket)); } - pool.insert(node_record.node_id(), (node_record, req_id, ticket)); } }); } @@ -273,3 +276,82 @@ impl Stream for TicketPools { .unwrap_or(Poll::Pending) } } + +#[derive(Clone, Copy)] +pub struct ActiveRegtopicRequest { + active_topic: ActiveTopic, + insert_time: Instant, +} + +impl ActiveRegtopicRequest { + fn new(active_topic: ActiveTopic, insert_time: Instant) -> Self { + ActiveRegtopicRequest { + active_topic, + insert_time, + } + } +} + +pub struct ActiveRegtopicRequests { + requests: HashMap>, + expirations: VecDeque, +} + +impl ActiveRegtopicRequests { + pub fn new() -> Self { + ActiveRegtopicRequests { + requests: HashMap::new(), + expirations: VecDeque::new(), + } + } + + pub fn is_active_req( + &mut self, + req_id: RequestId, + node_id: NodeId, + topic: Topic, + ) -> Option { + self.remove_expired(); + self.requests + .remove(&ActiveTopic::new(node_id, topic)) + .map(|ids| ids.contains(&req_id)) + } + + pub fn insert(&mut self, node_id: NodeId, topic: Topic, req_id: RequestId) { + self.remove_expired(); + let now = Instant::now(); + let active_topic = ActiveTopic::new(node_id, topic); + + // Since a REGTOPIC request always receives a TICKET response, when we come to register with a ticket which + // wait-time is up we get a TICKET response with wait-time 0, hence we 
initiate a new REGTOPIC request. + // Since the registration window is 10 seconds, incase we would receive a RECONGIRMATION for that first + // REGTOPIC, that req-id would have been replaced, so we use a set. We extend the req-id set life-time upon + // each insert incase a REGCONFIRMATION comes to a later req-id. Max req-ids in a set is limited by our + // implementation accepting max 3 tickets for a (NodeId, Topic) within 15 minutes. + self.requests + .entry(active_topic) + .or_default() + .insert(req_id); + self.expirations + .iter() + .enumerate() + .find(|(_, req)| req.active_topic == active_topic) + .map(|(index, _)| index) + .map(|index| self.expirations.remove(index)); + self.expirations + .push_back(ActiveRegtopicRequest::new(active_topic, now)); + } + + fn remove_expired(&mut self) { + self.expirations + .iter() + .take_while(|req| req.insert_time.elapsed() >= Duration::from_secs(15)) + .map(|req| *req) + .collect::>() + .iter() + .for_each(|req| { + self.requests.remove(&req.active_topic); + self.expirations.pop_front(); + }); + } +} diff --git a/src/service.rs b/src/service.rs index dcc968b26..83e919c44 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, Ticket, TicketPools, Tickets}, + ticket::{topic_hash, ActiveRegtopicRequests, Ticket, TicketPools, Tickets}, Ads, Topic, }, error::{RequestError, ResponseError}, @@ -182,6 +182,9 @@ pub struct Service { /// Keeps track of the number of responses received from a NODES response. active_nodes_responses: HashMap, + /// Keeps track of expected REGCONFIRMATION responses that may be received from a REGTOPIC request. + active_regtopic_requests: ActiveRegtopicRequests, + /// A map of votes nodes have made about our external IP address. We accept the majority. 
ip_votes: Option, @@ -321,6 +324,7 @@ impl Service { queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), active_nodes_responses: HashMap::new(), + active_regtopic_requests: ActiveRegtopicRequests::new(), ip_votes, handler_send, handler_recv, @@ -673,34 +677,43 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - let topic = topic_hash(topic); - let wait_time = self.ads.ticket_wait_time(topic); - - let new_ticket = Ticket::new( - node_address.node_id, - node_address.socket_addr.ip(), - topic, - tokio::time::Instant::now(), - wait_time.unwrap_or(Duration::from_secs(0)), - ); - self.send_ticket_response( - node_address.clone(), - id.clone(), - new_ticket, - wait_time.unwrap_or(Duration::from_secs(0)), - ); + // Drop if request tries to advertise another node than sender + if enr.node_id() == node_address.node_id + && enr.udp_socket() == Some(node_address.socket_addr) + { + let topic = topic_hash(topic); + let wait_time = self.ads.ticket_wait_time(topic); + + let new_ticket = Ticket::new( + node_address.node_id, + node_address.socket_addr.ip(), + topic, + tokio::time::Instant::now(), + wait_time.unwrap_or(Duration::from_secs(0)), + ); - if ticket.is_empty() { - self.ticket_pools.insert(enr, id, new_ticket); - } else { - Ticket::decode(ticket) - .map(|ticket| { - // Validate ticket - if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); - } - }) - .ok(); + self.send_ticket_response( + node_address.clone(), + id.clone(), + new_ticket, + wait_time.unwrap_or(Duration::from_secs(0)), + ); + + // use id for expecting regconfirmation + + if ticket.is_empty() { + self.ticket_pools.insert(enr, id, new_ticket); + } else { + Ticket::decode(ticket) + .map(|ticket| { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, ticket); + } + }) + 
.ok(); + } } } RequestBody::TopicQuery { topic } => { @@ -955,10 +968,14 @@ impl Service { } } ResponseBody::RegisterConfirmation { topic } => { - if let NodeContact::Enr(enr) = active_request.contact { - let topic = topic_hash(topic); - self.active_topics.insert(*enr, topic).ok(); - } + let topic = topic_hash(topic); + self.active_regtopic_requests + .is_active_req(id, node_id, topic) + .map(|_| { + if let NodeContact::Enr(enr) = active_request.contact { + self.active_topics.insert(*enr, topic).ok(); + } + }); } } } else { diff --git a/src/service/test.rs b/src/service/test.rs index 2aec38aa8..d2c21c445 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -90,6 +90,7 @@ async fn build_service( queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), active_nodes_responses: HashMap::new(), + active_regtopic_requests: ActiveRegtopicRequests::new(), ip_votes: None, handler_send, handler_recv, From 9a5d425fc0a1092f90f276a44f1ae7945298210b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Apr 2022 13:44:35 +0200 Subject: [PATCH 047/391] Allow REGCONFIRMATION responses through Hanlder Active Requests --- src/handler/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 1fb371620..27c485f9c 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1010,6 +1010,16 @@ impl Handler { )) .await; self.send_next_request(node_address).await; + } else if let ResponseBody::RegisterConfirmation { .. } = response.body { + let _ = self + .service_send + .send(HandlerOut::Response( + node_address.clone(), + Box::new(response), + )) + .await; + self.send_next_request(node_address).await; + trace!("REGCONFIRMATION response from node: {}", node_address); } else { // This is likely a late response and we have already failed the request. These get // dropped here. 
From 0dac5f707e4e9b090e26347f7cb62b61f08b60b2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Apr 2022 13:46:42 +0200 Subject: [PATCH 048/391] Fix tiny borrow bug --- src/handler/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 27c485f9c..91d2446ee 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1018,7 +1018,7 @@ impl Handler { Box::new(response), )) .await; - self.send_next_request(node_address).await; + self.send_next_request(node_address.clone()).await; trace!("REGCONFIRMATION response from node: {}", node_address); } else { // This is likely a late response and we have already failed the request. These get From 1b080c2397c33790b500e43de301cb6ff0216b41 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Apr 2022 17:31:26 +0200 Subject: [PATCH 049/391] Fix clippy warnings and use active_regtopic_requests --- src/advertisement/ticket.rs | 36 ++++++++++++++++-------------------- src/service.rs | 30 ++++++++++++++++++------------ 2 files changed, 34 insertions(+), 32 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 299e6fba1..df03cb927 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -233,24 +233,21 @@ impl TicketPools { } pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket) { - ticket - .req_time - .checked_add(ticket.wait_time) - .map(|open_time| { - if open_time.elapsed() <= Duration::from_secs(10) { - let pool = self.ticket_pools.entry(ticket.topic).or_default(); - // Drop request if pool contains 50 nodes - if pool.len() < 50 { - if pool.is_empty() { - self.expirations.push_back(RegistrationWindow { - topic: ticket.topic, - open_time, - }); - } - pool.insert(node_record.node_id(), (node_record, req_id, ticket)); + if let Some(open_time) = ticket.req_time.checked_add(ticket.wait_time) { + if open_time.elapsed() <= Duration::from_secs(10) { + let pool = 
self.ticket_pools.entry(ticket.topic).or_default(); + // Drop request if pool contains 50 nodes + if pool.len() < 50 { + if pool.is_empty() { + self.expirations.push_back(RegistrationWindow { + topic: ticket.topic, + open_time, + }); } + pool.insert(node_record.node_id(), (node_record, req_id, ticket)); } - }); + } + } } } @@ -258,8 +255,7 @@ impl Stream for TicketPools { type Item = Result<(Topic, HashMap), String>; fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { self.expirations - .get(0) - .map(|reg_window| *reg_window) + .pop_front() .map(|reg_window| { if reg_window.open_time.elapsed() > Duration::from_secs(10) { self.ticket_pools @@ -268,7 +264,7 @@ impl Stream for TicketPools { self.expirations.pop_front(); Poll::Ready(Some(Ok((topic, ticket_pool)))) }) - .unwrap_or(Poll::Ready(Some(Err("Ticket selection failed".into())))) + .unwrap_or_else(|| Poll::Ready(Some(Err("Ticket selection failed".into())))) } else { Poll::Pending } @@ -346,7 +342,7 @@ impl ActiveRegtopicRequests { self.expirations .iter() .take_while(|req| req.insert_time.elapsed() >= Duration::from_secs(15)) - .map(|req| *req) + .copied() .collect::>() .iter() .for_each(|req| { diff --git a/src/service.rs b/src/service.rs index 83e919c44..ca6e2dc80 100644 --- a/src/service.rs +++ b/src/service.rs @@ -502,12 +502,12 @@ impl Service { } else { selection.into_iter().next().map(|node_id| ticket_pool.get(node_id)).unwrap_or(None) }; - new_ad.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)).map(|(node_record, req_id, _ticket)| { + if let Some((node_record, req_id, _ticket)) = new_ad.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { self.ads.insert(node_record.clone(), topic).ok(); NodeContact::from(node_record).node_address().map(|node_address| { self.send_regconfirmation_response(node_address, req_id, topic); }).ok(); - }); + } } } } @@ -693,7 +693,7 @@ impl Service { ); self.send_ticket_response( - 
node_address.clone(), + node_address, id.clone(), new_ticket, wait_time.unwrap_or(Duration::from_secs(0)), @@ -969,13 +969,15 @@ impl Service { } ResponseBody::RegisterConfirmation { topic } => { let topic = topic_hash(topic); - self.active_regtopic_requests + if self + .active_regtopic_requests .is_active_req(id, node_id, topic) - .map(|_| { - if let NodeContact::Enr(enr) = active_request.contact { - self.active_topics.insert(*enr, topic).ok(); - } - }); + .is_some() + { + if let NodeContact::Enr(enr) = active_request.contact { + self.active_topics.insert(*enr, topic).ok(); + } + } } } } else { @@ -1060,6 +1062,7 @@ impl Service { } fn reg_topic_request(&mut self, contact: NodeContact, topic: Topic, enr: Enr, ticket: Ticket) { + let node_id = enr.node_id(); let request_body = RequestBody::RegisterTopic { topic: topic.to_vec(), enr, @@ -1072,7 +1075,8 @@ impl Service { query_id: None, callback: None, }; - self.send_rpc_request(active_request); + let req_id = self.send_rpc_request(active_request); + self.active_regtopic_requests.insert(node_id, topic, req_id); } fn send_ticket_response( @@ -1282,7 +1286,7 @@ impl Service { } /// Sends generic RPC requests. Each request gets added to known outputs, awaiting a response. 
- fn send_rpc_request(&mut self, active_request: ActiveRequest) { + fn send_rpc_request(&mut self, active_request: ActiveRequest) -> RequestId { // Generate a random rpc_id which is matched per node id let id = RequestId::random(); let request: Request = Request { @@ -1290,12 +1294,14 @@ impl Service { body: active_request.request_body.clone(), }; let contact = active_request.contact.clone(); - self.active_requests.insert(id, active_request); + self.active_requests.insert(id.clone(), active_request); debug!("Sending RPC {} to node: {}", request, contact); let _ = self .handler_send .send(HandlerIn::Request(contact, Box::new(request))); + + id } fn send_event(&mut self, event: Discv5Event) { From 56e728dfe2a1bbf8cc98934ea5e83acb55207b0b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Apr 2022 21:26:20 +0200 Subject: [PATCH 050/391] Add en-/decoding for Ticket --- src/advertisement/mod.rs | 3 +- src/advertisement/ticket.rs | 79 ++------------- src/rpc.rs | 195 ++++++++++++++++++++++++++++++++++-- src/service.rs | 56 +++++------ 4 files changed, 218 insertions(+), 115 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index f9f9fe9e0..cbf832219 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -2,6 +2,7 @@ use super::*; use crate::Enr; use core::time::Duration; use futures::prelude::*; +use rpc::Topic; use std::{ collections::{HashMap, VecDeque}, pin::Pin, @@ -13,8 +14,6 @@ use tracing::{debug, error}; mod test; pub mod ticket; -pub type Topic = [u8; 32]; - #[derive(Debug)] pub struct AdNode { node_record: Enr, diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index df03cb927..47feaf9b5 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,12 +1,9 @@ use super::*; -use crate::{ - enr::{CombinedKey, EnrBuilder}, - rpc::RequestId, -}; +use crate::rpc::{RequestId, Ticket}; use delay_map::HashMapDelay; use enr::NodeId; use node_info::NodeContact; -use std::{cmp::Eq, 
collections::HashSet, net::IpAddr}; +use std::{cmp::Eq, collections::HashSet}; // Placeholder function pub fn topic_hash(topic: Vec) -> Topic { @@ -31,70 +28,6 @@ impl ActiveTopic { } } -#[derive(Debug, Copy, Clone)] -pub struct Ticket { - //nonce: u64, - src_node_id: NodeId, - src_ip: IpAddr, - topic: Topic, - req_time: Instant, - wait_time: Duration, - //cum_wait: Option,*/ -} - -// DEBUG -impl Default for Ticket { - fn default() -> Self { - let port = 5000; - let ip: IpAddr = "127.0.0.1".parse().unwrap(); - - let key = CombinedKey::generate_secp256k1(); - - let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); - let node_id = enr.node_id(); - - Ticket { - src_node_id: node_id, - src_ip: ip, - topic: [0u8; 32], - req_time: Instant::now(), - wait_time: Duration::default(), - } - } -} - -impl PartialEq for Ticket { - fn eq(&self, other: &Self) -> bool { - self.src_node_id == other.src_node_id - && self.src_ip == other.src_ip - && self.topic == other.topic - } -} - -impl Ticket { - pub fn new( - //nonce: u64, - src_node_id: NodeId, - src_ip: IpAddr, - topic: Topic, - req_time: Instant, - wait_time: Duration, - ) -> Self { - Ticket { - //nonce, - src_node_id, - src_ip, - topic, - req_time, - wait_time, - } - } - - pub fn decode(_ticket_bytes: Vec) -> Result { - Ok(Ticket::default()) - } -} - pub struct ActiveTicket { contact: NodeContact, ticket: Ticket, @@ -134,7 +67,7 @@ impl Tickets { ticket: Ticket, wait_time: Duration, ) -> Result<(), &str> { - let active_topic = ActiveTopic::new(contact.node_id(), ticket.topic); + let active_topic = ActiveTopic::new(contact.node_id(), ticket.topic()); if let Err(e) = self.ticket_history.insert(active_topic) { return Err(e); @@ -233,14 +166,14 @@ impl TicketPools { } pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket) { - if let Some(open_time) = ticket.req_time.checked_add(ticket.wait_time) { + if let Some(open_time) = ticket.req_time().checked_add(ticket.wait_time()) { if 
open_time.elapsed() <= Duration::from_secs(10) { - let pool = self.ticket_pools.entry(ticket.topic).or_default(); + let pool = self.ticket_pools.entry(ticket.topic()).or_default(); // Drop request if pool contains 50 nodes if pool.len() < 50 { if pool.is_empty() { self.expirations.push_back(RegistrationWindow { - topic: ticket.topic, + topic: ticket.topic(), open_time, }); } diff --git a/src/rpc.rs b/src/rpc.rs index 742ecc6c9..55c05c974 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,6 +1,10 @@ -use enr::{CombinedKey, Enr}; -use rlp::{DecoderError, RlpStream}; -use std::net::{IpAddr, Ipv6Addr}; +use enr::{CombinedKey, Enr, NodeId}; +use rlp::{DecoderError, Rlp, RlpStream}; +use std::{ + net::{IpAddr, Ipv6Addr}, + time::{SystemTime, UNIX_EPOCH}, +}; +use tokio::time::{Duration, Instant}; use tracing::{debug, warn}; type TopicHash = [u8; 32]; @@ -87,7 +91,7 @@ pub enum RequestBody { // Current node record of sender. enr: crate::Enr, // Ticket content of ticket from a previous registration attempt or empty. - ticket: Vec, + ticket: Option, }, /// A TOPICQUERY request. TopicQuery { @@ -122,7 +126,7 @@ pub enum ResponseBody { /// The TICKET response. Ticket { /// The response to a REGTOPIC request. - ticket: Vec, + ticket: Ticket, /// The time in seconds to wait before attempting to register again. 
wait_time: u64, }, @@ -374,10 +378,10 @@ impl std::fmt::Display for RequestBody { RequestBody::TopicQuery { topic } => write!(f, "TOPICQUERY: topic: {:?}", topic), RequestBody::RegisterTopic { topic, enr, ticket } => write!( f, - "RegisterTopic: topic: {}, enr: {}, ticket: {}", + "RegisterTopic: topic: {}, enr: {}, ticket: {:?}", hex::encode(topic), enr.to_base64(), - hex::encode(ticket) + ticket, ), } } @@ -569,7 +573,7 @@ impl Message { let topic = rlp.val_at::>(1)?; let enr_rlp = rlp.at(2)?; let enr = enr_rlp.as_val::>()?; - let ticket = rlp.val_at::>(3)?; + let ticket = rlp.val_at::>(3)?; Message::Request(Request { id, body: RequestBody::RegisterTopic { topic, enr, ticket }, @@ -581,7 +585,7 @@ impl Message { debug!("RegisterTopic Response has an invalid RLP list length. Expected 2, found {}", list_len); return Err(DecoderError::RlpIncorrectListLen); } - let ticket = rlp.val_at::>(1)?; + let ticket = rlp.val_at::(1)?; let wait_time = rlp.val_at::(2)?; Message::Response(Response { id, @@ -636,6 +640,159 @@ impl Message { } } +pub type Topic = [u8; 32]; + +#[derive(Debug, Copy, Clone)] +pub struct Ticket { + //nonce: u64, + src_node_id: NodeId, + src_ip: IpAddr, + topic: Topic, + req_time: Instant, + wait_time: Duration, + //cum_wait: Option, +} + +impl rlp::Encodable for Ticket { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(5); + s.append(&self.src_node_id.raw().to_vec()); + match self.src_ip { + IpAddr::V4(addr) => s.append(&(addr.octets().to_vec())), + IpAddr::V6(addr) => s.append(&(addr.octets().to_vec())), + }; + s.append(&(self.topic.to_vec())); + if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { + let time_since_req = self.req_time.elapsed(); + let time_stamp = time_since_unix - time_since_req; + s.append(&time_stamp.as_millis()); + } + s.append(&self.wait_time.as_secs()); + } +} + +impl rlp::Decodable for Ticket { + fn decode(rlp: &Rlp<'_>) -> Result { + if !rlp.is_list() { + debug!("Failed to decode ENR. 
Not an RLP list: {}", rlp); + return Err(DecoderError::RlpExpectedToBeList); + } + + if rlp.item_count() != Ok(5) { + return Err(DecoderError::Custom("List has wrong item count")); + } + + let mut decoded_list: Vec> = rlp.iter().collect(); + + let src_node_id = { + let data = decoded_list.remove(0).data()?; + if data.len() != 32 { + debug!("Ticket's node id is not 32 bytes"); + return Err(DecoderError::RlpIsTooBig); + } + let mut raw = [0u8; 32]; + raw.copy_from_slice(&data); + NodeId::new(&raw) + }; + + let src_ip = { + let data = decoded_list.remove(0).data()?; + match data.len() { + 4 => { + let mut ip = [0u8; 4]; + ip.copy_from_slice(&data); + IpAddr::from(ip) + } + 16 => { + let mut ip = [0u8; 16]; + ip.copy_from_slice(&data); + IpAddr::from(ip) + } + _ => { + debug!("Ticket has incorrect byte length for IP"); + return Err(DecoderError::RlpIncorrectListLen); + } + } + }; + let topic = { + let data = decoded_list.remove(0).data()?; + if data.len() != 32 { + debug!("Ticket's topic hash is not 32 bytes"); + return Err(DecoderError::RlpIsTooBig); + } + let mut topic = [0u8; 32]; + topic.copy_from_slice(&data); + topic + }; + let req_time = { + if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { + let ms = rlp.val_at::(0)?; + let req_time_since_unix = Duration::from_millis(ms); + let time_since_req = time_since_unix - req_time_since_unix; + if let Some(req_time) = Instant::now().checked_sub(time_since_req) { + req_time + } else { + return Err(DecoderError::Custom( + "Could not compute ticket req-time instant", + )); + } + } else { + return Err(DecoderError::Custom("SystemTime before UNIX EPOCH!")); + } + }; + let wait_time = Duration::from_secs(rlp.val_at::(1)?); + Ok(Self { + src_node_id, + src_ip, + topic, + req_time, + wait_time, + }) + } +} + +impl PartialEq for Ticket { + fn eq(&self, other: &Self) -> bool { + self.src_node_id == other.src_node_id + && self.src_ip == other.src_ip + && self.topic == other.topic + } +} + +impl Ticket { + 
pub fn new( + //nonce: u64, + src_node_id: NodeId, + src_ip: IpAddr, + topic: Topic, + req_time: Instant, + wait_time: Duration, + //cum_wait: Option, + ) -> Self { + Ticket { + //nonce, + src_node_id, + src_ip, + topic, + req_time, + wait_time, + //cum_wait, + } + } + + pub fn topic(&self) -> Topic { + self.topic + } + + pub fn req_time(&self) -> Instant { + self.req_time + } + + pub fn wait_time(&self) -> Duration { + self.wait_time + } +} + #[cfg(test)] mod tests { use super::*; @@ -890,7 +1047,7 @@ mod tests { body: RequestBody::RegisterTopic { topic: vec![1, 2, 3], enr, - ticket: vec![1, 2, 3, 4, 5], + ticket: None, }, }); @@ -902,15 +1059,31 @@ mod tests { #[test] fn encode_decode_ticket_response() { + // Create the test values needed + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + + let key = CombinedKey::generate_secp256k1(); + + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let node_id = enr.node_id(); + let ticket = Ticket::new( + node_id, + ip, + [1; 32], + Instant::now(), + Duration::from_secs(11), + ); let response = Message::Response(Response { id: RequestId(vec![1]), body: ResponseBody::Ticket { - ticket: vec![1, 2, 3], + ticket, wait_time: 1u64, }, }); let encoded = response.clone().encode(); + println!("{:?}", encoded); let decoded = Message::decode(&encoded).unwrap(); assert_eq!(response, decoded); diff --git a/src/service.rs b/src/service.rs index ca6e2dc80..b76c49f85 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,8 +19,8 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, ActiveRegtopicRequests, Ticket, TicketPools, Tickets}, - Ads, Topic, + ticket::{topic_hash, ActiveRegtopicRequests, TicketPools, Tickets}, + Ads, }, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, @@ -393,7 +393,7 @@ impl Service { self.send_topic_query(topic); } ServiceRequest::RegisterTopic(topic) => { - self.reg_topic_request(topic, self.local_enr(), 
Ticket::default()); + self.reg_topic_request(topic, self.local_enr(), None); }*/ } } @@ -462,7 +462,7 @@ impl Service { if let Some(topic) = topic { let local_enr = self.local_enr.read().clone(); - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), Ticket::default())); + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), None)); } else if let Some(callback) = result.target.callback { if callback.send(found_enrs).is_err() { warn!("Callback dropped for query {}. Results dropped", *id); @@ -488,7 +488,7 @@ impl Service { } Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { let enr = self.local_enr.read().clone(); - self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, active_ticket.ticket()); + self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } _ = publish_topics.tick() => { self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); @@ -701,18 +701,14 @@ impl Service { // use id for expecting regconfirmation - if ticket.is_empty() { - self.ticket_pools.insert(enr, id, new_ticket); + if let Some(ticket) = ticket { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, ticket); + } } else { - Ticket::decode(ticket) - .map(|ticket| { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket - if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); - } - }) - .ok(); + self.ticket_pools.insert(enr, id, new_ticket); } } } @@ -954,16 +950,12 @@ impl Service { } ResponseBody::Ticket { ticket, wait_time } => { if wait_time <= MAX_WAIT_TIME_TICKET { - Ticket::decode(ticket) - .map(|ticket| { - self.tickets - .insert( - 
active_request.contact, - ticket, - Duration::from_secs(wait_time), - ) - .ok(); - }) + self.tickets + .insert( + active_request.contact, + ticket, + Duration::from_secs(wait_time), + ) .ok(); } } @@ -1061,12 +1053,18 @@ impl Service { self.send_rpc_request(active_request); } - fn reg_topic_request(&mut self, contact: NodeContact, topic: Topic, enr: Enr, ticket: Ticket) { + fn reg_topic_request( + &mut self, + contact: NodeContact, + topic: Topic, + enr: Enr, + ticket: Option, + ) { let node_id = enr.node_id(); let request_body = RequestBody::RegisterTopic { topic: topic.to_vec(), enr, - ticket: format!("{:?}", ticket).as_bytes().to_vec(), + ticket, }; let active_request = ActiveRequest { @@ -1089,7 +1087,7 @@ impl Service { let response = Response { id: rpc_id, body: ResponseBody::Ticket { - ticket: format!("{:?}", ticket).as_bytes().to_vec(), + ticket, wait_time: wait_time.as_secs(), }, }; From dd4eee8b8e3c96ed38180e7e800269cb68566443 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Apr 2022 21:40:24 +0200 Subject: [PATCH 051/391] Fix clippy warnings --- src/rpc.rs | 17 ++++++++++++----- src/service.rs | 6 +++--- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 55c05c974..8a8bed495 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -691,7 +691,7 @@ impl rlp::Decodable for Ticket { return Err(DecoderError::RlpIsTooBig); } let mut raw = [0u8; 32]; - raw.copy_from_slice(&data); + raw.copy_from_slice(data); NodeId::new(&raw) }; @@ -700,13 +700,20 @@ impl rlp::Decodable for Ticket { match data.len() { 4 => { let mut ip = [0u8; 4]; - ip.copy_from_slice(&data); + ip.copy_from_slice(data); IpAddr::from(ip) } 16 => { let mut ip = [0u8; 16]; - ip.copy_from_slice(&data); - IpAddr::from(ip) + ip.copy_from_slice(data); + let ipv6 = Ipv6Addr::from(ip); + // If the ipv6 is ipv4 compatible/mapped, simply return the ipv4. + // Ipv6 for Discv5 is coming soon. 
+ if let Some(ipv4) = ipv6.to_ipv4() { + IpAddr::V4(ipv4) + } else { + IpAddr::V6(ipv6) + } } _ => { debug!("Ticket has incorrect byte length for IP"); @@ -721,7 +728,7 @@ impl rlp::Decodable for Ticket { return Err(DecoderError::RlpIsTooBig); } let mut topic = [0u8; 32]; - topic.copy_from_slice(&data); + topic.copy_from_slice(data); topic }; let req_time = { diff --git a/src/service.rs b/src/service.rs index b76c49f85..17cb51242 100644 --- a/src/service.rs +++ b/src/service.rs @@ -434,7 +434,7 @@ impl Service { query_event = Service::query_event_poll(&mut self.queries) => { match query_event { QueryEvent::Waiting(query_id, node_id, request_body) => { - self.send_rpc_query(query_id, node_id, request_body); + self.send_rpc_query(query_id, node_id, *request_body); } // Note: Currently the distinction between a timed-out query and a finished // query is superfluous, however it may be useful in future versions. @@ -1633,7 +1633,7 @@ impl Service { } }; - Poll::Ready(QueryEvent::Waiting(query.id(), node_id, request_body)) + Poll::Ready(QueryEvent::Waiting(query.id(), node_id, Box::new(request_body))) } QueryPoolState::Timeout(query) => { warn!("Query id: {:?} timed out", query.id()); @@ -1649,7 +1649,7 @@ impl Service { /// active query. enum QueryEvent { /// The query is waiting for a peer to be contacted. - Waiting(QueryId, NodeId, RequestBody), + Waiting(QueryId, NodeId, Box), /// The query has timed out, possible returning peers. TimedOut(Box>), /// The query has completed successfully. 
From 1e50e438ce9a64228d9592f40509cf5942d3cb28 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Apr 2022 22:45:05 +0200 Subject: [PATCH 052/391] Fix part of Encodable impl Ticket --- src/rpc.rs | 51 ++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 8a8bed495..5a283c2bc 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -687,7 +687,7 @@ impl rlp::Decodable for Ticket { let src_node_id = { let data = decoded_list.remove(0).data()?; if data.len() != 32 { - debug!("Ticket's node id is not 32 bytes"); + debug!("Ticket's src-node-id is not 32 bytes"); return Err(DecoderError::RlpIsTooBig); } let mut raw = [0u8; 32]; @@ -716,7 +716,7 @@ impl rlp::Decodable for Ticket { } } _ => { - debug!("Ticket has incorrect byte length for IP"); + debug!("Ticket has incorrect byte length for src-ip"); return Err(DecoderError::RlpIncorrectListLen); } } @@ -733,8 +733,11 @@ impl rlp::Decodable for Ticket { }; let req_time = { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { - let ms = rlp.val_at::(0)?; - let req_time_since_unix = Duration::from_millis(ms); + let s_bytes = decoded_list.remove(0).data()?; + let mut s = [0u8; 8]; + s.copy_from_slice(s_bytes); + let secs = u64::from_be_bytes(s); + let req_time_since_unix = Duration::from_secs(secs); let time_since_req = time_since_unix - req_time_since_unix; if let Some(req_time) = Instant::now().checked_sub(time_since_req) { req_time @@ -747,7 +750,13 @@ impl rlp::Decodable for Ticket { return Err(DecoderError::Custom("SystemTime before UNIX EPOCH!")); } }; - let wait_time = Duration::from_secs(rlp.val_at::(1)?); + let wait_time = { + let s_bytes = decoded_list.remove(0).data()?; + let mut s = [0u8; 8]; + s.copy_from_slice(s_bytes); + let secs = u64::from_be_bytes(s); + Duration::from_secs(secs) + }; Ok(Self { src_node_id, src_ip, @@ -1064,6 +1073,38 @@ mod tests { assert_eq!(request, decoded); } + #[test] + fn 
encode_decode_ticket() { + // Create the test values needed + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + + let key = CombinedKey::generate_secp256k1(); + + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let node_id = enr.node_id(); + let ticket = Ticket::new( + node_id, + ip, + [1; 32], + Instant::now(), + Duration::from_secs(11), + ); + + let mut buf = Vec::with_capacity(60); + + let mut s = RlpStream::new(); + s.begin_list(1); + s.append(&ticket); + buf.extend_from_slice(&s.out()); + println!("{:?}", buf); + + let rlp = rlp::Rlp::new(&buf); + let decoded = rlp.val_at::(0).unwrap(); + println!("{:?}", decoded); + assert_eq!(ticket, decoded); + } + #[test] fn encode_decode_ticket_response() { // Create the test values needed From 98ed8dda050e55a32a2db92e0b599c0a5ec6da73 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Apr 2022 09:48:41 +0200 Subject: [PATCH 053/391] Run cargo fmt --- src/rpc.rs | 3 ++- src/service.rs | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 5a283c2bc..804115df7 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -665,7 +665,8 @@ impl rlp::Encodable for Ticket { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { let time_since_req = self.req_time.elapsed(); let time_stamp = time_since_unix - time_since_req; - s.append(&time_stamp.as_millis()); + s.append(&time_stamp.as_secs()); + println!("{:?}", &time_stamp.as_secs()); } s.append(&self.wait_time.as_secs()); } diff --git a/src/service.rs b/src/service.rs index 17cb51242..ace4ac6e8 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1633,7 +1633,11 @@ impl Service { } }; - Poll::Ready(QueryEvent::Waiting(query.id(), node_id, Box::new(request_body))) + Poll::Ready(QueryEvent::Waiting( + query.id(), + node_id, + Box::new(request_body), + )) } QueryPoolState::Timeout(query) => { warn!("Query id: {:?} timed out", query.id()); From 
5ed8a5a3d92d83dcd6163148eb0914377a4decb4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Apr 2022 10:17:45 +0200 Subject: [PATCH 054/391] Fix ticket en-/decode tests --- src/rpc.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 804115df7..646e354f2 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -665,10 +665,9 @@ impl rlp::Encodable for Ticket { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { let time_since_req = self.req_time.elapsed(); let time_stamp = time_since_unix - time_since_req; - s.append(&time_stamp.as_secs()); - println!("{:?}", &time_stamp.as_secs()); + s.append(&time_stamp.as_secs().to_be_bytes().to_vec()); } - s.append(&self.wait_time.as_secs()); + s.append(&self.wait_time.as_secs().to_be_bytes().to_vec()); } } @@ -1098,11 +1097,9 @@ mod tests { s.begin_list(1); s.append(&ticket); buf.extend_from_slice(&s.out()); - println!("{:?}", buf); let rlp = rlp::Rlp::new(&buf); let decoded = rlp.val_at::(0).unwrap(); - println!("{:?}", decoded); assert_eq!(ticket, decoded); } @@ -1132,7 +1129,6 @@ mod tests { }); let encoded = response.clone().encode(); - println!("{:?}", encoded); let decoded = Message::decode(&encoded).unwrap(); assert_eq!(response, decoded); From 39889be547273bc1928907db96e0a154c7bfc681 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Apr 2022 15:31:07 +0200 Subject: [PATCH 055/391] Add test for regtopic with ticket --- src/rpc.rs | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/rpc.rs b/src/rpc.rs index 646e354f2..4e97dd0dd 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1073,6 +1073,37 @@ mod tests { assert_eq!(request, decoded); } + #[test] + fn encode_decode_register_topic_request_with_ticket() { + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + + let node_id = 
enr.node_id(); + let ticket = Ticket::new( + node_id, + ip, + [1; 32], + Instant::now(), + Duration::from_secs(11), + ); + + let request = Message::Request(Request { + id: RequestId(vec![1]), + body: RequestBody::RegisterTopic { + topic: vec![1, 2, 3], + enr, + ticket: Some(ticket), + }, + }); + + let encoded = request.clone().encode(); + let decoded = Message::decode(&encoded).unwrap(); + + assert_eq!(request, decoded); + } + #[test] fn encode_decode_ticket() { // Create the test values needed From d3861ede8ecac362eb542755acf963edf8f9d3f2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Apr 2022 15:54:06 +0200 Subject: [PATCH 056/391] Start en-/decryption ticket --- src/rpc.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/rpc.rs b/src/rpc.rs index 4e97dd0dd..6956a105f 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,3 +1,7 @@ +use aes_gcm::{ + aead::{generic_array::GenericArray, Aead, NewAead, Payload}, + Aes128Gcm, +}; use enr::{CombinedKey, Enr, NodeId}; use rlp::{DecoderError, Rlp, RlpStream}; use std::{ @@ -807,6 +811,15 @@ impl Ticket { pub fn wait_time(&self) -> Duration { self.wait_time } + + /*pub fn encrypt_rlp_encoded_ticket(&mut self, ticket: &[u8], none: u64) -> Result, String> { + let cipher = crypto::encrypt_message( + key, + nonce, + ticket, + b"", + ); + }*/ } #[cfg(test)] From 727202ef378c6f852c05c67ebd7895c5667bdfba Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Apr 2022 08:03:30 +0200 Subject: [PATCH 057/391] Move ticket en-/decoding to Service layer --- src/rpc.rs | 60 ++++++++++++++++++++++++++------------------------ src/service.rs | 56 ++++++++++++++++++++++++++++------------------ 2 files changed, 66 insertions(+), 50 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 6956a105f..64c745456 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,7 +1,3 @@ -use aes_gcm::{ - aead::{generic_array::GenericArray, Aead, NewAead, Payload}, - Aes128Gcm, -}; use enr::{CombinedKey, Enr, NodeId}; use 
rlp::{DecoderError, Rlp, RlpStream}; use std::{ @@ -95,7 +91,7 @@ pub enum RequestBody { // Current node record of sender. enr: crate::Enr, // Ticket content of ticket from a previous registration attempt or empty. - ticket: Option, + ticket: Vec, }, /// A TOPICQUERY request. TopicQuery { @@ -130,7 +126,7 @@ pub enum ResponseBody { /// The TICKET response. Ticket { /// The response to a REGTOPIC request. - ticket: Ticket, + ticket: Vec, /// The time in seconds to wait before attempting to register again. wait_time: u64, }, @@ -577,7 +573,7 @@ impl Message { let topic = rlp.val_at::>(1)?; let enr_rlp = rlp.at(2)?; let enr = enr_rlp.as_val::>()?; - let ticket = rlp.val_at::>(3)?; + let ticket = rlp.val_at::>(3)?; Message::Request(Request { id, body: RequestBody::RegisterTopic { topic, enr, ticket }, @@ -589,7 +585,7 @@ impl Message { debug!("RegisterTopic Response has an invalid RLP list length. Expected 2, found {}", list_len); return Err(DecoderError::RlpIncorrectListLen); } - let ticket = rlp.val_at::(1)?; + let ticket = rlp.val_at::>(1)?; let wait_time = rlp.val_at::(2)?; Message::Response(Response { id, @@ -812,14 +808,22 @@ impl Ticket { self.wait_time } - /*pub fn encrypt_rlp_encoded_ticket(&mut self, ticket: &[u8], none: u64) -> Result, String> { - let cipher = crypto::encrypt_message( - key, - nonce, - ticket, - b"", - ); - }*/ + pub fn encode(&self) -> Vec { + let mut buf = Vec::new(); + let mut s =RlpStream::new(); + s.append(self); + buf.extend_from_slice(&s.out()); + buf + } + + pub fn decode(ticket: &[u8]) -> Result, DecoderError> { + if ticket.len() > 0 { + let rlp = rlp::Rlp::new(&ticket); + let ticket = rlp.as_val::()?; + return Ok(Some(ticket)); + } + Ok(None) + } } #[cfg(test)] @@ -1065,7 +1069,7 @@ mod tests { } #[test] - fn encode_decode_register_topic_request() { + fn encode_decode_register_topic_request_empty_ticket() { let port = 5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); @@ -1076,7 
+1080,7 @@ mod tests { body: RequestBody::RegisterTopic { topic: vec![1, 2, 3], enr, - ticket: None, + ticket: Vec::new(), }, }); @@ -1087,7 +1091,7 @@ mod tests { } #[test] - fn encode_decode_register_topic_request_with_ticket() { + fn encode_decode_register_topic_request() { let port = 5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); @@ -1102,12 +1106,14 @@ mod tests { Duration::from_secs(11), ); + let ticket = ticket.encode(); + let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::RegisterTopic { topic: vec![1, 2, 3], enr, - ticket: Some(ticket), + ticket: ticket, }, }); @@ -1135,16 +1141,10 @@ mod tests { Duration::from_secs(11), ); - let mut buf = Vec::with_capacity(60); + let encoded = ticket.encode(); + let decoded = Ticket::decode(&encoded).unwrap(); - let mut s = RlpStream::new(); - s.begin_list(1); - s.append(&ticket); - buf.extend_from_slice(&s.out()); - - let rlp = rlp::Rlp::new(&buf); - let decoded = rlp.val_at::(0).unwrap(); - assert_eq!(ticket, decoded); + assert_eq!(Some(ticket), decoded); } #[test] @@ -1164,6 +1164,8 @@ mod tests { Instant::now(), Duration::from_secs(11), ); + + let ticket = ticket.encode(); let response = Message::Response(Response { id: RequestId(vec![1]), body: ResponseBody::Ticket { diff --git a/src/service.rs b/src/service.rs index ace4ac6e8..29cc22909 100644 --- a/src/service.rs +++ b/src/service.rs @@ -692,21 +692,25 @@ impl Service { wait_time.unwrap_or(Duration::from_secs(0)), ); + let new_ticket_bytes = new_ticket.encode(); + self.send_ticket_response( node_address, id.clone(), - new_ticket, + new_ticket_bytes, wait_time.unwrap_or(Duration::from_secs(0)), ); - - // use id for expecting regconfirmation - - if let Some(ticket) = ticket { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket - if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); - } + + if ticket.len() 
> 0 { + Ticket::decode(&ticket).map_err(|e| error!("{}", e)).map(|ticket| { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket + if let Some(ticket) = ticket { + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, ticket); + } + } + }).ok(); } else { self.ticket_pools.insert(enr, id, new_ticket); } @@ -949,15 +953,19 @@ impl Service { } } ResponseBody::Ticket { ticket, wait_time } => { - if wait_time <= MAX_WAIT_TIME_TICKET { - self.tickets - .insert( - active_request.contact, - ticket, - Duration::from_secs(wait_time), - ) - .ok(); - } + Ticket::decode(&ticket).map_err(|e| error!("{}", e)).map(|ticket| { + if let Some(ticket) = ticket { + if wait_time <= MAX_WAIT_TIME_TICKET { + self.tickets + .insert( + active_request.contact, + ticket, + Duration::from_secs(wait_time), + ) + .ok(); + } + } + }).ok(); } ResponseBody::RegisterConfirmation { topic } => { let topic = topic_hash(topic); @@ -1060,11 +1068,17 @@ impl Service { enr: Enr, ticket: Option, ) { + + let ticket_bytes = if let Some(ticket) = ticket { + ticket.encode() + } else { + Vec::new() + }; let node_id = enr.node_id(); let request_body = RequestBody::RegisterTopic { topic: topic.to_vec(), enr, - ticket, + ticket: ticket_bytes, }; let active_request = ActiveRequest { @@ -1081,7 +1095,7 @@ impl Service { &mut self, node_address: NodeAddress, rpc_id: RequestId, - ticket: Ticket, + ticket: Vec, wait_time: Duration, ) { let response = Response { From a37047d71dfe00abbf4c1d5474646fce3a9f4866 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Apr 2022 09:20:52 +0200 Subject: [PATCH 058/391] Encrypt ticket --- src/rpc.rs | 2 +- src/service.rs | 118 +++++++++++++++++++++++++++++++------------------ 2 files changed, 77 insertions(+), 43 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 64c745456..92c3bcf72 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -810,7 +810,7 @@ impl Ticket { pub fn encode(&self) -> Vec { let mut buf = 
Vec::new(); - let mut s =RlpStream::new(); + let mut s = RlpStream::new(); s.append(self); buf.extend_from_slice(&s.out()); buf diff --git a/src/service.rs b/src/service.rs index 29cc22909..dbf10e7c5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -12,7 +12,6 @@ //! Note that although the ENR crate does support Ed25519 keys, these are currently not //! supported as the ECDH procedure isn't specified in the specification. Therefore, only //! secp256k1 keys are supported currently. - use self::{ ip_vote::IpVote, query_info::{QueryInfo, QueryType}, @@ -35,6 +34,10 @@ use crate::{ }, rpc, Discv5Config, Discv5Event, Enr, }; +use aes_gcm::{ + aead::{generic_array::GenericArray, Aead, NewAead, Payload}, + Aes128Gcm, +}; use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; @@ -312,6 +315,14 @@ impl Service { } }; + let ticket_key: [u8; 16] = rand::random(); + match local_enr.write().insert("ticket_key", &ticket_key, &enr_key.write()) { + Ok(_) => {}, + Err(e) => { + return Err(Error::new(ErrorKind::Other, format!("{:?}", e))); + } + } + config .executor .clone() @@ -692,25 +703,26 @@ impl Service { wait_time.unwrap_or(Duration::from_secs(0)), ); - let new_ticket_bytes = new_ticket.encode(); - self.send_ticket_response( node_address, id.clone(), - new_ticket_bytes, + new_ticket, wait_time.unwrap_or(Duration::from_secs(0)), ); - + if ticket.len() > 0 { - Ticket::decode(&ticket).map_err(|e| error!("{}", e)).map(|ticket| { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket - if let Some(ticket) = ticket { - if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); + Ticket::decode(&ticket) + .map_err(|e| error!("{}", e)) + .map(|ticket| { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket + if let Some(ticket) = ticket { + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, ticket); + } } - } - 
}).ok(); + }) + .ok(); } else { self.ticket_pools.insert(enr, id, new_ticket); } @@ -953,19 +965,22 @@ impl Service { } } ResponseBody::Ticket { ticket, wait_time } => { - Ticket::decode(&ticket).map_err(|e| error!("{}", e)).map(|ticket| { - if let Some(ticket) = ticket { - if wait_time <= MAX_WAIT_TIME_TICKET { - self.tickets - .insert( - active_request.contact, - ticket, - Duration::from_secs(wait_time), - ) - .ok(); + Ticket::decode(&ticket) + .map_err(|e| error!("{}", e)) + .map(|ticket| { + if let Some(ticket) = ticket { + if wait_time <= MAX_WAIT_TIME_TICKET { + self.tickets + .insert( + active_request.contact, + ticket, + Duration::from_secs(wait_time), + ) + .ok(); + } } - } - }).ok(); + }) + .ok(); } ResponseBody::RegisterConfirmation { topic } => { let topic = topic_hash(topic); @@ -1068,7 +1083,6 @@ impl Service { enr: Enr, ticket: Option, ) { - let ticket_bytes = if let Some(ticket) = ticket { ticket.encode() } else { @@ -1095,24 +1109,44 @@ impl Service { &mut self, node_address: NodeAddress, rpc_id: RequestId, - ticket: Vec, + ticket: Ticket, wait_time: Duration, ) { - let response = Response { - id: rpc_id, - body: ResponseBody::Ticket { - ticket, - wait_time: wait_time.as_secs(), - }, - }; - trace!( - "Sending TICKET response to: {}. 
Response: {} ", - node_address, - response - ); - let _ = self - .handler_send - .send(HandlerIn::Response(node_address, Box::new(response))); + self + .local_enr + .write() + .to_base64() + .parse::() + .map_err(|e| error!("Failed to send TICKET response: {}", e)) + .map(|decoded_enr| { + if let Some(ticket_key) = decoded_enr.get("ticket_key") { + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket.encode(), + aad: b"", + }; + aead + .encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .map_err(|e| error!("Failed to send TICKET response: {}", e)) + .map(|encrypted_ticket| { + let response = Response { + id: rpc_id, + body: ResponseBody::Ticket { + ticket: encrypted_ticket, + wait_time: wait_time.as_secs(), + }, + }; + trace!( + "Sending TICKET response to: {}. Response: {} ", + node_address, + response + ); + let _ = self + .handler_send + .send(HandlerIn::Response(node_address, Box::new(response))); + }).ok(); + } + }).ok(); } fn send_regconfirmation_response( From c25da92d09bfaba5092d3f3852ceb4df2407d0b0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Apr 2022 09:30:20 +0200 Subject: [PATCH 059/391] Fix cargo clippy warnings and fmt --- src/rpc.rs | 4 ++-- src/service.rs | 21 ++++++++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 92c3bcf72..31a1beb88 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -817,8 +817,8 @@ impl Ticket { } pub fn decode(ticket: &[u8]) -> Result, DecoderError> { - if ticket.len() > 0 { - let rlp = rlp::Rlp::new(&ticket); + if ticket.is_empty() { + let rlp = rlp::Rlp::new(ticket); let ticket = rlp.as_val::()?; return Ok(Some(ticket)); } diff --git a/src/service.rs b/src/service.rs index dbf10e7c5..67c2ef40c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -316,8 +316,11 @@ impl Service { }; let ticket_key: [u8; 16] = rand::random(); - match local_enr.write().insert("ticket_key", &ticket_key, &enr_key.write()) { - 
Ok(_) => {}, + match local_enr + .write() + .insert("ticket_key", &ticket_key, &enr_key.write()) + { + Ok(_) => {} Err(e) => { return Err(Error::new(ErrorKind::Other, format!("{:?}", e))); } @@ -710,7 +713,7 @@ impl Service { wait_time.unwrap_or(Duration::from_secs(0)), ); - if ticket.len() > 0 { + if ticket.is_empty() { Ticket::decode(&ticket) .map_err(|e| error!("{}", e)) .map(|ticket| { @@ -1112,8 +1115,7 @@ impl Service { ticket: Ticket, wait_time: Duration, ) { - self - .local_enr + self.local_enr .write() .to_base64() .parse::() @@ -1125,8 +1127,7 @@ impl Service { msg: &ticket.encode(), aad: b"", }; - aead - .encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) .map_err(|e| error!("Failed to send TICKET response: {}", e)) .map(|encrypted_ticket| { let response = Response { @@ -1144,9 +1145,11 @@ impl Service { let _ = self .handler_send .send(HandlerIn::Response(node_address, Box::new(response))); - }).ok(); + }) + .ok(); } - }).ok(); + }) + .ok(); } fn send_regconfirmation_response( From c2ed5621af7a77ac373381a41032eb4f99c84fc7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Apr 2022 09:38:52 +0200 Subject: [PATCH 060/391] Fix logic bug --- src/rpc.rs | 2 +- src/service.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 31a1beb88..cf49d51ae 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -817,7 +817,7 @@ impl Ticket { } pub fn decode(ticket: &[u8]) -> Result, DecoderError> { - if ticket.is_empty() { + if !ticket.is_empty() { let rlp = rlp::Rlp::new(ticket); let ticket = rlp.as_val::()?; return Ok(Some(ticket)); diff --git a/src/service.rs b/src/service.rs index 67c2ef40c..bc3395fbb 100644 --- a/src/service.rs +++ b/src/service.rs @@ -713,7 +713,7 @@ impl Service { wait_time.unwrap_or(Duration::from_secs(0)), ); - if ticket.is_empty() { + if !ticket.is_empty() { Ticket::decode(&ticket) .map_err(|e| error!("{}", e)) .map(|ticket| { From 
5ede074b1cc7d6895838c22e1ad8fc5575445055 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Apr 2022 21:00:49 +0200 Subject: [PATCH 061/391] Add test for en-decrypt ticket --- src/service.rs | 50 ++++++++++++++++++++++++------- src/service/test.rs | 73 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+), 11 deletions(-) diff --git a/src/service.rs b/src/service.rs index bc3395fbb..6f8397948 100644 --- a/src/service.rs +++ b/src/service.rs @@ -714,18 +714,46 @@ impl Service { ); if !ticket.is_empty() { - Ticket::decode(&ticket) - .map_err(|e| error!("{}", e)) - .map(|ticket| { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket - if let Some(ticket) = ticket { - if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); - } + let decoded_enr = self + .local_enr + .write() + .to_base64() + .parse::() + .map_err(|e| { + error!("Failed to decode ticket in REGTOPIC query: {}", e) + }); + if let Ok(decoded_enr) = decoded_enr { + if let Some(ticket_key) = decoded_enr.get("ticket_key") { + let decrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", + }; + aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .map_err(|e| { + error!( + "Failed to decode ticket in REGTOPIC query: {}", + e + ) + }) + }; + if let Ok(decrypted_ticket) = decrypted_ticket { + Ticket::decode(&decrypted_ticket) + .map_err(|e| error!("{}", e)) + .map(|ticket| { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket + if let Some(ticket) = ticket { + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, ticket); + } + } + }) + .ok(); } - }) - .ok(); + } + } } else { self.ticket_pools.insert(enr, id, new_ticket); } diff --git a/src/service/test.rs b/src/service/test.rs index d2c21c445..eb1af58ea 100644 --- a/src/service/test.rs +++ 
b/src/service/test.rs @@ -177,3 +177,76 @@ async fn test_updating_connection_on_ping() { let node = buckets.iter_ref().next().unwrap(); assert!(node.status.is_connected()) } + +#[tokio::test] +async fn encrypt_decrypt_ticket() { + init(); + let enr_key = CombinedKey::generate_secp256k1(); + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let enr = EnrBuilder::new("v4") + .ip(ip) + .udp(10001) + .build(&enr_key) + .unwrap(); + + let socket_addr = enr.udp_socket().unwrap(); + + let service = build_service( + Arc::new(RwLock::new(enr)), + Arc::new(RwLock::new(enr_key)), + socket_addr, + false, + ) + .await; + + let ticket_key: [u8; 16] = rand::random(); + service + .local_enr + .write() + .insert("ticket_key", &ticket_key, &service.enr_key.write()) + .unwrap(); + let decoded_enr = service + .local_enr + .write() + .to_base64() + .parse::() + .unwrap(); + + let port = 6666; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let node_id = enr.node_id(); + + let ticket = Ticket::new( + node_id, + ip, + [2u8; 32], + tokio::time::Instant::now(), + tokio::time::Duration::from_secs(5), + ); + + let ticket_key = decoded_enr.get("ticket_key").unwrap(); + + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket.encode(), + aad: b"", + }; + let nonce = [1u8; 12]; + let encrypted_ticket = aead + .encrypt(GenericArray::from_slice(&nonce), payload) + .unwrap(); + + let decrypted_ticket = { + let payload = Payload { + msg: &encrypted_ticket, + aad: b"", + }; + aead.decrypt(GenericArray::from_slice(&nonce), payload) + .unwrap() + }; + let decoded_ticket = Ticket::decode(&decrypted_ticket).unwrap().unwrap(); + + assert_eq!(decoded_ticket, ticket); +} From 2a36159a65ea610ac5835d245df5226aca7ac06a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 16 Apr 2022 10:47:05 +0200 Subject: [PATCH 062/391] Use topic hash 
from sigpi/gossipsub --- Cargo.toml | 1 + src/advertisement/mod.rs | 23 ++--- src/advertisement/test.rs | 55 ++++++------ src/advertisement/ticket.rs | 52 ++++++----- src/advertisement/topic.rs | 147 +++++++++++++++++++++++++++++++ src/rpc.rs | 45 ++++++---- src/service.rs | 166 +++++++++++++++++++----------------- src/service/test.rs | 2 +- 8 files changed, 330 insertions(+), 161 deletions(-) create mode 100644 src/advertisement/topic.rs diff --git a/Cargo.toml b/Cargo.toml index bc5b7d077..497e5c3b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,6 +39,7 @@ lru = "0.7.1" hashlink = "0.7.0" delay_map = "0.1.1" more-asserts = "0.2.2" +base64 = "0.13.0" [dev-dependencies] rand_07 = { package = "rand", version = "0.7" } diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index cbf832219..ee93ae728 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -2,17 +2,18 @@ use super::*; use crate::Enr; use core::time::Duration; use futures::prelude::*; -use rpc::Topic; use std::{ collections::{HashMap, VecDeque}, pin::Pin, task::{Context, Poll}, }; use tokio::time::Instant; +use topic::TopicHash; use tracing::{debug, error}; mod test; pub mod ticket; +pub mod topic; #[derive(Debug)] pub struct AdNode { @@ -36,19 +37,19 @@ impl PartialEq for AdNode { } struct AdTopic { - topic: Topic, + topic: TopicHash, insert_time: Instant, } impl AdTopic { - pub fn new(topic: Topic, insert_time: Instant) -> Self { + pub fn new(topic: TopicHash, insert_time: Instant) -> Self { AdTopic { topic, insert_time } } } pub struct Ads { expirations: VecDeque, - ads: HashMap>, + ads: HashMap>, ad_lifetime: Duration, max_ads_per_topic: usize, max_ads: usize, @@ -73,7 +74,7 @@ impl Ads { }) } - pub fn get_ad_nodes(&self, topic: Topic) -> impl Iterator + '_ { + pub fn get_ad_nodes(&self, topic: TopicHash) -> impl Iterator + '_ { self.ads .get(&topic) .into_iter() @@ -81,7 +82,7 @@ impl Ads { .map(|node| node.node_record.clone()) } - pub fn ticket_wait_time(&mut self, 
topic: Topic) -> Option { + pub fn ticket_wait_time(&mut self, topic: TopicHash) -> Option { self.remove_expired(); let now = Instant::now(); if self.expirations.len() < self.max_ads { @@ -104,18 +105,18 @@ impl Ads { } fn remove_expired(&mut self) { - let mut map: HashMap = HashMap::new(); + let mut map: HashMap = HashMap::new(); self.expirations .iter() .take_while(|ad| ad.insert_time.elapsed() >= self.ad_lifetime) .for_each(|ad| { - let count = map.entry(ad.topic).or_default(); + let count = map.entry(ad.topic.clone()).or_default(); *count += 1; }); map.into_iter().for_each(|(topic, index)| { - let entry_ref = self.ads.entry(topic).or_default(); + let entry_ref = self.ads.entry(topic.clone()).or_default(); for _ in 0..index { entry_ref.pop_front(); self.expirations.pop_front(); @@ -126,10 +127,10 @@ impl Ads { }); } - pub fn insert(&mut self, node_record: Enr, topic: Topic) -> Result<(), &str> { + pub fn insert(&mut self, node_record: Enr, topic: TopicHash) -> Result<(), &str> { self.remove_expired(); let now = Instant::now(); - let nodes = self.ads.entry(topic).or_default(); + let nodes = self.ads.entry(topic.clone()).or_default(); if nodes.contains(&AdNode::new(node_record.clone(), now)) { error!( "This node {} is already advertising this topic", diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 8a3637c09..31ea37584 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -15,13 +15,13 @@ async fn insert_same_node() { let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); - let topic = [1; 32]; + let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); - ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic.clone()).unwrap(); // Since 2 seconds haven't passed assert_eq!( - ads.insert(enr.clone(), topic).map_err(|e| e), + ads.insert(enr.clone(), topic.clone()).map_err(|e| e), Err("Node already advertising this topic".into()) ); @@ -44,23 +44,23 @@ async fn insert_ad_and_get_nodes() { let mut 
ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); - let topic = [1; 32]; - let topic_2 = [2; 32]; + let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); + let topic_2 = TopicHash::from_bytes(&[2u8; 32]).unwrap(); // Add an ad for topic from enr - ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic.clone()).unwrap(); // The ad hasn't expired and duplicates are not allowed assert_eq!( - ads.insert(enr.clone(), topic).map_err(|e| e), + ads.insert(enr.clone(), topic.clone()).map_err(|e| e), Err("Node already advertising this topic".into()) ); // Add an ad for topic from enr_2 - ads.insert(enr_2.clone(), topic).unwrap(); + ads.insert(enr_2.clone(), topic.clone()).unwrap(); // Add an ad for topic_2 from enr - ads.insert(enr.clone(), topic_2).unwrap(); + ads.insert(enr.clone(), topic_2.clone()).unwrap(); let nodes: Vec = ads.get_ad_nodes(topic).collect(); @@ -73,7 +73,7 @@ async fn insert_ad_and_get_nodes() { #[tokio::test] async fn ticket_wait_time_no_wait_time() { let mut ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); - let topic = [1; 32]; + let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); assert_eq!(ads.ticket_wait_time(topic), None) } @@ -87,12 +87,15 @@ async fn ticket_wait_time_duration() { let mut ads = Ads::new(Duration::from_secs(3), 1, 3).unwrap(); - let topic = [1; 32]; + let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); // Add an add for topic - ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic.clone()).unwrap(); - assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); + assert_gt!( + ads.ticket_wait_time(topic.clone()), + Some(Duration::from_secs(2)) + ); assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); } @@ -111,20 +114,20 @@ async fn ticket_wait_time_full_table() { let mut ads = Ads::new(Duration::from_secs(3), 2, 3).unwrap(); - let topic = [1; 32]; - let topic_2 = [2; 32]; + let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); + let 
topic_2 = TopicHash::from_bytes(&[2u8; 32]).unwrap(); // Add 2 ads for topic - ads.insert(enr.clone(), topic).unwrap(); - ads.insert(enr_2.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic.clone()).unwrap(); + ads.insert(enr_2.clone(), topic.clone()).unwrap(); tokio::time::sleep(Duration::from_secs(2)).await; // Add an ad for topic_2 - ads.insert(enr.clone(), topic_2).unwrap(); + ads.insert(enr.clone(), topic_2.clone()).unwrap(); // Now max_ads in table is reached so the second ad for topic_2 has to wait - assert_ne!(ads.ticket_wait_time(topic_2), None); + assert_ne!(ads.ticket_wait_time(topic_2.clone()), None); tokio::time::sleep(Duration::from_secs(3)).await; @@ -149,24 +152,24 @@ async fn ticket_wait_time_full_topic() { let mut ads = Ads::new(Duration::from_secs(3), 2, 4).unwrap(); - let topic = [1; 32]; - let topic_2 = [2; 32]; + let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); + let topic_2 = TopicHash::from_bytes(&[2u8; 32]).unwrap(); // Add 2 ads for topic - ads.insert(enr.clone(), topic).unwrap(); - ads.insert(enr_2.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic.clone()).unwrap(); + ads.insert(enr_2.clone(), topic.clone()).unwrap(); // Now max_ads_per_topic is reached for topic - assert_ne!(ads.ticket_wait_time(topic), None); + assert_ne!(ads.ticket_wait_time(topic.clone()), None); // Add a topic_2 ad - ads.insert(enr, topic_2).unwrap(); + ads.insert(enr, topic_2.clone()).unwrap(); // The table isn't full so topic_2 ads don't have to wait assert_eq!(ads.ticket_wait_time(topic_2), None); // But for topic they do until the first ads have expired - assert_ne!(ads.ticket_wait_time(topic), None); + assert_ne!(ads.ticket_wait_time(topic.clone()), None); tokio::time::sleep(Duration::from_secs(3)).await; assert_eq!(ads.ticket_wait_time(topic), None); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 47feaf9b5..8a2f3c873 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -5,26 +5,19 @@ 
use enr::NodeId; use node_info::NodeContact; use std::{cmp::Eq, collections::HashSet}; -// Placeholder function -pub fn topic_hash(topic: Vec) -> Topic { - let mut topic_hash = [0u8; 32]; - topic_hash[32 - topic.len()..].copy_from_slice(&topic); - topic_hash -} - -#[derive(PartialEq, Eq, Hash, Clone, Copy)] +#[derive(PartialEq, Eq, Hash, Clone)] pub struct ActiveTopic { node_id: NodeId, - topic: Topic, + topic: TopicHash, } impl ActiveTopic { - pub fn new(node_id: NodeId, topic: Topic) -> Self { + pub fn new(node_id: NodeId, topic: TopicHash) -> Self { ActiveTopic { node_id, topic } } - pub fn topic(&self) -> Topic { - self.topic + pub fn topic(&self) -> TopicHash { + self.topic.clone() } } @@ -43,7 +36,7 @@ impl ActiveTicket { } pub fn ticket(&self) -> Ticket { - self.ticket + self.ticket.clone() } } @@ -69,7 +62,7 @@ impl Tickets { ) -> Result<(), &str> { let active_topic = ActiveTopic::new(contact.node_id(), ticket.topic()); - if let Err(e) = self.ticket_history.insert(active_topic) { + if let Err(e) = self.ticket_history.insert(active_topic.clone()) { return Err(e); } self.tickets @@ -136,7 +129,7 @@ impl TicketHistory { now.saturating_duration_since(ticket_limiter.first_seen) >= self.ticket_cache_duration }) - .map(|ticket_limiter| ticket_limiter.active_topic) + .map(|ticket_limiter| ticket_limiter.active_topic.clone()) .collect::>(); cached_tickets.iter().for_each(|active_topic| { @@ -146,14 +139,14 @@ impl TicketHistory { } } -#[derive(Clone, Copy)] +#[derive(Clone)] struct RegistrationWindow { - topic: Topic, + topic: TopicHash, open_time: Instant, } pub struct TicketPools { - ticket_pools: HashMap>, + ticket_pools: HashMap>, expirations: VecDeque, } @@ -185,7 +178,7 @@ impl TicketPools { } impl Stream for TicketPools { - type Item = Result<(Topic, HashMap), String>; + type Item = Result<(TopicHash, HashMap), String>; fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { self.expirations .pop_front() @@ -206,7 +199,7 @@ impl Stream for 
TicketPools { } } -#[derive(Clone, Copy)] +#[derive(Clone)] pub struct ActiveRegtopicRequest { active_topic: ActiveTopic, insert_time: Instant, @@ -238,7 +231,7 @@ impl ActiveRegtopicRequests { &mut self, req_id: RequestId, node_id: NodeId, - topic: Topic, + topic: TopicHash, ) -> Option { self.remove_expired(); self.requests @@ -246,7 +239,7 @@ impl ActiveRegtopicRequests { .map(|ids| ids.contains(&req_id)) } - pub fn insert(&mut self, node_id: NodeId, topic: Topic, req_id: RequestId) { + pub fn insert(&mut self, node_id: NodeId, topic: TopicHash, req_id: RequestId) { self.remove_expired(); let now = Instant::now(); let active_topic = ActiveTopic::new(node_id, topic); @@ -258,7 +251,7 @@ impl ActiveRegtopicRequests { // each insert incase a REGCONFIRMATION comes to a later req-id. Max req-ids in a set is limited by our // implementation accepting max 3 tickets for a (NodeId, Topic) within 15 minutes. self.requests - .entry(active_topic) + .entry(active_topic.clone()) .or_default() .insert(req_id); self.expirations @@ -272,15 +265,18 @@ impl ActiveRegtopicRequests { } fn remove_expired(&mut self) { + let mut expired = Vec::new(); + self.expirations .iter() .take_while(|req| req.insert_time.elapsed() >= Duration::from_secs(15)) - .copied() - .collect::>() - .iter() .for_each(|req| { - self.requests.remove(&req.active_topic); - self.expirations.pop_front(); + expired.push(req.clone()); }); + + expired.into_iter().for_each(|req| { + self.requests.remove(&req.active_topic); + self.expirations.pop_front(); + }); } } diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs new file mode 100644 index 000000000..b9ac98394 --- /dev/null +++ b/src/advertisement/topic.rs @@ -0,0 +1,147 @@ +// Copyright 2020 Sigma Prime Pty Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use base64::encode; +use rlp::{DecoderError, Rlp, RlpStream}; +use sha2::{Digest, Sha256}; +use std::fmt; + +pub type Sha256Topic = Topic; + +/// A generic trait that can be extended for various hashing types for a topic. +pub trait Hasher { + /// The function that takes a topic string and creates a topic hash. + fn hash(topic_string: String) -> TopicHash; +} + +/// A type for representing topics who use the identity hash. +#[derive(Debug, Clone)] +pub struct IdentityHash {} +impl Hasher for IdentityHash { + /// Creates a [`TopicHash`] as a raw string. + fn hash(topic_string: String) -> TopicHash { + TopicHash { hash: topic_string } + } +} + +#[derive(Debug, Clone)] +pub struct Sha256Hash {} +impl Hasher for Sha256Hash { + /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the + /// hash. 
+ fn hash(topic_string: String) -> TopicHash { + let mut bytes = Vec::with_capacity(topic_string.len()); + bytes.copy_from_slice(topic_string.as_bytes()); + let hash = encode(Sha256::digest(&bytes).as_slice()); + TopicHash { hash } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct TopicHash { + /// The topic hash. Stored as a string to align with the protobuf API. + hash: String, +} + +impl TopicHash { + pub fn from_raw(hash: impl Into) -> TopicHash { + TopicHash { hash: hash.into() } + } + + pub fn from_bytes(hash: &[u8; 32]) -> Result { + if hash.len() != 32 { + return Err("Hash is not 32 bytes"); + } + match std::str::from_utf8(hash) { + Ok(hash) => Ok(TopicHash { hash: hash.into() }), + Err(_) => Err("Cannot decode utf8 string"), + } + } + + pub fn as_bytes(&self) -> [u8; 32] { + let mut buf = [0u8; 32]; + buf.copy_from_slice(self.as_str().as_bytes()); + buf + } + + pub fn into_string(self) -> String { + self.hash + } + + pub fn as_str(&self) -> &str { + &self.hash + } +} + +impl rlp::Encodable for TopicHash { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.as_bytes().to_vec()); + } +} + +impl rlp::Decodable for TopicHash { + fn decode(rlp: &Rlp<'_>) -> Result { + let data = rlp.data()?; + let mut buf = [0u8; 32]; + buf.copy_from_slice(data); + match TopicHash::from_bytes(&buf) { + Ok(topic_hash) => Ok(topic_hash), + Err(e) => Err(DecoderError::Custom(e)), + } + } +} + +/// A gossipsub topic. 
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct Topic { + topic: String, + phantom_data: std::marker::PhantomData, +} + +impl From> for TopicHash { + fn from(topic: Topic) -> TopicHash { + topic.hash() + } +} + +impl Topic { + pub fn new(topic: impl Into) -> Self { + Topic { + topic: topic.into(), + phantom_data: std::marker::PhantomData, + } + } + + pub fn hash(&self) -> TopicHash { + H::hash(self.topic.clone()) + } +} + +impl fmt::Display for Topic { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.topic) + } +} + +impl fmt::Display for TopicHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.hash) + } +} diff --git a/src/rpc.rs b/src/rpc.rs index cf49d51ae..cef08cc6f 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,3 +1,4 @@ +use crate::advertisement::topic::TopicHash; use enr::{CombinedKey, Enr, NodeId}; use rlp::{DecoderError, Rlp, RlpStream}; use std::{ @@ -7,8 +8,6 @@ use std::{ use tokio::time::{Duration, Instant}; use tracing::{debug, warn}; -type TopicHash = [u8; 32]; - /// Type to manage the request IDs. 
#[derive(Debug, Clone, PartialEq, Hash, Eq)] pub struct RequestId(pub Vec); @@ -197,7 +196,7 @@ impl Request { let mut s = RlpStream::new(); s.begin_list(2); s.append(&id.as_bytes()); - s.append(&(&topic as &[u8])); + s.append(&topic); buf.extend_from_slice(&s.out()); buf } @@ -626,6 +625,13 @@ impl Message { topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); topic }; + let topic = match TopicHash::from_bytes(&topic) { + Ok(topic) => topic, + Err(e) => { + debug!("Failed converting topic bytes to TopicHash"); + return Err(DecoderError::Custom(e)); + } + }; Message::Request(Request { id, body: RequestBody::TopicQuery { topic }, @@ -640,14 +646,12 @@ impl Message { } } -pub type Topic = [u8; 32]; - -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone)] pub struct Ticket { //nonce: u64, src_node_id: NodeId, src_ip: IpAddr, - topic: Topic, + topic: TopicHash, req_time: Instant, wait_time: Duration, //cum_wait: Option, @@ -661,7 +665,7 @@ impl rlp::Encodable for Ticket { IpAddr::V4(addr) => s.append(&(addr.octets().to_vec())), IpAddr::V6(addr) => s.append(&(addr.octets().to_vec())), }; - s.append(&(self.topic.to_vec())); + s.append(&self.topic); if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { let time_since_req = self.req_time.elapsed(); let time_stamp = time_since_unix - time_since_req; @@ -729,7 +733,14 @@ impl rlp::Decodable for Ticket { } let mut topic = [0u8; 32]; topic.copy_from_slice(data); - topic + let topic_hash = match TopicHash::from_bytes(&topic) { + Ok(topic_hash) => topic_hash, + Err(e) => { + debug!("Ticket has incorrect topic hash"); + return Err(DecoderError::Custom(e)); + } + }; + topic_hash }; let req_time = { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { @@ -780,7 +791,7 @@ impl Ticket { //nonce: u64, src_node_id: NodeId, src_ip: IpAddr, - topic: Topic, + topic: TopicHash, req_time: Instant, wait_time: Duration, //cum_wait: Option, @@ -796,8 +807,8 @@ impl Ticket { } } - pub fn 
topic(&self) -> Topic { - self.topic + pub fn topic(&self) -> TopicHash { + self.topic.clone() } pub fn req_time(&self) -> Instant { @@ -1101,7 +1112,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - [1; 32], + TopicHash::from_bytes(&[1u8; 32]).unwrap(), Instant::now(), Duration::from_secs(11), ); @@ -1136,7 +1147,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - [1; 32], + TopicHash::from_bytes(&[1u8; 32]).unwrap(), Instant::now(), Duration::from_secs(11), ); @@ -1160,7 +1171,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - [1; 32], + TopicHash::from_bytes(&[1u8; 32]).unwrap(), Instant::now(), Duration::from_secs(11), ); @@ -1199,7 +1210,9 @@ mod tests { fn encode_decode_topic_query_request() { let request = Message::Request(Request { id: RequestId(vec![1]), - body: RequestBody::TopicQuery { topic: [0u8; 32] }, + body: RequestBody::TopicQuery { + topic: TopicHash::from_bytes(&[0u8; 32]).unwrap(), + }, }); let encoded = request.clone().encode(); diff --git a/src/service.rs b/src/service.rs index 6f8397948..71e0fc53a 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,7 +18,8 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{topic_hash, ActiveRegtopicRequests, TicketPools, Tickets}, + ticket::{ActiveRegtopicRequests, TicketPools, Tickets}, + topic::{Sha256Topic as Topic, TopicHash}, Ads, }, error::{RequestError, ResponseError}, @@ -219,7 +220,7 @@ pub struct Service { tickets: Tickets, /// Topics to advertise on other nodes. - topics: HashSet, + topics: HashSet, /// Ads currently advertised on other nodes. 
active_topics: Ads, @@ -472,16 +473,16 @@ impl Service { let QueryType::FindNode(node_id) = result.target.query_type; - let topic = self.topics.get(&node_id.raw()).copied(); - - if let Some(topic) = topic { - let local_enr = self.local_enr.read().clone(); - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), None)); - } else if let Some(callback) = result.target.callback { - if callback.send(found_enrs).is_err() { - warn!("Callback dropped for query {}. Results dropped", *id); + if let Ok(topic_hash) = TopicHash::from_bytes(&node_id.raw()).map_err(|e| error!("{}", e)) { + if self.topics.get(&topic_hash).is_some() { + let local_enr = self.local_enr.read().clone(); + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic_hash.clone(), local_enr.clone(), None)); + } else if let Some(callback) = result.target.callback { + if callback.send(found_enrs).is_err() { + warn!("Callback dropped for query {}. 
Results dropped", *id); + } } - } + } } } } @@ -505,7 +506,7 @@ impl Service { self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } _ = publish_topics.tick() => { - self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic), None)); + self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic.as_bytes()), None)); } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { // Selection of node for free ad slot @@ -517,7 +518,7 @@ impl Service { selection.into_iter().next().map(|node_id| ticket_pool.get(node_id)).unwrap_or(None) }; if let Some((node_record, req_id, _ticket)) = new_ad.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { - self.ads.insert(node_record.clone(), topic).ok(); + self.ads.insert(node_record.clone(), topic.clone()).ok(); NodeContact::from(node_record).node_address().map(|node_address| { self.send_regconfirmation_response(node_address, req_id, topic); }).ok(); @@ -695,67 +696,71 @@ impl Service { if enr.node_id() == node_address.node_id && enr.udp_socket() == Some(node_address.socket_addr) { - let topic = topic_hash(topic); - let wait_time = self.ads.ticket_wait_time(topic); - - let new_ticket = Ticket::new( - node_address.node_id, - node_address.socket_addr.ip(), - topic, - tokio::time::Instant::now(), - wait_time.unwrap_or(Duration::from_secs(0)), - ); + if let Ok(topic_str) = std::str::from_utf8(&topic).map_err(|e| error!("{}", e)) + { + let topic = Topic::new(topic_str.to_owned()).hash(); + let wait_time = self.ads.ticket_wait_time(topic.clone()); + + let new_ticket = Ticket::new( + node_address.node_id, + node_address.socket_addr.ip(), + topic, + tokio::time::Instant::now(), + wait_time.unwrap_or(Duration::from_secs(0)), + ); - self.send_ticket_response( - node_address, - id.clone(), - new_ticket, - wait_time.unwrap_or(Duration::from_secs(0)), - ); + self.send_ticket_response( + 
node_address, + id.clone(), + new_ticket.clone(), + wait_time.unwrap_or(Duration::from_secs(0)), + ); - if !ticket.is_empty() { - let decoded_enr = self - .local_enr - .write() - .to_base64() - .parse::() - .map_err(|e| { - error!("Failed to decode ticket in REGTOPIC query: {}", e) - }); - if let Ok(decoded_enr) = decoded_enr { - if let Some(ticket_key) = decoded_enr.get("ticket_key") { - let decrypted_ticket = { - let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); - let payload = Payload { - msg: &ticket, - aad: b"", + if !ticket.is_empty() { + let decoded_enr = self + .local_enr + .write() + .to_base64() + .parse::() + .map_err(|e| { + error!("Failed to decode ticket in REGTOPIC query: {}", e) + }); + if let Ok(decoded_enr) = decoded_enr { + if let Some(ticket_key) = decoded_enr.get("ticket_key") { + let decrypted_ticket = { + let aead = + Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", + }; + aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .map_err(|e| { + error!( + "Failed to decode ticket in REGTOPIC query: {}", + e + ) + }) }; - aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) - .map_err(|e| { - error!( - "Failed to decode ticket in REGTOPIC query: {}", - e - ) - }) - }; - if let Ok(decrypted_ticket) = decrypted_ticket { - Ticket::decode(&decrypted_ticket) - .map_err(|e| error!("{}", e)) - .map(|ticket| { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket - if let Some(ticket) = ticket { - if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); + if let Ok(decrypted_ticket) = decrypted_ticket { + Ticket::decode(&decrypted_ticket) + .map_err(|e| error!("{}", e)) + .map(|ticket| { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket + if let Some(ticket) = ticket { + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, 
ticket); + } } - } - }) - .ok(); + }) + .ok(); + } } } + } else { + self.ticket_pools.insert(enr, id, new_ticket); } - } else { - self.ticket_pools.insert(enr, id, new_ticket); } } } @@ -1014,14 +1019,17 @@ impl Service { .ok(); } ResponseBody::RegisterConfirmation { topic } => { - let topic = topic_hash(topic); - if self - .active_regtopic_requests - .is_active_req(id, node_id, topic) - .is_some() + if let Ok(topic_str) = std::str::from_utf8(&topic).map_err(|e| error!("{}", e)) { - if let NodeContact::Enr(enr) = active_request.contact { - self.active_topics.insert(*enr, topic).ok(); + let topic = Topic::new(topic_str.to_owned()).hash(); + if self + .active_regtopic_requests + .is_active_req(id, node_id, topic.clone()) + .is_some() + { + if let NodeContact::Enr(enr) = active_request.contact { + self.active_topics.insert(*enr, topic).ok(); + } } } } @@ -1110,7 +1118,7 @@ impl Service { fn reg_topic_request( &mut self, contact: NodeContact, - topic: Topic, + topic: TopicHash, enr: Enr, ticket: Option, ) { @@ -1121,7 +1129,7 @@ impl Service { }; let node_id = enr.node_id(); let request_body = RequestBody::RegisterTopic { - topic: topic.to_vec(), + topic: topic.as_bytes().to_vec(), enr, ticket: ticket_bytes, }; @@ -1184,12 +1192,12 @@ impl Service { &mut self, node_address: NodeAddress, rpc_id: RequestId, - topic: Topic, + topic: TopicHash, ) { let response = Response { id: rpc_id, body: ResponseBody::RegisterConfirmation { - topic: topic.to_vec(), + topic: topic.as_bytes().to_vec(), }, }; trace!( @@ -1206,7 +1214,7 @@ impl Service { &mut self, node_address: NodeAddress, rpc_id: RequestId, - topic: [u8; 32], + topic: TopicHash, ) { let nodes_to_send = self.ads.get_ad_nodes(topic).collect(); diff --git a/src/service/test.rs b/src/service/test.rs index eb1af58ea..6bd582864 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -221,7 +221,7 @@ async fn encrypt_decrypt_ticket() { let ticket = Ticket::new( node_id, ip, - [2u8; 32], + TopicHash::from_bytes(&[2u8; 
32]).unwrap(), tokio::time::Instant::now(), tokio::time::Duration::from_secs(5), ); From e710524e2ec54406fe0b272f95d40285f723ff6e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 16 Apr 2022 10:53:39 +0200 Subject: [PATCH 063/391] Fix test --- src/service/test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/test.rs b/src/service/test.rs index 6bd582864..4b05b84c2 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -185,7 +185,7 @@ async fn encrypt_decrypt_ticket() { let ip: IpAddr = "127.0.0.1".parse().unwrap(); let enr = EnrBuilder::new("v4") .ip(ip) - .udp(10001) + .udp(10006) .build(&enr_key) .unwrap(); From 01d834cc10fb926bda744c8de8698f895f3fa1a3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 16 Apr 2022 11:45:15 +0200 Subject: [PATCH 064/391] Only allow conversion from bytes to TopicHash via Topic --- src/advertisement/test.rs | 24 +++++++++++------------ src/advertisement/topic.rs | 24 ++++++++--------------- src/rpc.rs | 39 ++++++++++++++++++-------------------- src/service.rs | 11 ++++++----- src/service/test.rs | 3 ++- 5 files changed, 45 insertions(+), 56 deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 31ea37584..fcc3d9a53 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -1,6 +1,7 @@ #![cfg(test)] use super::*; +use crate::advertisement::topic::{Sha256Topic as Topic} ; use enr::{CombinedKey, EnrBuilder}; use more_asserts::{assert_gt, assert_lt}; use std::net::IpAddr; @@ -15,7 +16,7 @@ async fn insert_same_node() { let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); - let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); + let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); ads.insert(enr.clone(), topic.clone()).unwrap(); @@ -44,8 +45,8 @@ async fn insert_ad_and_get_nodes() { let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); - let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); - let 
topic_2 = TopicHash::from_bytes(&[2u8; 32]).unwrap(); + let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); + let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add an ad for topic from enr ads.insert(enr.clone(), topic.clone()).unwrap(); @@ -73,7 +74,7 @@ async fn insert_ad_and_get_nodes() { #[tokio::test] async fn ticket_wait_time_no_wait_time() { let mut ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); - let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); + let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); assert_eq!(ads.ticket_wait_time(topic), None) } @@ -87,15 +88,12 @@ async fn ticket_wait_time_duration() { let mut ads = Ads::new(Duration::from_secs(3), 1, 3).unwrap(); - let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); + let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); // Add an add for topic ads.insert(enr.clone(), topic.clone()).unwrap(); - assert_gt!( - ads.ticket_wait_time(topic.clone()), - Some(Duration::from_secs(2)) - ); + assert_gt!(ads.ticket_wait_time(topic.clone()), Some(Duration::from_secs(2))); assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); } @@ -114,8 +112,8 @@ async fn ticket_wait_time_full_table() { let mut ads = Ads::new(Duration::from_secs(3), 2, 3).unwrap(); - let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); - let topic_2 = TopicHash::from_bytes(&[2u8; 32]).unwrap(); + let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); + let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add 2 ads for topic ads.insert(enr.clone(), topic.clone()).unwrap(); @@ -152,8 +150,8 @@ async fn ticket_wait_time_full_topic() { let mut ads = Ads::new(Duration::from_secs(3), 2, 4).unwrap(); - let topic = TopicHash::from_bytes(&[1u8; 32]).unwrap(); - let topic_2 = TopicHash::from_bytes(&[2u8; 32]).unwrap(); + let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); 
+ let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add 2 ads for topic ads.insert(enr.clone(), topic.clone()).unwrap(); diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index b9ac98394..85f5a49a8 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -23,6 +23,7 @@ use rlp::{DecoderError, Rlp, RlpStream}; use sha2::{Digest, Sha256}; use std::fmt; +pub type IdentTopic = Topic; pub type Sha256Topic = Topic; /// A generic trait that can be extended for various hashing types for a topic. @@ -65,16 +66,6 @@ impl TopicHash { TopicHash { hash: hash.into() } } - pub fn from_bytes(hash: &[u8; 32]) -> Result { - if hash.len() != 32 { - return Err("Hash is not 32 bytes"); - } - match std::str::from_utf8(hash) { - Ok(hash) => Ok(TopicHash { hash: hash.into() }), - Err(_) => Err("Cannot decode utf8 string"), - } - } - pub fn as_bytes(&self) -> [u8; 32] { let mut buf = [0u8; 32]; buf.copy_from_slice(self.as_str().as_bytes()); @@ -99,12 +90,13 @@ impl rlp::Encodable for TopicHash { impl rlp::Decodable for TopicHash { fn decode(rlp: &Rlp<'_>) -> Result { let data = rlp.data()?; - let mut buf = [0u8; 32]; - buf.copy_from_slice(data); - match TopicHash::from_bytes(&buf) { - Ok(topic_hash) => Ok(topic_hash), - Err(e) => Err(DecoderError::Custom(e)), - } + let topic_string = match std::str::from_utf8(data) { + Ok(topic_string) => topic_string, + Err(e) => { + return Err(DecoderError::Custom("Cannot convert from byte data to utf8 string")); + } + }; + Ok(TopicHash { hash: topic_string.into() }) } } diff --git a/src/rpc.rs b/src/rpc.rs index cef08cc6f..99ced9c0c 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,4 +1,4 @@ -use crate::advertisement::topic::TopicHash; +use crate::advertisement::topic::{IdentTopic as Topic, TopicHash}; use enr::{CombinedKey, Enr, NodeId}; use rlp::{DecoderError, Rlp, RlpStream}; use std::{ @@ -623,14 +623,13 @@ impl Message { } let mut topic = [0u8; 32]; topic[32 - 
topic_bytes.len()..].copy_from_slice(&topic_bytes); - topic - }; - let topic = match TopicHash::from_bytes(&topic) { - Ok(topic) => topic, - Err(e) => { - debug!("Failed converting topic bytes to TopicHash"); - return Err(DecoderError::Custom(e)); - } + let topic_string = match std::str::from_utf8(data) { + Ok(topic_string) => topic_string, + Err(_) => { + return Err(DecoderError::Custom("Cannot convert from byte data to utf8 string")); + } + }; + Topic::new(topic_string).hash() }; Message::Request(Request { id, @@ -731,16 +730,14 @@ impl rlp::Decodable for Ticket { debug!("Ticket's topic hash is not 32 bytes"); return Err(DecoderError::RlpIsTooBig); } - let mut topic = [0u8; 32]; - topic.copy_from_slice(data); - let topic_hash = match TopicHash::from_bytes(&topic) { - Ok(topic_hash) => topic_hash, - Err(e) => { - debug!("Ticket has incorrect topic hash"); - return Err(DecoderError::Custom(e)); + // IdentTopic is used as the topic is already a hash + let topic_string = match std::str::from_utf8(data) { + Ok(topic_string) => topic_string, + Err(_) => { + return Err(DecoderError::Custom("Cannot convert from byte data to utf8 string")); } }; - topic_hash + Topic::new(topic_string).hash() }; let req_time = { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { @@ -1112,7 +1109,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - TopicHash::from_bytes(&[1u8; 32]).unwrap(), + Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(), Instant::now(), Duration::from_secs(11), ); @@ -1147,7 +1144,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - TopicHash::from_bytes(&[1u8; 32]).unwrap(), + Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(), Instant::now(), Duration::from_secs(11), ); @@ -1171,7 +1168,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - TopicHash::from_bytes(&[1u8; 32]).unwrap(), + Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(), Instant::now(), Duration::from_secs(11), ); @@ -1211,7 +1208,7 @@ 
mod tests { let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::TopicQuery { - topic: TopicHash::from_bytes(&[0u8; 32]).unwrap(), + topic: Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(), }, }); diff --git a/src/service.rs b/src/service.rs index 71e0fc53a..e8fe37294 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ use crate::{ advertisement::{ ticket::{ActiveRegtopicRequests, TicketPools, Tickets}, - topic::{Sha256Topic as Topic, TopicHash}, + topic::{IdentTopic as Topic, TopicHash}, Ads, }, error::{RequestError, ResponseError}, @@ -472,11 +472,12 @@ impl Service { } let QueryType::FindNode(node_id) = result.target.query_type; - - if let Ok(topic_hash) = TopicHash::from_bytes(&node_id.raw()).map_err(|e| error!("{}", e)) { - if self.topics.get(&topic_hash).is_some() { + + if let Ok(topic_str) = std::str::from_utf8(&node_id.raw()).map_err(|e| error!("{}", e)) { + let topic = Topic::new(topic_str.to_owned()).hash(); + if self.topics.get(&topic).is_some() { let local_enr = self.local_enr.read().clone(); - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic_hash.clone(), local_enr.clone(), None)); + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic.clone(), local_enr.clone(), None)); } else if let Some(callback) = result.target.callback { if callback.send(found_enrs).is_err() { warn!("Callback dropped for query {}. 
Results dropped", *id); diff --git a/src/service/test.rs b/src/service/test.rs index 4b05b84c2..7b8545431 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -218,10 +218,11 @@ async fn encrypt_decrypt_ticket() { let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); let node_id = enr.node_id(); + let topic_hash = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); let ticket = Ticket::new( node_id, ip, - TopicHash::from_bytes(&[2u8; 32]).unwrap(), + topic_hash, tokio::time::Instant::now(), tokio::time::Duration::from_secs(5), ); From cb140f4882be0c5996927e0381425e29b0b17746 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 17 Apr 2022 00:16:17 +0200 Subject: [PATCH 065/391] Run cargo fmt --- src/advertisement/test.rs | 7 +++++-- src/advertisement/topic.rs | 10 +++++++--- src/rpc.rs | 8 ++++++-- src/service.rs | 4 ++-- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index fcc3d9a53..3536397c7 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -1,7 +1,7 @@ #![cfg(test)] use super::*; -use crate::advertisement::topic::{Sha256Topic as Topic} ; +use crate::advertisement::topic::Sha256Topic as Topic; use enr::{CombinedKey, EnrBuilder}; use more_asserts::{assert_gt, assert_lt}; use std::net::IpAddr; @@ -93,7 +93,10 @@ async fn ticket_wait_time_duration() { // Add an add for topic ads.insert(enr.clone(), topic.clone()).unwrap(); - assert_gt!(ads.ticket_wait_time(topic.clone()), Some(Duration::from_secs(2))); + assert_gt!( + ads.ticket_wait_time(topic.clone()), + Some(Duration::from_secs(2)) + ); assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); } diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 85f5a49a8..ef5b33126 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -92,11 +92,15 @@ impl rlp::Decodable for TopicHash { let data = rlp.data()?; let topic_string = match 
std::str::from_utf8(data) { Ok(topic_string) => topic_string, - Err(e) => { - return Err(DecoderError::Custom("Cannot convert from byte data to utf8 string")); + Err(_) => { + return Err(DecoderError::Custom( + "Cannot convert from byte data to utf8 string", + )); } }; - Ok(TopicHash { hash: topic_string.into() }) + Ok(TopicHash { + hash: topic_string.into(), + }) } } diff --git a/src/rpc.rs b/src/rpc.rs index 99ced9c0c..b7b7d1ae6 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -626,7 +626,9 @@ impl Message { let topic_string = match std::str::from_utf8(data) { Ok(topic_string) => topic_string, Err(_) => { - return Err(DecoderError::Custom("Cannot convert from byte data to utf8 string")); + return Err(DecoderError::Custom( + "Cannot convert from byte data to utf8 string", + )); } }; Topic::new(topic_string).hash() @@ -734,7 +736,9 @@ impl rlp::Decodable for Ticket { let topic_string = match std::str::from_utf8(data) { Ok(topic_string) => topic_string, Err(_) => { - return Err(DecoderError::Custom("Cannot convert from byte data to utf8 string")); + return Err(DecoderError::Custom( + "Cannot convert from byte data to utf8 string", + )); } }; Topic::new(topic_string).hash() diff --git a/src/service.rs b/src/service.rs index e8fe37294..36049afe3 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ use crate::{ advertisement::{ ticket::{ActiveRegtopicRequests, TicketPools, Tickets}, - topic::{IdentTopic as Topic, TopicHash}, + topic::{Sha256Topic as Topic, TopicHash}, Ads, }, error::{RequestError, ResponseError}, @@ -472,7 +472,7 @@ impl Service { } let QueryType::FindNode(node_id) = result.target.query_type; - + if let Ok(topic_str) = std::str::from_utf8(&node_id.raw()).map_err(|e| error!("{}", e)) { let topic = Topic::new(topic_str.to_owned()).hash(); if self.topics.get(&topic).is_some() { From 27ec491b5034f795405d986e222631ae05be18cf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Apr 2022 14:55:32 +0200 Subject: [PATCH 066/391] Fix 
logic error arisen by reusing node lookup for topic look up --- src/service.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/service.rs b/src/service.rs index 36049afe3..62ea4b89e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -471,17 +471,18 @@ impl Service { } } - let QueryType::FindNode(node_id) = result.target.query_type; - - if let Ok(topic_str) = std::str::from_utf8(&node_id.raw()).map_err(|e| error!("{}", e)) { - let topic = Topic::new(topic_str.to_owned()).hash(); - if self.topics.get(&topic).is_some() { - let local_enr = self.local_enr.read().clone(); - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic.clone(), local_enr.clone(), None)); - } else if let Some(callback) = result.target.callback { - if callback.send(found_enrs).is_err() { - warn!("Callback dropped for query {}. Results dropped", *id); - } + if let Some(callback) = result.target.callback { + if callback.send(found_enrs).is_err() { + warn!("Callback dropped for query {}. 
Results dropped", *id); + } + } else { + let QueryType::FindNode(node_id) = result.target.query_type; + if let Ok(topic_str) = std::str::from_utf8(&node_id.raw()).map_err(|e| error!("{}", e)) { + let topic = Topic::new(topic_str.to_owned()).hash(); + if self.topics.get(&topic).is_some() { + let local_enr = self.local_enr.read().clone(); + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic.clone(), local_enr.clone(), None)); + } } } } From 76c6efefc58dd5d9a800162ffd0564bf0dcaf000 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Apr 2022 15:07:27 +0200 Subject: [PATCH 067/391] Fix placeholder light-weight hash logic --- src/advertisement/topic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index ef5b33126..a3f4a1ed7 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -48,7 +48,7 @@ impl Hasher for Sha256Hash { /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the /// hash. 
fn hash(topic_string: String) -> TopicHash { - let mut bytes = Vec::with_capacity(topic_string.len()); + let mut bytes = [0u8; 32]; bytes.copy_from_slice(topic_string.as_bytes()); let hash = encode(Sha256::digest(&bytes).as_slice()); TopicHash { hash } From 5c9dd8f9361714369e990aa4b70d254c37a00baa Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Apr 2022 17:39:41 +0200 Subject: [PATCH 068/391] Integrate Topic for sigpi/gossipsub with discv5 topic --- src/advertisement/mod.rs | 6 +- src/advertisement/ticket.rs | 2 +- src/advertisement/topic.rs | 98 +++++++++++++++--------- src/rpc.rs | 93 +++++++++++----------- src/service.rs | 149 ++++++++++++++++-------------------- src/service/test.rs | 5 +- 6 files changed, 184 insertions(+), 169 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index ee93ae728..dc45976b6 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -111,12 +111,12 @@ impl Ads { .iter() .take_while(|ad| ad.insert_time.elapsed() >= self.ad_lifetime) .for_each(|ad| { - let count = map.entry(ad.topic.clone()).or_default(); + let count = map.entry(ad.topic).or_default(); *count += 1; }); map.into_iter().for_each(|(topic, index)| { - let entry_ref = self.ads.entry(topic.clone()).or_default(); + let entry_ref = self.ads.entry(topic).or_default(); for _ in 0..index { entry_ref.pop_front(); self.expirations.pop_front(); @@ -130,7 +130,7 @@ impl Ads { pub fn insert(&mut self, node_record: Enr, topic: TopicHash) -> Result<(), &str> { self.remove_expired(); let now = Instant::now(); - let nodes = self.ads.entry(topic.clone()).or_default(); + let nodes = self.ads.entry(topic).or_default(); if nodes.contains(&AdNode::new(node_record.clone(), now)) { error!( "This node {} is already advertising this topic", diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 8a2f3c873..cd4a49483 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -17,7 +17,7 @@ impl ActiveTopic { 
} pub fn topic(&self) -> TopicHash { - self.topic.clone() + self.topic } } diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index a3f4a1ed7..3a0565849 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -21,9 +21,10 @@ use base64::encode; use rlp::{DecoderError, Rlp, RlpStream}; use sha2::{Digest, Sha256}; -use std::fmt; +use std::{cmp::Ordering, fmt, hash::Hash}; +use tracing::debug; -pub type IdentTopic = Topic; +//pub type IdentTopic = Topic; pub type Sha256Topic = Topic; /// A generic trait that can be extended for various hashing types for a topic. @@ -33,14 +34,14 @@ pub trait Hasher { } /// A type for representing topics who use the identity hash. -#[derive(Debug, Clone)] +/*#[derive(Debug, Clone)] pub struct IdentityHash {} impl Hasher for IdentityHash { /// Creates a [`TopicHash`] as a raw string. fn hash(topic_string: String) -> TopicHash { - TopicHash { hash: topic_string } + TopicHash { hash: topic_string.as_bytes() } } -} +}*/ #[derive(Debug, Clone)] pub struct Sha256Hash {} @@ -48,64 +49,58 @@ impl Hasher for Sha256Hash { /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the /// hash. fn hash(topic_string: String) -> TopicHash { - let mut bytes = [0u8; 32]; - bytes.copy_from_slice(topic_string.as_bytes()); - let hash = encode(Sha256::digest(&bytes).as_slice()); + let sha256 = Sha256::digest(topic_string.as_bytes()); + let mut hash = [0u8; 32]; + hash.copy_from_slice(&sha256); TopicHash { hash } } } -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct TopicHash { /// The topic hash. Stored as a string to align with the protobuf API. - hash: String, + hash: [u8; 32], } +// Topic Hash decoded into bytes needs to have length 32 bytes to encode it into a +// NodeId, which is necessary to make use of the XOR distance look-up of a topic. 
It +// makes sense to use a hashing algorithm which produces 32 bytes since the hash of +// any given topic string can then be reproduced by any client when making a topic +// query or publishing the same topic in proximity to others of its kind. impl TopicHash { - pub fn from_raw(hash: impl Into) -> TopicHash { - TopicHash { hash: hash.into() } + pub fn from_raw(hash: [u8; 32]) -> TopicHash { + TopicHash { hash } } pub fn as_bytes(&self) -> [u8; 32] { - let mut buf = [0u8; 32]; - buf.copy_from_slice(self.as_str().as_bytes()); - buf - } - - pub fn into_string(self) -> String { self.hash } - - pub fn as_str(&self) -> &str { - &self.hash - } } impl rlp::Encodable for TopicHash { fn rlp_append(&self, s: &mut RlpStream) { - s.append(&self.as_bytes().to_vec()); + s.append(&self.hash.to_vec()); } } impl rlp::Decodable for TopicHash { fn decode(rlp: &Rlp<'_>) -> Result { - let data = rlp.data()?; - let topic_string = match std::str::from_utf8(data) { - Ok(topic_string) => topic_string, - Err(_) => { - return Err(DecoderError::Custom( - "Cannot convert from byte data to utf8 string", - )); + let topic = { + let topic_bytes = rlp.data()?; + if topic_bytes.len() > 32 { + debug!("Topic greater than 32 bytes"); + return Err(DecoderError::RlpIsTooBig); } + let mut topic = [0u8; 32]; + topic[32 - topic_bytes.len()..].copy_from_slice(topic_bytes); + topic }; - Ok(TopicHash { - hash: topic_string.into(), - }) + Ok(TopicHash::from_raw(topic)) } } /// A gossipsub topic. 
-#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone)] pub struct Topic { topic: String, phantom_data: std::marker::PhantomData, @@ -128,6 +123,39 @@ impl Topic { pub fn hash(&self) -> TopicHash { H::hash(self.topic.clone()) } + + pub fn topic(&self) -> String { + self.topic.clone() + } +} + +// Each hash algortihm chosen to publish a topic with (as XOR +// metric key) is its own Topic +impl PartialEq for Topic { + fn eq(&self, other: &Topic) -> bool { + self.hash() == other.hash() + } +} + +impl Eq for Topic {} + +impl Hash for Topic { + fn hash(&self, _state: &mut T) { + self.hash(); + } +} + +// When sorted topics should group based on the topic string +impl PartialOrd for Topic { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.topic.cmp(&other.topic)) + } +} + +impl Ord for Topic { + fn cmp(&self, other: &Self) -> Ordering { + self.topic.cmp(&other.topic) + } } impl fmt::Display for Topic { @@ -138,6 +166,6 @@ impl fmt::Display for Topic { impl fmt::Display for TopicHash { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.hash) + write!(f, "{}", encode(self.hash)) } } diff --git a/src/rpc.rs b/src/rpc.rs index b7b7d1ae6..55853e4b4 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,4 +1,4 @@ -use crate::advertisement::topic::{IdentTopic as Topic, TopicHash}; +use crate::advertisement::topic::TopicHash; use enr::{CombinedKey, Enr, NodeId}; use rlp::{DecoderError, Rlp, RlpStream}; use std::{ @@ -86,7 +86,7 @@ pub enum RequestBody { /// A REGTOPIC request. RegisterTopic { /// The topic we want to advertise at the node receiving this request. - topic: Vec, + topic: TopicHash, // Current node record of sender. enr: crate::Enr, // Ticket content of ticket from a previous registration attempt or empty. @@ -132,7 +132,7 @@ pub enum ResponseBody { /// The REGCONFIRMATION response. RegisterConfirmation { /// The topic of a successful REGTOPIC request. 
- topic: Vec, + topic: TopicHash, }, } @@ -186,7 +186,7 @@ impl Request { let mut s = RlpStream::new(); s.begin_list(4); s.append(&id.as_bytes()); - s.append(&topic); + s.append(&topic.as_bytes().to_vec()); s.append(&enr); s.append(&ticket); buf.extend_from_slice(&s.out()); @@ -196,7 +196,7 @@ impl Request { let mut s = RlpStream::new(); s.begin_list(2); s.append(&id.as_bytes()); - s.append(&topic); + s.append(&topic.as_bytes().to_vec()); buf.extend_from_slice(&s.out()); buf } @@ -291,7 +291,7 @@ impl Response { let mut s = RlpStream::new(); s.begin_list(2); s.append(&id.as_bytes()); - s.append(&topic); + s.append(&topic.as_bytes().to_vec()); buf.extend_from_slice(&s.out()); buf } @@ -349,7 +349,11 @@ impl std::fmt::Display for ResponseBody { write!(f, "TICKET: Ticket: {:?}, Wait time: {}", ticket, wait_time) } ResponseBody::RegisterConfirmation { topic } => { - write!(f, "REGTOPIC: Registered: {}", hex::encode(topic)) + write!( + f, + "REGTOPIC: Registered: {}", + hex::encode(topic.to_string()) + ) } } } @@ -378,7 +382,7 @@ impl std::fmt::Display for RequestBody { RequestBody::RegisterTopic { topic, enr, ticket } => write!( f, "RegisterTopic: topic: {}, enr: {}, ticket: {:?}", - hex::encode(topic), + hex::encode(topic.to_string()), enr.to_base64(), ticket, ), @@ -569,7 +573,16 @@ impl Message { debug!("RegisterTopic Request has an invalid RLP list length. 
Expected 2, found {}", list_len); return Err(DecoderError::RlpIncorrectListLen); } - let topic = rlp.val_at::>(1)?; + let topic = { + let topic_bytes = rlp.val_at::>(1)?; + if topic_bytes.len() > 32 { + debug!("RegisterTopic Request has a topic greater than 32 bytes"); + return Err(DecoderError::RlpIsTooBig); + } + let mut topic = [0u8; 32]; + topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); + TopicHash::from_raw(topic) + }; let enr_rlp = rlp.at(2)?; let enr = enr_rlp.as_val::>()?; let ticket = rlp.val_at::>(3)?; @@ -600,7 +613,16 @@ impl Message { ); return Err(DecoderError::RlpIncorrectListLen); } - let topic = rlp.val_at::>(1)?; + let topic = { + let topic_bytes = rlp.val_at::>(1)?; + if topic_bytes.len() > 32 { + debug!("RegisterConfirmation Request has a topic greater than 32 bytes"); + return Err(DecoderError::RlpIsTooBig); + } + let mut topic = [0u8; 32]; + topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); + TopicHash::from_raw(topic) + }; Message::Response(Response { id, body: ResponseBody::RegisterConfirmation { topic }, @@ -623,15 +645,7 @@ impl Message { } let mut topic = [0u8; 32]; topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); - let topic_string = match std::str::from_utf8(data) { - Ok(topic_string) => topic_string, - Err(_) => { - return Err(DecoderError::Custom( - "Cannot convert from byte data to utf8 string", - )); - } - }; - Topic::new(topic_string).hash() + TopicHash::from_raw(topic) }; Message::Request(Request { id, @@ -726,23 +740,9 @@ impl rlp::Decodable for Ticket { } } }; - let topic = { - let data = decoded_list.remove(0).data()?; - if data.len() != 32 { - debug!("Ticket's topic hash is not 32 bytes"); - return Err(DecoderError::RlpIsTooBig); - } - // IdentTopic is used as the topic is already a hash - let topic_string = match std::str::from_utf8(data) { - Ok(topic_string) => topic_string, - Err(_) => { - return Err(DecoderError::Custom( - "Cannot convert from byte data to utf8 string", - )); - } - 
}; - Topic::new(topic_string).hash() - }; + + let topic = decoded_list.remove(0).as_val::()?; + let req_time = { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { let s_bytes = decoded_list.remove(0).data()?; @@ -762,6 +762,7 @@ impl rlp::Decodable for Ticket { return Err(DecoderError::Custom("SystemTime before UNIX EPOCH!")); } }; + let wait_time = { let s_bytes = decoded_list.remove(0).data()?; let mut s = [0u8; 8]; @@ -769,6 +770,7 @@ impl rlp::Decodable for Ticket { let secs = u64::from_be_bytes(s); Duration::from_secs(secs) }; + Ok(Self { src_node_id, src_ip, @@ -809,7 +811,7 @@ impl Ticket { } pub fn topic(&self) -> TopicHash { - self.topic.clone() + self.topic } pub fn req_time(&self) -> Instant { @@ -1090,7 +1092,7 @@ mod tests { let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::RegisterTopic { - topic: vec![1, 2, 3], + topic: TopicHash::from_raw([1u8; 32]), enr, ticket: Vec::new(), }, @@ -1113,7 +1115,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(), + TopicHash::from_raw([1u8; 32]), Instant::now(), Duration::from_secs(11), ); @@ -1123,9 +1125,9 @@ mod tests { let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::RegisterTopic { - topic: vec![1, 2, 3], + topic: TopicHash::from_raw([1u8; 32]), enr, - ticket: ticket, + ticket, }, }); @@ -1148,7 +1150,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(), + TopicHash::from_raw([1u8; 32]), Instant::now(), Duration::from_secs(11), ); @@ -1172,7 +1174,7 @@ mod tests { let ticket = Ticket::new( node_id, ip, - Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(), + TopicHash::from_raw([1u8; 32]), Instant::now(), Duration::from_secs(11), ); @@ -1197,7 +1199,7 @@ mod tests { let response = Message::Response(Response { id: RequestId(vec![1]), body: ResponseBody::RegisterConfirmation { - 
topic: vec![1, 2, 3], + topic: TopicHash::from_raw([1u8; 32]), }, }); @@ -1212,10 +1214,9 @@ mod tests { let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::TopicQuery { - topic: Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(), + topic: TopicHash::from_raw([1u8; 32]), }, }); - let encoded = request.clone().encode(); let decoded = Message::decode(&encoded).unwrap(); diff --git a/src/service.rs b/src/service.rs index 62ea4b89e..d217bcd7d 100644 --- a/src/service.rs +++ b/src/service.rs @@ -220,7 +220,7 @@ pub struct Service { tickets: Tickets, /// Topics to advertise on other nodes. - topics: HashSet, + topics: HashMap, /// Ads currently advertised on other nodes. active_topics: Ads, @@ -349,7 +349,7 @@ impl Service { event_stream: None, ads, tickets: Tickets::new(Duration::from_secs(60 * 15)), - topics: HashSet::new(), + topics: HashMap::new(), active_topics, ticket_pools: TicketPools::new(), exit, @@ -477,13 +477,11 @@ impl Service { } } else { let QueryType::FindNode(node_id) = result.target.query_type; - if let Ok(topic_str) = std::str::from_utf8(&node_id.raw()).map_err(|e| error!("{}", e)) { - let topic = Topic::new(topic_str.to_owned()).hash(); - if self.topics.get(&topic).is_some() { + let topic = TopicHash::from_raw(node_id.raw()); + if self.topics.contains_key(&topic){ let local_enr = self.local_enr.read().clone(); - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic.clone(), local_enr.clone(), None)); + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), None)); } - } } } } @@ -508,7 +506,7 @@ impl Service { self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } _ = publish_topics.tick() => { - self.topics.clone().into_iter().for_each(|topic| self.start_findnode_query(NodeId::new(&topic.as_bytes()), None)); + self.topics.clone().into_iter().for_each(|(topic_hash, 
_)| self.start_findnode_query(NodeId::new(&topic_hash.as_bytes()), None)); } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { // Selection of node for free ad slot @@ -520,7 +518,7 @@ impl Service { selection.into_iter().next().map(|node_id| ticket_pool.get(node_id)).unwrap_or(None) }; if let Some((node_record, req_id, _ticket)) = new_ad.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { - self.ads.insert(node_record.clone(), topic.clone()).ok(); + self.ads.insert(node_record.clone(), topic).ok(); NodeContact::from(node_record).node_address().map(|node_address| { self.send_regconfirmation_response(node_address, req_id, topic); }).ok(); @@ -698,71 +696,66 @@ impl Service { if enr.node_id() == node_address.node_id && enr.udp_socket() == Some(node_address.socket_addr) { - if let Ok(topic_str) = std::str::from_utf8(&topic).map_err(|e| error!("{}", e)) - { - let topic = Topic::new(topic_str.to_owned()).hash(); - let wait_time = self.ads.ticket_wait_time(topic.clone()); - - let new_ticket = Ticket::new( - node_address.node_id, - node_address.socket_addr.ip(), - topic, - tokio::time::Instant::now(), - wait_time.unwrap_or(Duration::from_secs(0)), - ); + let wait_time = self.ads.ticket_wait_time(topic); + + let new_ticket = Ticket::new( + node_address.node_id, + node_address.socket_addr.ip(), + topic, + tokio::time::Instant::now(), + wait_time.unwrap_or(Duration::from_secs(0)), + ); - self.send_ticket_response( - node_address, - id.clone(), - new_ticket.clone(), - wait_time.unwrap_or(Duration::from_secs(0)), - ); + self.send_ticket_response( + node_address, + id.clone(), + new_ticket.clone(), + wait_time.unwrap_or(Duration::from_secs(0)), + ); - if !ticket.is_empty() { - let decoded_enr = self - .local_enr - .write() - .to_base64() - .parse::() - .map_err(|e| { - error!("Failed to decode ticket in REGTOPIC query: {}", e) - }); - if let Ok(decoded_enr) = decoded_enr { - if let Some(ticket_key) = decoded_enr.get("ticket_key") { 
- let decrypted_ticket = { - let aead = - Aes128Gcm::new(GenericArray::from_slice(ticket_key)); - let payload = Payload { - msg: &ticket, - aad: b"", - }; - aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) - .map_err(|e| { - error!( - "Failed to decode ticket in REGTOPIC query: {}", - e - ) - }) + if !ticket.is_empty() { + let decoded_enr = self + .local_enr + .write() + .to_base64() + .parse::() + .map_err(|e| { + error!("Failed to decode ticket in REGTOPIC query: {}", e) + }); + if let Ok(decoded_enr) = decoded_enr { + if let Some(ticket_key) = decoded_enr.get("ticket_key") { + let decrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", }; - if let Ok(decrypted_ticket) = decrypted_ticket { - Ticket::decode(&decrypted_ticket) - .map_err(|e| error!("{}", e)) - .map(|ticket| { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket - if let Some(ticket) = ticket { - if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); - } + aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .map_err(|e| { + error!( + "Failed to decode ticket in REGTOPIC query: {}", + e + ) + }) + }; + if let Ok(decrypted_ticket) = decrypted_ticket { + Ticket::decode(&decrypted_ticket) + .map_err(|e| error!("{}", e)) + .map(|ticket| { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket + if let Some(ticket) = ticket { + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, ticket); } - }) - .ok(); - } + } + }) + .ok(); } } - } else { - self.ticket_pools.insert(enr, id, new_ticket); } + } else { + self.ticket_pools.insert(enr, id, new_ticket); } } } @@ -1021,17 +1014,13 @@ impl Service { .ok(); } ResponseBody::RegisterConfirmation { topic } => { - if let Ok(topic_str) = std::str::from_utf8(&topic).map_err(|e| error!("{}", e)) + if self + 
.active_regtopic_requests + .is_active_req(id, node_id, topic) + .is_some() { - let topic = Topic::new(topic_str.to_owned()).hash(); - if self - .active_regtopic_requests - .is_active_req(id, node_id, topic.clone()) - .is_some() - { - if let NodeContact::Enr(enr) = active_request.contact { - self.active_topics.insert(*enr, topic).ok(); - } + if let NodeContact::Enr(enr) = active_request.contact { + self.active_topics.insert(*enr, topic).ok(); } } } @@ -1131,7 +1120,7 @@ impl Service { }; let node_id = enr.node_id(); let request_body = RequestBody::RegisterTopic { - topic: topic.as_bytes().to_vec(), + topic, enr, ticket: ticket_bytes, }; @@ -1198,9 +1187,7 @@ impl Service { ) { let response = Response { id: rpc_id, - body: ResponseBody::RegisterConfirmation { - topic: topic.as_bytes().to_vec(), - }, + body: ResponseBody::RegisterConfirmation { topic }, }; trace!( "Sending REGCONFIRMATION response to: {}. Response: {} ", diff --git a/src/service/test.rs b/src/service/test.rs index 7b8545431..4ec293d09 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -100,7 +100,7 @@ async fn build_service( event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), tickets: Tickets::new(Duration::from_secs(60 * 15)), - topics: HashSet::new(), + topics: HashMap::new(), active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), ticket_pools: TicketPools::new(), exit, @@ -218,11 +218,10 @@ async fn encrypt_decrypt_ticket() { let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); let node_id = enr.node_id(); - let topic_hash = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); let ticket = Ticket::new( node_id, ip, - topic_hash, + TopicHash::from_raw([1u8; 32]), tokio::time::Instant::now(), tokio::time::Duration::from_secs(5), ); From 4eaa1d514f5ee9034b93700f179d6e48b4007719 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 20 Apr 2022 14:43:28 +0200 Subject: [PATCH 069/391] Plug topic requests into 
discv5 --- src/discv5.rs | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/service.rs | 54 ++++++++++++++++++++++++++++++++++------- 2 files changed, 110 insertions(+), 9 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 74e1c0b50..c7f588ca1 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -13,6 +13,7 @@ //! The server can be shutdown using the [`Discv5::shutdown`] function. use crate::{ + advertisement::topic::{Sha256Topic as Topic, TopicHash}, error::{Discv5Error, QueryError, RequestError}, kbucket::{ self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable, @@ -479,6 +480,70 @@ impl Discv5 { } } + // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, + // and send the topic query to these nodes + pub fn topic_query_req( + &self, + enr: Enr, + topic_hash: TopicHash, + ) -> impl Future, RequestError>> + 'static { + // convert the ENR to a node_contact. + let node_contact = NodeContact::from(enr); + + // the service will verify if this node is contactable, we just send it and + // await a response. + let (callback_send, callback_recv) = oneshot::channel(); + let channel = self.clone_channel(); + + async move { + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + + let event = ServiceRequest::TopicQuery(node_contact, topic_hash, callback_send); + + // send the request + channel + .send(event) + .await + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + // await the response + callback_recv + .await + .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + } + } + + // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, + // and send the topic query to these nodes + pub fn reg_topic_req( + &self, + enr: Enr, + topic: Topic, + ) -> impl Future> + 'static { + // convert the ENR to a node_contact. 
+ let node_contact = NodeContact::from(enr); + + let channel = self.clone_channel(); + + async move { + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + let event = ServiceRequest::RegisterTopic(node_contact, topic); + // send the request + channel + .send(event) + .await + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + Ok(()) + } + } + + pub fn find_topic( + &self, + topic_hash: TopicHash, + ) -> impl Future, QueryError>> + 'static { + let key = NodeId::new(&topic_hash.as_bytes()); + self.find_node(key) + } + /// Runs an iterative `FIND_NODE` request. /// /// This will return peers containing contactable nodes of the DHT closest to the diff --git a/src/service.rs b/src/service.rs index d217bcd7d..48e6ca484 100644 --- a/src/service.rs +++ b/src/service.rs @@ -142,10 +142,11 @@ const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { - /// A request to start a query. There are two types of queries: + /// A request to start a query. There are three types of queries: /// - A FindNode Query - Searches for peers using a random target. /// - A Predicate Query - Searches for peers closest to a random target that match a specified /// predicate. + /// /// - A Topic Query - Searches for peers advertising a given topic. StartQuery(QueryKind, oneshot::Sender>), /// Find the ENR of a node given its multiaddr. FindEnr(NodeContact, oneshot::Sender>), @@ -159,6 +160,14 @@ pub enum ServiceRequest { /// Sets up an event stream where the discv5 server will return various events such as /// discovered nodes as it traverses the DHT. 
RequestEventStream(oneshot::Sender>), + /// Queries given node for nodes advertising a topic hash + TopicQuery( + NodeContact, + TopicHash, + oneshot::Sender, RequestError>>, + ), + /// RegisterTopic publishes this node as an advertiser for a topic at given node + RegisterTopic(NodeContact, Topic), } use crate::discv5::PERMIT_BAN_LIST; @@ -245,8 +254,10 @@ struct ActiveRequest { pub enum CallbackResponse { /// A response to a requested ENR. Enr(oneshot::Sender>), - /// A response from a TALK request + /// A response from a TALK request. Talk(oneshot::Sender, RequestError>>), + /// A response to a Topic Query. + Topic(oneshot::Sender, RequestError>>), } /// For multiple responses to a FindNodes request, this keeps track of the request count @@ -404,12 +415,15 @@ impl Service { error!("Failed to return the event stream channel"); } } - /*ServiceRequest::TopicQuery(topic) => { - self.send_topic_query(topic); + ServiceRequest::TopicQuery(node_contact, topic_hash, callback) => { + self.topic_query_request(node_contact, topic_hash, callback); + } + ServiceRequest::RegisterTopic(node_contact, topic) => { + let topic_hash = topic.hash(); + self.topics.insert(topic_hash, topic); + let local_enr = self.local_enr.read().clone(); + self.reg_topic_request(node_contact, topic_hash, local_enr, None) } - ServiceRequest::RegisterTopic(topic) => { - self.reg_topic_request(topic, self.local_enr(), None); - }*/ } } Some(event) = self.handler_recv.recv() => { @@ -1135,6 +1149,23 @@ impl Service { self.active_regtopic_requests.insert(node_id, topic, req_id); } + fn topic_query_request( + &mut self, + contact: NodeContact, + topic: TopicHash, + callback: oneshot::Sender, RequestError>>, + ) { + let request_body = RequestBody::TopicQuery { topic }; + + let active_request = ActiveRequest { + contact, + request_body, + query_id: None, + callback: Some(CallbackResponse::Topic(callback)), + }; + self.send_rpc_request(active_request); + } + fn send_ticket_response( &mut self, node_address: 
NodeAddress, @@ -1206,7 +1237,6 @@ impl Service { topic: TopicHash, ) { let nodes_to_send = self.ads.get_ad_nodes(topic).collect(); - self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); } @@ -1600,7 +1630,7 @@ impl Service { Some(CallbackResponse::Enr(callback)) => { callback .send(Err(error)) - .unwrap_or_else(|_| debug!("Couldn't send TALK error response to user")); + .unwrap_or_else(|_| debug!("Couldn't send ENR error response to user")); return; } Some(CallbackResponse::Talk(callback)) => { @@ -1610,6 +1640,12 @@ impl Service { .unwrap_or_else(|_| debug!("Couldn't send TALK error response to user")); return; } + Some(CallbackResponse::Topic(callback)) => { + callback + .send(Err(error)) + .unwrap_or_else(|_| debug!("Couldn't send TOPIC error response to user")); + return; + } None => { // no callback to send too } From 7073775a6222e63b7c44b7e640649bf1e8534652 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Apr 2022 14:11:55 +0200 Subject: [PATCH 070/391] Add method for hashing a topic string --- src/advertisement/topic.rs | 9 +++++++++ src/discv5.rs | 7 ++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 3a0565849..11fa25a21 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -31,6 +31,7 @@ pub type Sha256Topic = Topic; pub trait Hasher { /// The function that takes a topic string and creates a topic hash. fn hash(topic_string: String) -> TopicHash; + fn hash_function_name() -> String; } /// A type for representing topics who use the identity hash. 
@@ -54,6 +55,10 @@ impl Hasher for Sha256Hash { hash.copy_from_slice(&sha256); TopicHash { hash } } + + fn hash_function_name() -> String { + "Sha256".to_owned() + } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -124,6 +129,10 @@ impl Topic { H::hash(self.topic.clone()) } + pub fn hash_function_name(&self) -> String { + H::hash_function_name() + } + pub fn topic(&self) -> String { self.topic.clone() } diff --git a/src/discv5.rs b/src/discv5.rs index c7f588ca1..45e604543 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -408,6 +408,11 @@ impl Discv5 { .collect() } + pub fn hashes(topic: String) -> Vec<(TopicHash, String)> { + let sha256_topic = Topic::new(topic); + vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] + } + /// Requests the ENR of a node corresponding to multiaddr or multi-addr string. /// /// Only `ed25519` and `secp256k1` key types are currently supported. @@ -536,7 +541,7 @@ impl Discv5 { } } - pub fn find_topic( + pub fn find_closest_nodes_to_topic( &self, topic_hash: TopicHash, ) -> impl Future, QueryError>> + 'static { From 90e3cf91a295df0b33a9d10c2889773cee437240 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Apr 2022 14:13:49 +0200 Subject: [PATCH 071/391] Make module public for acces via discv5 cli --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 1572c515a..8aa063a26 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -102,7 +102,7 @@ //! [`Service`]: service/struct.Service.html //! 
[`Session`]: session/struct.Session.html -mod advertisement; +pub mod advertisement; mod config; mod discv5; mod error; From 2fa06afcafed52f09306eb173441ac83b61ad443 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Apr 2022 14:23:44 +0200 Subject: [PATCH 072/391] Make use of TopicHash public --- src/advertisement/mod.rs | 2 +- src/discv5.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index dc45976b6..40550e684 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -8,7 +8,7 @@ use std::{ task::{Context, Poll}, }; use tokio::time::Instant; -use topic::TopicHash; +pub use topic::TopicHash; use tracing::{debug, error}; mod test; diff --git a/src/discv5.rs b/src/discv5.rs index 45e604543..db1b2a6ec 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -408,7 +408,7 @@ impl Discv5 { .collect() } - pub fn hashes(topic: String) -> Vec<(TopicHash, String)> { + pub fn hashes(&self, topic: String) -> Vec<(TopicHash, String)> { let sha256_topic = Topic::new(topic); vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] } From fea18cabea882fa4338acd0b4e24e1cc8b8c0672 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Apr 2022 21:13:20 +0200 Subject: [PATCH 073/391] Update unreachable code block --- src/advertisement/mod.rs | 2 +- src/advertisement/topic.rs | 12 ++++++------ src/service.rs | 4 +++- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 40550e684..dc45976b6 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -8,7 +8,7 @@ use std::{ task::{Context, Poll}, }; use tokio::time::Instant; -pub use topic::TopicHash; +use topic::TopicHash; use tracing::{debug, error}; mod test; diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 11fa25a21..83131994d 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -104,6 +104,12 @@ impl 
rlp::Decodable for TopicHash { } } +impl fmt::Display for TopicHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", encode(self.hash)) + } +} + /// A gossipsub topic. #[derive(Debug, Clone)] pub struct Topic { @@ -172,9 +178,3 @@ impl fmt::Display for Topic { write!(f, "{}", self.topic) } } - -impl fmt::Display for TopicHash { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", encode(self.hash)) - } -} diff --git a/src/service.rs b/src/service.rs index 48e6ca484..205290cb6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -819,10 +819,12 @@ impl Service { ); } + let all_distances = vec![256 as u64]; // These are sanitized and ordered let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, - _ => unreachable!(), + RequestBody::TopicQuery { .. } => &all_distances, + _ => unreachable!() }; // This could be an ENR request from the outer service. If so respond to the From 24297d7e6543ea91892e8e09b99368abd25fce1e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Apr 2022 22:48:35 +0200 Subject: [PATCH 074/391] API to view active topics --- src/advertisement/mod.rs | 4 +++- src/discv5.rs | 28 +++++++++++++++++++++++++++- src/service.rs | 11 ++++++++--- 3 files changed, 38 insertions(+), 5 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index dc45976b6..9c4dc3097 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -15,7 +15,7 @@ mod test; pub mod ticket; pub mod topic; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct AdNode { node_record: Enr, insert_time: Instant, @@ -36,6 +36,7 @@ impl PartialEq for AdNode { } } +#[derive(Clone)] struct AdTopic { topic: TopicHash, insert_time: Instant, @@ -47,6 +48,7 @@ impl AdTopic { } } +#[derive(Clone)] pub struct Ads { expirations: VecDeque, ads: HashMap>, diff --git a/src/discv5.rs b/src/discv5.rs index db1b2a6ec..2b5b5921f 100644 --- a/src/discv5.rs +++ 
b/src/discv5.rs @@ -13,7 +13,10 @@ //! The server can be shutdown using the [`Discv5::shutdown`] function. use crate::{ - advertisement::topic::{Sha256Topic as Topic, TopicHash}, + advertisement::{ + topic::{Sha256Topic as Topic, TopicHash}, + Ads, + }, error::{Discv5Error, QueryError, RequestError}, kbucket::{ self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable, @@ -541,6 +544,29 @@ impl Discv5 { } } + pub fn active_topics(&self) -> impl Future> + 'static { + // the service will verify if this node is contactable, we just send it and + // await a response. + let (callback_send, callback_recv) = oneshot::channel(); + let channel = self.clone_channel(); + + async move { + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + + let event = ServiceRequest::ActiveTopics(callback_send); + + // send the request + channel + .send(event) + .await + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + // await the response + callback_recv + .await + .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + } + } + pub fn find_closest_nodes_to_topic( &self, topic_hash: TopicHash, diff --git a/src/service.rs b/src/service.rs index 205290cb6..a2697e29e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -142,11 +142,10 @@ const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { - /// A request to start a query. There are three types of queries: + /// A request to start a query. There are two types of queries: /// - A FindNode Query - Searches for peers using a random target. /// - A Predicate Query - Searches for peers closest to a random target that match a specified /// predicate. - /// /// - A Topic Query - Searches for peers advertising a given topic. StartQuery(QueryKind, oneshot::Sender>), /// Find the ENR of a node given its multiaddr. 
FindEnr(NodeContact, oneshot::Sender>), @@ -168,6 +167,7 @@ pub enum ServiceRequest { ), /// RegisterTopic publishes this node as an advertiser for a topic at given node RegisterTopic(NodeContact, Topic), + ActiveTopics(oneshot::Sender>), } use crate::discv5::PERMIT_BAN_LIST; @@ -424,6 +424,11 @@ impl Service { let local_enr = self.local_enr.read().clone(); self.reg_topic_request(node_contact, topic_hash, local_enr, None) } + ServiceRequest::ActiveTopics(callback) => { + if callback.send(Ok(self.active_topics.clone())).is_err() { + error!("Failed to return active topics"); + } + } } } Some(event) = self.handler_recv.recv() => { @@ -824,7 +829,7 @@ impl Service { let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, RequestBody::TopicQuery { .. } => &all_distances, - _ => unreachable!() + _ => unreachable!(), }; // This could be an ENR request from the outer service. If so respond to the From c66f69695f647f6d0d902b9cc5cc05d1ecacf324 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Apr 2022 22:51:18 +0200 Subject: [PATCH 075/391] Impl Debug for discv5-cli output --- src/advertisement/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 9c4dc3097..06e9aa613 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -36,7 +36,7 @@ impl PartialEq for AdNode { } } -#[derive(Clone)] +#[derive(Clone, Debug)] struct AdTopic { topic: TopicHash, insert_time: Instant, @@ -48,7 +48,7 @@ impl AdTopic { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Ads { expirations: VecDeque, ads: HashMap>, From faf88ec590e6d0396a379e44d8b1528ac66962ac Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 08:08:21 +0200 Subject: [PATCH 076/391] Add info messages for topic messages --- src/config.rs | 6 +++--- src/service.rs | 9 ++++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/config.rs 
b/src/config.rs index 989e73f04..b859f51fd 100644 --- a/src/config.rs +++ b/src/config.rs @@ -91,7 +91,7 @@ pub struct Discv5Config { /// will last indefinitely. Default is 1 hour. pub ban_duration: Option, - pub topic_radius: Option, + pub topic_radius: u64, /// A custom executor which can spawn the discv5 tasks. This must be a tokio runtime, with /// timing support. By default, the executor that created the discv5 struct will be used. @@ -133,7 +133,7 @@ impl Default for Discv5Config { filter_max_bans_per_ip: Some(5), permit_ban_list: PermitBanList::default(), ban_duration: Some(Duration::from_secs(3600)), // 1 hour - topic_radius: Some(3), + topic_radius: 256, executor: None, } } @@ -297,7 +297,7 @@ impl Discv5ConfigBuilder { self } - pub fn topic_radius(&mut self, topic_radius: Option) -> &mut Self { + pub fn topic_radius(&mut self, topic_radius: u64) -> &mut Self { self.config.topic_radius = topic_radius; self } diff --git a/src/service.rs b/src/service.rs index a2697e29e..b8683a4cb 100644 --- a/src/service.rs +++ b/src/service.rs @@ -540,6 +540,7 @@ impl Service { self.ads.insert(node_record.clone(), topic).ok(); NodeContact::from(node_record).node_address().map(|node_address| { self.send_regconfirmation_response(node_address, req_id, topic); + info!("Sent REGCONFIRMATION response"); }).ok(); } } @@ -711,6 +712,7 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { + info!("Received RETOPIC request"); // Drop if request tries to advertise another node than sender if enr.node_id() == node_address.node_id && enr.udp_socket() == Some(node_address.socket_addr) @@ -824,12 +826,12 @@ impl Service { ); } - let all_distances = vec![256 as u64]; + let topic_radius = vec![self.config.topic_radius]; // These are sanitized and ordered let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, - RequestBody::TopicQuery { .. 
} => &all_distances, - _ => unreachable!(), + RequestBody::TopicQuery { .. } => &topic_radius, + _ => unreachable!() }; // This could be an ENR request from the outer service. If so respond to the @@ -1035,6 +1037,7 @@ impl Service { .ok(); } ResponseBody::RegisterConfirmation { topic } => { + info!("Received REGCONFIRMATION response"); if self .active_regtopic_requests .is_active_req(id, node_id, topic) From 41e8a1d005746829fc8b4d28f855fc5f25307d64 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 08:22:23 +0200 Subject: [PATCH 077/391] Change info to error for debug --- src/service.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/service.rs b/src/service.rs index b8683a4cb..98dd31b12 100644 --- a/src/service.rs +++ b/src/service.rs @@ -540,7 +540,7 @@ impl Service { self.ads.insert(node_record.clone(), topic).ok(); NodeContact::from(node_record).node_address().map(|node_address| { self.send_regconfirmation_response(node_address, req_id, topic); - info!("Sent REGCONFIRMATION response"); + error!("Sent REGCONFIRMATION response"); }).ok(); } } @@ -712,7 +712,7 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - info!("Received RETOPIC request"); + error!("Received RETOPIC request"); // Drop if request tries to advertise another node than sender if enr.node_id() == node_address.node_id && enr.udp_socket() == Some(node_address.socket_addr) @@ -1037,7 +1037,7 @@ impl Service { .ok(); } ResponseBody::RegisterConfirmation { topic } => { - info!("Received REGCONFIRMATION response"); + error!("Received REGCONFIRMATION response"); if self .active_regtopic_requests .is_active_req(id, node_id, topic) From 7df8136da82896b6b7ec4f2e44e099561989ceb5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 08:31:26 +0200 Subject: [PATCH 078/391] Remove tracing --- src/service.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/service.rs 
b/src/service.rs index 98dd31b12..ba1170cf3 100644 --- a/src/service.rs +++ b/src/service.rs @@ -540,7 +540,6 @@ impl Service { self.ads.insert(node_record.clone(), topic).ok(); NodeContact::from(node_record).node_address().map(|node_address| { self.send_regconfirmation_response(node_address, req_id, topic); - error!("Sent REGCONFIRMATION response"); }).ok(); } } @@ -712,7 +711,6 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - error!("Received RETOPIC request"); // Drop if request tries to advertise another node than sender if enr.node_id() == node_address.node_id && enr.udp_socket() == Some(node_address.socket_addr) @@ -1037,7 +1035,6 @@ impl Service { .ok(); } ResponseBody::RegisterConfirmation { topic } => { - error!("Received REGCONFIRMATION response"); if self .active_regtopic_requests .is_active_req(id, node_id, topic) From 3e5e3828ee6bf5baf5768d16b6bc2e231a18ae0a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 08:40:48 +0200 Subject: [PATCH 079/391] Fix clippy warnings --- src/advertisement/ticket.rs | 16 ++-------------- src/service.rs | 6 +++--- src/service/test.rs | 4 ++-- 3 files changed, 7 insertions(+), 19 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index cd4a49483..014878c6f 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -145,19 +145,13 @@ struct RegistrationWindow { open_time: Instant, } +#[derive(Default)] pub struct TicketPools { ticket_pools: HashMap>, expirations: VecDeque, } impl TicketPools { - pub fn new() -> Self { - TicketPools { - ticket_pools: HashMap::new(), - expirations: VecDeque::new(), - } - } - pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket) { if let Some(open_time) = ticket.req_time().checked_add(ticket.wait_time()) { if open_time.elapsed() <= Duration::from_secs(10) { @@ -214,19 +208,13 @@ impl ActiveRegtopicRequest { } } 
+#[derive(Default)] pub struct ActiveRegtopicRequests { requests: HashMap>, expirations: VecDeque, } impl ActiveRegtopicRequests { - pub fn new() -> Self { - ActiveRegtopicRequests { - requests: HashMap::new(), - expirations: VecDeque::new(), - } - } - pub fn is_active_req( &mut self, req_id: RequestId, diff --git a/src/service.rs b/src/service.rs index ba1170cf3..b3b93b45c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -350,7 +350,7 @@ impl Service { queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), active_nodes_responses: HashMap::new(), - active_regtopic_requests: ActiveRegtopicRequests::new(), + active_regtopic_requests: ActiveRegtopicRequests::default(), ip_votes, handler_send, handler_recv, @@ -362,7 +362,7 @@ impl Service { tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashMap::new(), active_topics, - ticket_pools: TicketPools::new(), + ticket_pools: TicketPools::default(), exit, config: config.clone(), }; @@ -829,7 +829,7 @@ impl Service { let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, RequestBody::TopicQuery { .. } => &topic_radius, - _ => unreachable!() + _ => unreachable!(), }; // This could be an ENR request from the outer service. 
If so respond to the diff --git a/src/service/test.rs b/src/service/test.rs index 4ec293d09..3c127331a 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -90,7 +90,7 @@ async fn build_service( queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), active_nodes_responses: HashMap::new(), - active_regtopic_requests: ActiveRegtopicRequests::new(), + active_regtopic_requests: ActiveRegtopicRequests::default(), ip_votes: None, handler_send, handler_recv, @@ -102,7 +102,7 @@ async fn build_service( tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashMap::new(), active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), - ticket_pools: TicketPools::new(), + ticket_pools: TicketPools::default(), exit, config, } From dd3de537379d9bd76bb0178c1cd3025ffc4919c9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 08:53:37 +0200 Subject: [PATCH 080/391] Improve error message --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index b3b93b45c..0ef16a2b5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1018,7 +1018,7 @@ impl Service { } ResponseBody::Ticket { ticket, wait_time } => { Ticket::decode(&ticket) - .map_err(|e| error!("{}", e)) + .map_err(|e| error!("Failed to decode ticket of TICKET response. 
Error: {}", e)) .map(|ticket| { if let Some(ticket) = ticket { if wait_time <= MAX_WAIT_TIME_TICKET { From 5fa4ba4145c44d8ced52e8c91796333b34234e08 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 09:28:29 +0200 Subject: [PATCH 081/391] Handle received ticket as opaque object --- src/advertisement/ticket.rs | 11 ++++++----- src/rpc.rs | 33 +++++++++++++++++++++++++++------ src/service.rs | 37 ++++++++++++++++++------------------- 3 files changed, 51 insertions(+), 30 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 014878c6f..887aa2ba7 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -23,11 +23,11 @@ impl ActiveTopic { pub struct ActiveTicket { contact: NodeContact, - ticket: Ticket, + ticket: Vec, } impl ActiveTicket { - pub fn new(contact: NodeContact, ticket: Ticket) -> Self { + pub fn new(contact: NodeContact, ticket: Vec) -> Self { ActiveTicket { contact, ticket } } @@ -35,7 +35,7 @@ impl ActiveTicket { self.contact.clone() } - pub fn ticket(&self) -> Ticket { + pub fn ticket(&self) -> Vec { self.ticket.clone() } } @@ -57,10 +57,11 @@ impl Tickets { pub fn insert( &mut self, contact: NodeContact, - ticket: Ticket, + ticket: Vec, wait_time: Duration, + topic: TopicHash, ) -> Result<(), &str> { - let active_topic = ActiveTopic::new(contact.node_id(), ticket.topic()); + let active_topic = ActiveTopic::new(contact.node_id(), topic); if let Err(e) = self.ticket_history.insert(active_topic.clone()) { return Err(e); diff --git a/src/rpc.rs b/src/rpc.rs index 55853e4b4..46163e723 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -128,6 +128,8 @@ pub enum ResponseBody { ticket: Vec, /// The time in seconds to wait before attempting to register again. wait_time: u64, + /// The topic hash for which the opaque ticket is issued + topic: TopicHash, }, /// The REGCONFIRMATION response. 
RegisterConfirmation { @@ -278,12 +280,17 @@ impl Response { buf.extend_from_slice(&s.out()); buf } - ResponseBody::Ticket { ticket, wait_time } => { + ResponseBody::Ticket { + ticket, + wait_time, + topic, + } => { let mut s = RlpStream::new(); - s.begin_list(3); + s.begin_list(4); s.append(&id.as_bytes()); s.append(&ticket); s.append(&wait_time); + s.append(&topic); buf.extend_from_slice(&s.out()); buf } @@ -345,8 +352,16 @@ impl std::fmt::Display for ResponseBody { ResponseBody::Talk { response } => { write!(f, "Response: Response {}", hex::encode(response)) } - ResponseBody::Ticket { ticket, wait_time } => { - write!(f, "TICKET: Ticket: {:?}, Wait time: {}", ticket, wait_time) + ResponseBody::Ticket { + ticket, + wait_time, + topic, + } => { + write!( + f, + "TICKET: Ticket: {:?}, Wait time: {}, TopicHash: {}", + ticket, wait_time, topic + ) } ResponseBody::RegisterConfirmation { topic } => { write!( @@ -593,15 +608,20 @@ impl Message { } 8 => { // TicketResponse - if list_len != 3 { + if list_len != 4 { debug!("RegisterTopic Response has an invalid RLP list length. Expected 2, found {}", list_len); return Err(DecoderError::RlpIncorrectListLen); } let ticket = rlp.val_at::>(1)?; let wait_time = rlp.val_at::(2)?; + let topic = rlp.val_at::(3)?; Message::Response(Response { id, - body: ResponseBody::Ticket { ticket, wait_time }, + body: ResponseBody::Ticket { + ticket, + wait_time, + topic, + }, }) } 9 => { @@ -1185,6 +1205,7 @@ mod tests { body: ResponseBody::Ticket { ticket, wait_time: 1u64, + topic: TopicHash::from_raw([1u8; 32]), }, }); diff --git a/src/service.rs b/src/service.rs index 0ef16a2b5..89d3149d4 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1016,23 +1016,21 @@ impl Service { _ => error!("Invalid callback for response"), } } - ResponseBody::Ticket { ticket, wait_time } => { - Ticket::decode(&ticket) - .map_err(|e| error!("Failed to decode ticket of TICKET response. 
Error: {}", e)) - .map(|ticket| { - if let Some(ticket) = ticket { - if wait_time <= MAX_WAIT_TIME_TICKET { - self.tickets - .insert( - active_request.contact, - ticket, - Duration::from_secs(wait_time), - ) - .ok(); - } - } - }) - .ok(); + ResponseBody::Ticket { + ticket, + wait_time, + topic, + } => { + if wait_time <= MAX_WAIT_TIME_TICKET { + self.tickets + .insert( + active_request.contact, + ticket, + Duration::from_secs(wait_time), + topic, + ) + .ok(); + } } ResponseBody::RegisterConfirmation { topic } => { if self @@ -1132,10 +1130,10 @@ impl Service { contact: NodeContact, topic: TopicHash, enr: Enr, - ticket: Option, + ticket: Option>, ) { let ticket_bytes = if let Some(ticket) = ticket { - ticket.encode() + ticket } else { Vec::new() }; @@ -1200,6 +1198,7 @@ impl Service { body: ResponseBody::Ticket { ticket: encrypted_ticket, wait_time: wait_time.as_secs(), + topic: ticket.topic(), }, }; trace!( From 590f35eb4aed44d926719144680764d038c2af86 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 09:41:35 +0200 Subject: [PATCH 082/391] Fix error messages --- src/rpc.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 46163e723..90fd0187b 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -585,13 +585,13 @@ impl Message { 7 => { // RegisterTopicRequest if list_len != 4 { - debug!("RegisterTopic Request has an invalid RLP list length. Expected 2, found {}", list_len); + debug!("RegisterTopic request has an invalid RLP list length. 
Expected 4, found {}", list_len); return Err(DecoderError::RlpIncorrectListLen); } let topic = { let topic_bytes = rlp.val_at::>(1)?; if topic_bytes.len() > 32 { - debug!("RegisterTopic Request has a topic greater than 32 bytes"); + debug!("RegisterTopic request has a topic greater than 32 bytes"); return Err(DecoderError::RlpIsTooBig); } let mut topic = [0u8; 32]; @@ -609,7 +609,7 @@ impl Message { 8 => { // TicketResponse if list_len != 4 { - debug!("RegisterTopic Response has an invalid RLP list length. Expected 2, found {}", list_len); + debug!("Ticket Response has an invalid RLP list length. Expected 4, found {}", list_len); return Err(DecoderError::RlpIncorrectListLen); } let ticket = rlp.val_at::>(1)?; @@ -628,7 +628,7 @@ impl Message { // RegisterConfirmationResponse if list_len != 2 { debug!( - "TopicQuery Request has an invalid RLP list length. Expected 2, found {}", + "RegisterConfirmation response has an invalid RLP list length. Expected 2, found {}", list_len ); return Err(DecoderError::RlpIncorrectListLen); @@ -652,7 +652,7 @@ impl Message { // TopicQueryRequest if list_len != 2 { debug!( - "TopicQuery Request has an invalid RLP list length. Expected 2, found {}", + "TopicQuery request has an invalid RLP list length. 
Expected 2, found {}", list_len ); return Err(DecoderError::RlpIncorrectListLen); @@ -660,7 +660,7 @@ impl Message { let topic = { let topic_bytes = rlp.val_at::>(1)?; if topic_bytes.len() > 32 { - debug!("TopicQuery Request has a topic greater than 32 bytes"); + debug!("TopicQuery request has a topic greater than 32 bytes"); return Err(DecoderError::RlpIsTooBig); } let mut topic = [0u8; 32]; From 190f98c09bfe85c6fd594bb9b5984eb39e651a60 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 09:54:20 +0200 Subject: [PATCH 083/391] Update display of ticket and topic --- src/rpc.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 90fd0187b..2eb8ffd88 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -359,16 +359,14 @@ impl std::fmt::Display for ResponseBody { } => { write!( f, - "TICKET: Ticket: {:?}, Wait time: {}, TopicHash: {}", - ticket, wait_time, topic + "TICKET: Ticket: {}, Wait time: {}, Topic: {}", + hex::encode(ticket), + wait_time, + topic ) } ResponseBody::RegisterConfirmation { topic } => { - write!( - f, - "REGTOPIC: Registered: {}", - hex::encode(topic.to_string()) - ) + write!(f, "REGTOPIC: Registered: {}", topic) } } } @@ -393,13 +391,13 @@ impl std::fmt::Display for RequestBody { hex::encode(protocol), hex::encode(request) ), - RequestBody::TopicQuery { topic } => write!(f, "TOPICQUERY: topic: {:?}", topic), + RequestBody::TopicQuery { topic } => write!(f, "TOPICQUERY: topic: {}", topic), RequestBody::RegisterTopic { topic, enr, ticket } => write!( f, - "RegisterTopic: topic: {}, enr: {}, ticket: {:?}", - hex::encode(topic.to_string()), + "RegisterTopic: topic: {}, enr: {}, ticket: {}", + topic, enr.to_base64(), - ticket, + hex::encode(ticket), ), } } @@ -609,7 +607,10 @@ impl Message { 8 => { // TicketResponse if list_len != 4 { - debug!("Ticket Response has an invalid RLP list length. 
Expected 4, found {}", list_len); + debug!( + "Ticket Response has an invalid RLP list length. Expected 4, found {}", + list_len + ); return Err(DecoderError::RlpIncorrectListLen); } let ticket = rlp.val_at::>(1)?; From a1a183887cdbae8a4abb6c1035301b917d0cba12 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 10:04:34 +0200 Subject: [PATCH 084/391] Modify encodable for TopicHash --- src/advertisement/topic.rs | 1 + src/rpc.rs | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 83131994d..c2d7535e8 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -84,6 +84,7 @@ impl TopicHash { impl rlp::Encodable for TopicHash { fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1); s.append(&self.hash.to_vec()); } } diff --git a/src/rpc.rs b/src/rpc.rs index 2eb8ffd88..9fb903a3f 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -188,7 +188,7 @@ impl Request { let mut s = RlpStream::new(); s.begin_list(4); s.append(&id.as_bytes()); - s.append(&topic.as_bytes().to_vec()); + s.append(&topic); s.append(&enr); s.append(&ticket); buf.extend_from_slice(&s.out()); @@ -198,7 +198,7 @@ impl Request { let mut s = RlpStream::new(); s.begin_list(2); s.append(&id.as_bytes()); - s.append(&topic.as_bytes().to_vec()); + s.append(&topic); buf.extend_from_slice(&s.out()); buf } @@ -298,7 +298,7 @@ impl Response { let mut s = RlpStream::new(); s.begin_list(2); s.append(&id.as_bytes()); - s.append(&topic.as_bytes().to_vec()); + s.append(&topic); buf.extend_from_slice(&s.out()); buf } From 3003ca66000f3532e6a9fb466ad64c9184dafb3a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 10:38:18 +0200 Subject: [PATCH 085/391] Use Encodable trait for Topic Hash --- src/advertisement/topic.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index c2d7535e8..83131994d 100644 --- a/src/advertisement/topic.rs +++ 
b/src/advertisement/topic.rs @@ -84,7 +84,6 @@ impl TopicHash { impl rlp::Encodable for TopicHash { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(1); s.append(&self.hash.to_vec()); } } From 3eaa0877f6812132908d1a9875c72ba429b53ef2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 12:46:11 +0200 Subject: [PATCH 086/391] Add clarification error message --- src/rpc.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rpc.rs b/src/rpc.rs index 9fb903a3f..66806f52b 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -6,7 +6,7 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; use tokio::time::{Duration, Instant}; -use tracing::{debug, warn}; +use tracing::{debug, warn, error}; /// Type to manage the request IDs. #[derive(Debug, Clone, PartialEq, Hash, Eq)] @@ -719,6 +719,7 @@ impl rlp::Decodable for Ticket { } if rlp.item_count() != Ok(5) { + error!("List has wrong item count, should be 5 but is {:?}", rlp.item_count()); return Err(DecoderError::Custom("List has wrong item count")); } From f7df916d142fc970f62735bb9fb8f91a7451f2ae Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Apr 2022 16:01:34 +0200 Subject: [PATCH 087/391] Fix ticket decrypt --- src/rpc.rs | 60 ++++++++++++++++++++++++++++++++++++++++++++++++-- src/service.rs | 13 +++++++---- 2 files changed, 67 insertions(+), 6 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 66806f52b..bb634358e 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,4 +1,8 @@ use crate::advertisement::topic::TopicHash; +use aes_gcm::{ + aead::{generic_array::GenericArray, Aead, NewAead, Payload}, + Aes128Gcm, +}; use enr::{CombinedKey, Enr, NodeId}; use rlp::{DecoderError, Rlp, RlpStream}; use std::{ @@ -6,7 +10,7 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; use tokio::time::{Duration, Instant}; -use tracing::{debug, warn, error}; +use tracing::{debug, error, warn}; /// Type to manage the request IDs. 
#[derive(Debug, Clone, PartialEq, Hash, Eq)] @@ -719,7 +723,10 @@ impl rlp::Decodable for Ticket { } if rlp.item_count() != Ok(5) { - error!("List has wrong item count, should be 5 but is {:?}", rlp.item_count()); + error!( + "List has wrong item count, should be 5 but is {:?}", + rlp.item_count() + ); return Err(DecoderError::Custom("List has wrong item count")); } @@ -1183,6 +1190,55 @@ mod tests { assert_eq!(Some(ticket), decoded); } + #[test] + fn encode_decode_ticket_with_encryption() { + // Create the test values needed + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + + let key = CombinedKey::generate_secp256k1(); + + let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let node_id = enr.node_id(); + let ticket = Ticket::new( + node_id, + ip, + TopicHash::from_raw([1u8; 32]), + Instant::now(), + Duration::from_secs(11), + ); + + let ticket_key: [u8; 16] = rand::random(); + + let encoded = ticket.encode(); + + let encrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(&ticket_key)); + let payload = Payload { + msg: &encoded, + aad: b"", + }; + aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .unwrap() + }; + + let decrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(&ticket_key)); + let payload = Payload { + msg: &encrypted_ticket, + aad: b"", + }; + aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .map_err(|e| error!("Failed to decode ticket in REGTOPIC query: {}", e)) + } + .unwrap(); + + let decoded = Ticket::decode(&decrypted_ticket).unwrap(); + + assert_eq!(encoded, decrypted_ticket); + assert_eq!(Some(ticket), decoded); + } + #[test] fn encode_decode_ticket_response() { // Create the test values needed diff --git a/src/service.rs b/src/service.rs index 89d3149d4..5b0f2ec0f 100644 --- a/src/service.rs +++ b/src/service.rs @@ -739,7 +739,7 @@ impl Service { .to_base64() .parse::() .map_err(|e| { - error!("Failed to decode ticket in REGTOPIC query: 
{}", e) + error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) }); if let Ok(decoded_enr) = decoded_enr { if let Some(ticket_key) = decoded_enr.get("ticket_key") { @@ -749,17 +749,22 @@ impl Service { msg: &ticket, aad: b"", }; - aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) .map_err(|e| { error!( - "Failed to decode ticket in REGTOPIC query: {}", + "Failed to decrypt ticket in REGTOPIC request. Error: {}", e ) }) }; if let Ok(decrypted_ticket) = decrypted_ticket { Ticket::decode(&decrypted_ticket) - .map_err(|e| error!("{}", e)) + .map_err(|e| { + error!( + "Failed to decode ticket in REGTOPIC request. Error: {}", + e + ) + }) .map(|ticket| { // Drop if src_node_id, src_ip and topic derived from node_address and request // don't match those in ticket From 5546afbb534c047f119fd6db587ffbbf61e51b8c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 2 May 2022 16:43:59 +0200 Subject: [PATCH 088/391] Make cloning explicit for ENR --- src/advertisement/mod.rs | 9 ++++++--- src/service.rs | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 06e9aa613..f62557cf5 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -28,6 +28,10 @@ impl AdNode { insert_time, } } + + pub fn node_record(&self) -> &Enr { + &self.node_record + } } impl PartialEq for AdNode { @@ -76,12 +80,11 @@ impl Ads { }) } - pub fn get_ad_nodes(&self, topic: TopicHash) -> impl Iterator + '_ { + pub fn get_ad_nodes(&self, topic: TopicHash) -> impl Iterator + '_ { self.ads .get(&topic) .into_iter() - .flat_map(|nodes| nodes.iter()) - .map(|node| node.node_record.clone()) + .flat_map(|nodes| nodes) } pub fn ticket_wait_time(&mut self, topic: TopicHash) -> Option { diff --git a/src/service.rs b/src/service.rs index 5b0f2ec0f..fee1926a4 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1247,7 +1247,7 @@ impl Service { 
rpc_id: RequestId, topic: TopicHash, ) { - let nodes_to_send = self.ads.get_ad_nodes(topic).collect(); + let nodes_to_send = self.ads.get_ad_nodes(topic).map(|ad| ad.node_record().clone()).collect(); self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); } From d6c7a8f0992703e3c7b793872d8826552b0f9fa7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 2 May 2022 16:47:01 +0200 Subject: [PATCH 089/391] Small fix --- src/advertisement/mod.rs | 8 ++------ src/service.rs | 6 +++++- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index f62557cf5..732617471 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -81,10 +81,7 @@ impl Ads { } pub fn get_ad_nodes(&self, topic: TopicHash) -> impl Iterator + '_ { - self.ads - .get(&topic) - .into_iter() - .flat_map(|nodes| nodes) + self.ads.get(&topic).into_iter().flatten() } pub fn ticket_wait_time(&mut self, topic: TopicHash) -> Option { @@ -116,8 +113,7 @@ impl Ads { .iter() .take_while(|ad| ad.insert_time.elapsed() >= self.ad_lifetime) .for_each(|ad| { - let count = map.entry(ad.topic).or_default(); - *count += 1; + *map.entry(ad.topic).or_default() += 1; }); map.into_iter().for_each(|(topic, index)| { diff --git a/src/service.rs b/src/service.rs index fee1926a4..dd879e682 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1247,7 +1247,11 @@ impl Service { rpc_id: RequestId, topic: TopicHash, ) { - let nodes_to_send = self.ads.get_ad_nodes(topic).map(|ad| ad.node_record().clone()).collect(); + let nodes_to_send = self + .ads + .get_ad_nodes(topic) + .map(|ad| ad.node_record().clone()) + .collect(); self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); } From 1612e6d66b17688725d17786fa5ff1b628242c17 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 2 May 2022 16:57:39 +0200 Subject: [PATCH 090/391] Fix broken tests --- src/advertisement/mod.rs | 6 +++--- src/advertisement/test.rs | 8 
++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 732617471..91e2b8307 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -117,12 +117,12 @@ impl Ads { }); map.into_iter().for_each(|(topic, index)| { - let entry_ref = self.ads.entry(topic).or_default(); + let topic_ads = self.ads.entry(topic).or_default(); for _ in 0..index { - entry_ref.pop_front(); + topic_ads.pop_front(); self.expirations.pop_front(); } - if entry_ref.is_empty() { + if topic_ads.is_empty() { self.ads.remove(&topic); } }); diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 3536397c7..471373960 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -63,12 +63,12 @@ async fn insert_ad_and_get_nodes() { // Add an ad for topic_2 from enr ads.insert(enr.clone(), topic_2.clone()).unwrap(); - let nodes: Vec = ads.get_ad_nodes(topic).collect(); + let nodes: Vec<&Enr> = ads.get_ad_nodes(topic).map(|ad_node| ad_node.node_record()).collect(); - let nodes_topic_2: Vec = ads.get_ad_nodes(topic_2).collect(); + let nodes_topic_2: Vec<&Enr> = ads.get_ad_nodes(topic_2).map(|ad_node| ad_node.node_record()).collect(); - assert_eq!(nodes, vec![enr.clone(), enr_2]); - assert_eq!(nodes_topic_2, vec![enr]); + assert_eq!(nodes, vec![&enr, &enr_2]); + assert_eq!(nodes_topic_2, vec![&enr]); } #[tokio::test] From dea19c6a36ce1c1cad4e8d50b688d0fb6f3b8f33 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 2 May 2022 17:00:59 +0200 Subject: [PATCH 091/391] Run cargo fmt --- src/advertisement/test.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 471373960..c0c927dc4 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -63,9 +63,15 @@ async fn insert_ad_and_get_nodes() { // Add an ad for topic_2 from enr ads.insert(enr.clone(), topic_2.clone()).unwrap(); - let nodes: 
Vec<&Enr> = ads.get_ad_nodes(topic).map(|ad_node| ad_node.node_record()).collect(); - - let nodes_topic_2: Vec<&Enr> = ads.get_ad_nodes(topic_2).map(|ad_node| ad_node.node_record()).collect(); + let nodes: Vec<&Enr> = ads + .get_ad_nodes(topic) + .map(|ad_node| ad_node.node_record()) + .collect(); + + let nodes_topic_2: Vec<&Enr> = ads + .get_ad_nodes(topic_2) + .map(|ad_node| ad_node.node_record()) + .collect(); assert_eq!(nodes, vec![&enr, &enr_2]); assert_eq!(nodes_topic_2, vec![&enr]); From ba5536e3ec85bf42ad3e22ab01f0ae97d18093c2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 3 May 2022 09:25:36 +0200 Subject: [PATCH 092/391] Small fixes and fix ticket history logic --- src/advertisement/mod.rs | 16 +++++++--------- src/advertisement/ticket.rs | 29 ++++++++++++++++++++--------- src/advertisement/topic.rs | 25 +++------------------------ 3 files changed, 30 insertions(+), 40 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 91e2b8307..2915c46e6 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -107,16 +107,16 @@ impl Ads { } fn remove_expired(&mut self) { - let mut map: HashMap = HashMap::new(); + let mut to_remove_ads: HashMap = HashMap::new(); self.expirations .iter() .take_while(|ad| ad.insert_time.elapsed() >= self.ad_lifetime) .for_each(|ad| { - *map.entry(ad.topic).or_default() += 1; + *to_remove_ads.entry(ad.topic).or_default() += 1; }); - map.into_iter().for_each(|(topic, index)| { + to_remove_ads.into_iter().for_each(|(topic, index)| { let topic_ads = self.ads.entry(topic).or_default(); for _ in 0..index { topic_ads.pop_front(); @@ -132,17 +132,15 @@ impl Ads { self.remove_expired(); let now = Instant::now(); let nodes = self.ads.entry(topic).or_default(); - if nodes.contains(&AdNode::new(node_record.clone(), now)) { + let ad_node = AdNode::new(node_record, now); + if nodes.contains(&ad_node) { error!( "This node {} is already advertising this topic",
ad_node.node_record().node_id() ); return Err("Node already advertising this topic"); } - nodes.push_back(AdNode { - node_record, - insert_time: now, - }); + nodes.push_back(ad_node); self.expirations.push_back(AdTopic::new(topic, now)); Ok(()) } diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 887aa2ba7..f90abc785 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -89,15 +89,16 @@ impl Stream for Tickets { } } -struct TicketRateLimiter { +// The PendingTicket has an ActiveTopic that maps to a ticket in Tickets +struct PendingTicket { active_topic: ActiveTopic, - first_seen: Instant, + insert_time: Instant, } #[derive(Default)] struct TicketHistory { ticket_cache: HashMap, - expirations: VecDeque, + expirations: VecDeque, ticket_cache_duration: Duration, } @@ -112,12 +113,17 @@ impl TicketHistory { pub fn insert(&mut self, active_topic: ActiveTopic) -> Result<(), &str> { self.remove_expired(); - let count = self.ticket_cache.entry(active_topic).or_default(); + let insert_time = Instant::now(); + let count = self.ticket_cache.entry(active_topic.clone()).or_default(); if *count >= 3 { error!("Max 3 tickets per (NodeId, Topic) accepted in 15 minutes"); return Err("Ticket limit reached"); } *count += 1; + self.expirations.push_back(PendingTicket{ + active_topic, + insert_time, + }); Ok(()) } @@ -126,15 +132,20 @@ impl TicketHistory { let cached_tickets = self .expirations .iter() - .take_while(|ticket_limiter| { - now.saturating_duration_since(ticket_limiter.first_seen) + .take_while(|pending_ticket| { + now.saturating_duration_since(pending_ticket.insert_time) >= self.ticket_cache_duration }) - .map(|ticket_limiter| ticket_limiter.active_topic.clone()) + .map(|pending_ticket| pending_ticket.active_topic.clone()) .collect::>(); - cached_tickets.iter().for_each(|active_topic| { - self.ticket_cache.remove(active_topic); + cached_tickets.into_iter().for_each(|active_topic| { + let count = 
self.ticket_cache.entry(active_topic.clone()).or_default(); + if *count > 0 { + *count -= 1; + } else { + self.ticket_cache.remove(&active_topic); + } self.expirations.pop_front(); }); } diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 83131994d..85571d01b 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -21,7 +21,7 @@ use base64::encode; use rlp::{DecoderError, Rlp, RlpStream}; use sha2::{Digest, Sha256}; -use std::{cmp::Ordering, fmt, hash::Hash}; +use std::{fmt, hash::Hash}; use tracing::debug; //pub type IdentTopic = Topic; @@ -63,7 +63,7 @@ impl Hasher for Sha256Hash { #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct TopicHash { - /// The topic hash. Stored as a string to align with the protobuf API. + /// The topic hash. Stored as a fixed length byte array. hash: [u8; 32], } @@ -110,7 +110,7 @@ impl fmt::Display for TopicHash { } } -/// A gossipsub topic. +/// A topic, as in sigpi/rust-libp2p/protocols/gossipsub #[derive(Debug, Clone)] pub struct Topic { topic: String, @@ -154,25 +154,6 @@ impl PartialEq for Topic { impl Eq for Topic {} -impl Hash for Topic { - fn hash(&self, _state: &mut T) { - self.hash(); - } -} - -// When sorted topics should group based on the topic string -impl PartialOrd for Topic { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.topic.cmp(&other.topic)) - } -} - -impl Ord for Topic { - fn cmp(&self, other: &Self) -> Ordering { - self.topic.cmp(&other.topic) - } -} - impl fmt::Display for Topic { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.topic) From 2f7711cb7ce2bb51cd25a83ea31595e9472d1d2c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 3 May 2022 09:42:25 +0200 Subject: [PATCH 093/391] Make constants of literals --- src/advertisement/ticket.rs | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 
f90abc785..a0ab88f00 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -5,6 +5,14 @@ use enr::NodeId; use node_info::NodeContact; use std::{cmp::Eq, collections::HashSet}; +// Max tickets that are stored from one node for a topic (in the configured +// time period) +const MAX_TICKETS_PER_NODE_TOPIC: u8 = 3; +// +const REGISTRATION_WINDOW_IN_SECS: u64 = 10; +// Max nodes that are considered in the selection process for an ad slot. +const MAX_REGISTRANTS_PER_AD_SLOT: usize = 50; + #[derive(PartialEq, Eq, Hash, Clone)] pub struct ActiveTopic { node_id: NodeId, @@ -115,7 +123,7 @@ impl TicketHistory { self.remove_expired(); let insert_time = Instant::now(); let count = self.ticket_cache.entry(active_topic.clone()).or_default(); - if *count >= 3 { + if *count >= MAX_TICKETS_PER_NODE_TOPIC { error!("Max 3 tickets per (NodeId, Topic) accepted in 15 minutes"); return Err("Ticket limit reached"); } @@ -141,7 +149,7 @@ impl TicketHistory { cached_tickets.into_iter().for_each(|active_topic| { let count = self.ticket_cache.entry(active_topic.clone()).or_default(); - if *count > 0 { + if *count > 1 { *count -= 1; } else { self.ticket_cache.remove(&active_topic); @@ -166,10 +174,11 @@ pub struct TicketPools { impl TicketPools { pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket) { if let Some(open_time) = ticket.req_time().checked_add(ticket.wait_time()) { - if open_time.elapsed() <= Duration::from_secs(10) { + if open_time.elapsed() <= Duration::from_secs(REGISTRATION_WINDOW_IN_SECS) { let pool = self.ticket_pools.entry(ticket.topic()).or_default(); - // Drop request if pool contains 50 nodes - if pool.len() < 50 { + // Drop request if pool contains 50 nodes, these nodes are out of luck and + // won't be automatically included in next registration window for this topic + if pool.len() < MAX_REGISTRANTS_PER_AD_SLOT { if pool.is_empty() { self.expirations.push_back(RegistrationWindow { topic: ticket.topic(), @@ -189,7 +198,7 
@@ impl Stream for TicketPools { self.expirations .pop_front() .map(|reg_window| { - if reg_window.open_time.elapsed() > Duration::from_secs(10) { + if reg_window.open_time.elapsed() > Duration::from_secs(REGISTRATION_WINDOW_IN_SECS) { self.ticket_pools .remove_entry(®_window.topic) .map(|(topic, ticket_pool)| { From b14453641931ed61bb620a75c087b14910f64a47 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 3 May 2022 10:01:50 +0200 Subject: [PATCH 094/391] Fix bug of removing entry prematurely --- src/advertisement/ticket.rs | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index a0ab88f00..87454145e 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -128,7 +128,7 @@ impl TicketHistory { return Err("Ticket limit reached"); } *count += 1; - self.expirations.push_back(PendingTicket{ + self.expirations.push_back(PendingTicket { active_topic, insert_time, }); @@ -195,20 +195,24 @@ impl TicketPools { impl Stream for TicketPools { type Item = Result<(TopicHash, HashMap), String>; fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + let ticket_pool = self.expirations.front(); + if let Some(reg_window) = ticket_pool { + if reg_window.open_time.elapsed() < Duration::from_secs(REGISTRATION_WINDOW_IN_SECS) { + return Poll::Pending; + } + } else { + return Poll::Pending; + } self.expirations .pop_front() .map(|reg_window| { - if reg_window.open_time.elapsed() > Duration::from_secs(REGISTRATION_WINDOW_IN_SECS) { - self.ticket_pools - .remove_entry(®_window.topic) - .map(|(topic, ticket_pool)| { - self.expirations.pop_front(); - Poll::Ready(Some(Ok((topic, ticket_pool)))) - }) - .unwrap_or_else(|| Poll::Ready(Some(Err("Ticket selection failed".into())))) - } else { - Poll::Pending - } + self.ticket_pools + .remove_entry(®_window.topic) + .map(|(topic, ticket_pool)| { + self.expirations.pop_front(); + 
Poll::Ready(Some(Ok((topic, ticket_pool)))) + }) + .unwrap_or_else(|| Poll::Ready(Some(Err("Ticket selection failed".into())))) }) .unwrap_or(Poll::Pending) } From 307a34c85dc09125e05a371b56f994c37ed2fc18 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 3 May 2022 11:54:40 +0200 Subject: [PATCH 095/391] Fix logic bugs in ticketing related stores --- src/advertisement/mod.rs | 18 +++++++++++------- src/advertisement/ticket.rs | 11 +++++++++-- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 2915c46e6..2562cadbb 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -7,6 +7,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; +use more_asserts::debug_unreachable; use tokio::time::Instant; use topic::TopicHash; use tracing::{debug, error}; @@ -117,13 +118,16 @@ impl Ads { }); to_remove_ads.into_iter().for_each(|(topic, index)| { - let topic_ads = self.ads.entry(topic).or_default(); - for _ in 0..index { - topic_ads.pop_front(); - self.expirations.pop_front(); - } - if topic_ads.is_empty() { - self.ads.remove(&topic); + if let Some(topic_ads) = self.ads.get_mut(&topic) { + for _ in 0..index { + topic_ads.pop_front(); + self.expirations.pop_front(); + } + if topic_ads.is_empty() { + self.ads.remove(&topic); + } + } else { + debug_unreachable!("Mismatched mapping between ads and their expirations"); } }); } diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 87454145e..ca934aa9d 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -2,6 +2,7 @@ use super::*; use crate::rpc::{RequestId, Ticket}; use delay_map::HashMapDelay; use enr::NodeId; +use more_asserts::debug_unreachable; use node_info::NodeContact; use std::{cmp::Eq, collections::HashSet}; @@ -98,6 +99,7 @@ impl Stream for Tickets { } // The PendingTicket has an ActiveTopic that maps to a ticket in Tickets +#[derive(Clone)] struct PendingTicket { active_topic: ActiveTopic, 
insert_time: Instant, @@ -212,7 +214,12 @@ impl Stream for TicketPools { self.expirations.pop_front(); Poll::Ready(Some(Ok((topic, ticket_pool)))) }) - .unwrap_or_else(|| Poll::Ready(Some(Err("Ticket selection failed".into())))) + .unwrap_or_else(|| { + debug_unreachable!( + "Mismatched mapping between ticket_pools and expirations invariant" + ); + Poll::Pending + }) }) .unwrap_or(Poll::Pending) } @@ -248,7 +255,7 @@ impl ActiveRegtopicRequests { ) -> Option { self.remove_expired(); self.requests - .remove(&ActiveTopic::new(node_id, topic)) + .get(&ActiveTopic::new(node_id, topic)) .map(|ids| ids.contains(&req_id)) } From 7373d0462fda25e31160e06457f3ebc47fc993b1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 3 May 2022 11:55:20 +0200 Subject: [PATCH 096/391] Run cargo fmt --- src/advertisement/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 2562cadbb..788c290c6 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -2,12 +2,12 @@ use super::*; use crate::Enr; use core::time::Duration; use futures::prelude::*; +use more_asserts::debug_unreachable; use std::{ collections::{HashMap, VecDeque}, pin::Pin, task::{Context, Poll}, }; -use more_asserts::debug_unreachable; use tokio::time::Instant; use topic::TopicHash; use tracing::{debug, error}; From 4f4c122bc0f5317e59815c9f8797554d9aa6055f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 4 May 2022 16:35:35 +0200 Subject: [PATCH 097/391] Allow REGCONFIRMATIONs through active requests --- src/advertisement/ticket.rs | 115 ++++++++++++++++++++---------------- src/service.rs | 32 +++++----- 2 files changed, 81 insertions(+), 66 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index ca934aa9d..902e15569 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,10 +1,13 @@ use super::*; -use crate::rpc::{RequestId, Ticket}; +use crate::{ + rpc::{RequestId, 
Ticket}, + service::ActiveRequest, +}; use delay_map::HashMapDelay; use enr::NodeId; use more_asserts::debug_unreachable; use node_info::NodeContact; -use std::{cmp::Eq, collections::HashSet}; +use std::cmp::Eq; // Max tickets that are stored from one node for a topic (in the configured // time period) @@ -14,6 +17,8 @@ const REGISTRATION_WINDOW_IN_SECS: u64 = 10; // Max nodes that are considered in the selection process for an ad slot. const MAX_REGISTRANTS_PER_AD_SLOT: usize = 50; +const MAX_CACHE_TIME_IN_SECS: u64 = 15; + #[derive(PartialEq, Eq, Hash, Clone)] pub struct ActiveTopic { node_id: NodeId, @@ -139,25 +144,29 @@ impl TicketHistory { fn remove_expired(&mut self) { let now = Instant::now(); - let cached_tickets = self + let ticket_cache_duration = self.ticket_cache_duration; + let ticket_cache = &mut self.ticket_cache; + let total_to_remove = self .expirations .iter() .take_while(|pending_ticket| { - now.saturating_duration_since(pending_ticket.insert_time) - >= self.ticket_cache_duration + now.saturating_duration_since(pending_ticket.insert_time) >= ticket_cache_duration + }) + .map(|pending_ticket| { + let count = ticket_cache + .entry(pending_ticket.active_topic.clone()) + .or_default(); + if *count > 1 { + *count -= 1; + } else { + ticket_cache.remove(&pending_ticket.active_topic); + } }) - .map(|pending_ticket| pending_ticket.active_topic.clone()) - .collect::>(); + .count(); - cached_tickets.into_iter().for_each(|active_topic| { - let count = self.ticket_cache.entry(active_topic.clone()).or_default(); - if *count > 1 { - *count -= 1; - } else { - self.ticket_cache.remove(&active_topic); - } + for _ in 0..total_to_remove { self.expirations.pop_front(); - }); + } } } @@ -227,75 +236,77 @@ impl Stream for TicketPools { #[derive(Clone)] pub struct ActiveRegtopicRequest { - active_topic: ActiveTopic, + req_id: RequestId, insert_time: Instant, } impl ActiveRegtopicRequest { - fn new(active_topic: ActiveTopic, insert_time: Instant) -> Self { + fn 
new(req_id: RequestId, insert_time: Instant) -> Self { ActiveRegtopicRequest { - active_topic, insert_time, + req_id, } } } #[derive(Default)] pub struct ActiveRegtopicRequests { - requests: HashMap>, + requests: HashMap, + request_history: HashMap, expirations: VecDeque, } impl ActiveRegtopicRequests { - pub fn is_active_req( - &mut self, - req_id: RequestId, - node_id: NodeId, - topic: TopicHash, - ) -> Option { - self.remove_expired(); - self.requests - .get(&ActiveTopic::new(node_id, topic)) - .map(|ids| ids.contains(&req_id)) + pub fn remove(&mut self, req_id: &RequestId) -> Option { + if let Some(seen_count) = self.request_history.get_mut(req_id) { + *seen_count += 1; + if *seen_count < 1 { + self.request_history.remove(req_id); + self.requests.remove(req_id).map(|req| ActiveRequest { + contact: req.contact.clone(), + request_body: req.request_body.clone(), + query_id: req.query_id, + callback: None, + }) + } else { + self.requests.get(req_id).map(|req| ActiveRequest { + contact: req.contact.clone(), + request_body: req.request_body.clone(), + query_id: req.query_id, + callback: None, + }) + } + } else { + None + } } - pub fn insert(&mut self, node_id: NodeId, topic: TopicHash, req_id: RequestId) { + pub fn insert(&mut self, req_id: RequestId, req: ActiveRequest) { self.remove_expired(); let now = Instant::now(); - let active_topic = ActiveTopic::new(node_id, topic); - - // Since a REGTOPIC request always receives a TICKET response, when we come to register with a ticket which - // wait-time is up we get a TICKET response with wait-time 0, hence we initiate a new REGTOPIC request. - // Since the registration window is 10 seconds, incase we would receive a RECONGIRMATION for that first - // REGTOPIC, that req-id would have been replaced, so we use a set. We extend the req-id set life-time upon - // each insert incase a REGCONFIRMATION comes to a later req-id. 
Max req-ids in a set is limited by our - // implementation accepting max 3 tickets for a (NodeId, Topic) within 15 minutes. - self.requests - .entry(active_topic.clone()) - .or_default() - .insert(req_id); - self.expirations - .iter() - .enumerate() - .find(|(_, req)| req.active_topic == active_topic) - .map(|(index, _)| index) - .map(|index| self.expirations.remove(index)); + + self.requests.insert(req_id.clone(), req); + // Each request id can be used twice, once for a TICKET response and + // once for a REGCONFIRMATION response + self.request_history.insert(req_id.clone(), 2); self.expirations - .push_back(ActiveRegtopicRequest::new(active_topic, now)); + .push_back(ActiveRegtopicRequest::new(req_id, now)); } fn remove_expired(&mut self) { let mut expired = Vec::new(); - self.expirations .iter() - .take_while(|req| req.insert_time.elapsed() >= Duration::from_secs(15)) + .take_while(|req| { + req.insert_time.elapsed() >= Duration::from_secs(MAX_CACHE_TIME_IN_SECS) + }) .for_each(|req| { expired.push(req.clone()); }); expired.into_iter().for_each(|req| { - self.requests.remove(&req.active_topic); + self.requests.remove(&req.req_id); + self.request_history.remove(&req.req_id); self.expirations.pop_front(); }); } diff --git a/src/service.rs b/src/service.rs index dd879e682..75bdaf237 100644 --- a/src/service.rs +++ b/src/service.rs @@ -239,7 +239,7 @@ pub struct Service { } /// Active RPC request awaiting a response from the handler. -struct ActiveRequest { +pub struct ActiveRequest { /// The address the request was sent to. pub contact: NodeContact, /// The request that was sent. 
@@ -794,7 +794,13 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); - if let Some(mut active_request) = self.active_requests.remove(&id) { + let active_request = if let Some(active_request) = self.active_requests.remove(&id) { + Some(active_request) + } else { + self.active_regtopic_requests.remove(&id) + }; + + if let Some(mut active_request) = active_request { debug!( "Received RPC response: {} to request: {} from: {}", response.body, active_request.request_body, active_request.contact @@ -1038,14 +1044,8 @@ impl Service { } } ResponseBody::RegisterConfirmation { topic } => { - if self - .active_regtopic_requests - .is_active_req(id, node_id, topic) - .is_some() - { - if let NodeContact::Enr(enr) = active_request.contact { - self.active_topics.insert(*enr, topic).ok(); - } + if let NodeContact::Enr(enr) = active_request.contact { + self.active_topics.insert(*enr, topic).ok(); } } } @@ -1142,7 +1142,6 @@ impl Service { } else { Vec::new() }; - let node_id = enr.node_id(); let request_body = RequestBody::RegisterTopic { topic, enr, @@ -1150,13 +1149,18 @@ impl Service { }; let active_request = ActiveRequest { + contact: contact.clone(), + request_body: request_body.clone(), + query_id: None, + callback: None, + }; + let req_id = self.send_rpc_request(ActiveRequest { contact, request_body, query_id: None, callback: None, - }; - let req_id = self.send_rpc_request(active_request); - self.active_regtopic_requests.insert(node_id, topic, req_id); + }); + self.active_regtopic_requests.insert(req_id, active_request); } fn topic_query_request( From ad410e5ca278f458cb7a77ddbf44f33185a5dc7e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 4 May 2022 16:38:49 +0200 Subject: [PATCH 098/391] Change notification of expected behaviour --- src/advertisement/ticket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 902e15569..156e32d40 100644 --- 
a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -131,7 +131,7 @@ impl TicketHistory { let insert_time = Instant::now(); let count = self.ticket_cache.entry(active_topic.clone()).or_default(); if *count >= MAX_TICKETS_PER_NODE_TOPIC { - error!("Max 3 tickets per (NodeId, Topic) accepted in 15 minutes"); + debug!("Max 3 tickets per (NodeId, Topic) accepted in 15 minutes"); return Err("Ticket limit reached"); } *count += 1; From 693442650909d8118cf9240ce89fed74c4cf947e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 4 May 2022 18:36:52 +0200 Subject: [PATCH 099/391] Abstract away finding nodes to target for publishing ads --- src/discv5.rs | 94 +++++++++++++++++++++++++++++++-------------------- src/error.rs | 2 ++ 2 files changed, 59 insertions(+), 37 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 2b5b5921f..42fde84f0 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -488,58 +488,78 @@ impl Discv5 { } } - // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, - // and send the topic query to these nodes pub fn topic_query_req( &self, - enr: Enr, topic_hash: TopicHash, ) -> impl Future, RequestError>> + 'static { - // convert the ENR to a node_contact. - let node_contact = NodeContact::from(enr); - - // the service will verify if this node is contactable, we just send it and - // await a response. 
- let (callback_send, callback_recv) = oneshot::channel(); - let channel = self.clone_channel(); async move { - let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; - - let event = ServiceRequest::TopicQuery(node_contact, topic_hash, callback_send); - - // send the request - channel - .send(event) + let all_found_ad_nodes: Vec = Vec::new(); + // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, + // and send the topic query to these nodes + let enrs = self + .find_closest_nodes_to_topic(topic_hash) .await - .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; - // await the response - callback_recv - .await - .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + .map_err(|e| RequestError::TopicMetrics(e.to_string()))?; + + for enr in enrs.into_iter() { + // convert the ENR to a node_contact. + let node_contact = NodeContact::from(enr); + + // the service will verify if this node is contactable, we just send it and + // await a response. 
+ let (callback_send, callback_recv) = oneshot::channel(); + let channel = self.clone_channel(); + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + + let event = ServiceRequest::TopicQuery(node_contact, topic_hash, callback_send); + + // send the request + channel + .send(event) + .await + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + // await the response + let found_ad_nodes = callback_recv + .await + .map_err(|e| RequestError::ChannelFailed(e.to_string()))?; + + if let Ok(found_ad_nodes) = found_ad_nodes { + for ad_node in found_ad_nodes.into_iter() { + all_found_ad_nodes.push(ad_node); + } + } + } + //let ad_nodes = all_found_ad_nodes.into_iter().flatten().collect(); + Ok(all_found_ad_nodes) } } - // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, - // and send the topic query to these nodes pub fn reg_topic_req( - &self, - enr: Enr, + &'static self, topic: Topic, ) -> impl Future> + 'static { - // convert the ENR to a node_contact. - let node_contact = NodeContact::from(enr); - - let channel = self.clone_channel(); - async move { - let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; - let event = ServiceRequest::RegisterTopic(node_contact, topic); - // send the request - channel - .send(event) + // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, + // and send the regtopic to these nodes + let enrs = self + .find_closest_nodes_to_topic(topic.hash()) .await - .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + .map_err(|e| RequestError::TopicMetrics(e.to_string()))?; + + // convert the ENR to a node_contact. 
+ for enr in enrs.into_iter() { + let node_contact = NodeContact::from(enr); + + let channel = self.clone_channel(); + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + let event = ServiceRequest::RegisterTopic(node_contact, topic.clone()); + // send the request + channel + .send(event) + .await + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + } Ok(()) } } diff --git a/src/error.rs b/src/error.rs index 26f07574b..fc19d9ae5 100644 --- a/src/error.rs +++ b/src/error.rs @@ -111,6 +111,8 @@ pub enum RequestError { InvalidMultiaddr(String), /// Failure generating random numbers during request. EntropyFailure(&'static str), + /// Finding nodes closest to a topic hash failed. + TopicMetrics(String), } #[derive(Debug, Clone, PartialEq)] From e38770a8ee4b99ad026d922af65976b4dcd6a5f4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 4 May 2022 19:07:21 +0200 Subject: [PATCH 100/391] Make code uniform --- src/discv5.rs | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 42fde84f0..1af33d70e 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -492,13 +492,14 @@ impl Discv5 { &self, topic_hash: TopicHash, ) -> impl Future, RequestError>> + 'static { + let find_future = self.find_closest_nodes_to_topic(topic_hash); + let channel = self.clone_channel(); async move { - let all_found_ad_nodes: Vec = Vec::new(); + let mut all_found_ad_nodes = Vec::new(); // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, // and send the topic query to these nodes - let enrs = self - .find_closest_nodes_to_topic(topic_hash) + let enrs = find_future .await .map_err(|e| RequestError::TopicMetrics(e.to_string()))?; @@ -509,10 +510,12 @@ impl Discv5 { // the service will verify if this node is contactable, we just send it and // await a response. 
let (callback_send, callback_recv) = oneshot::channel(); - let channel = self.clone_channel(); - let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; let event = ServiceRequest::TopicQuery(node_contact, topic_hash, callback_send); + let channel = channel + .as_ref() + .clone() + .map_err(|_| RequestError::ServiceNotStarted)?; // send the request channel @@ -520,17 +523,13 @@ impl Discv5 { .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; // await the response - let found_ad_nodes = callback_recv + callback_recv .await - .map_err(|e| RequestError::ChannelFailed(e.to_string()))?; - - if let Ok(found_ad_nodes) = found_ad_nodes { - for ad_node in found_ad_nodes.into_iter() { - all_found_ad_nodes.push(ad_node); - } - } + .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + .map(|ad_nodes| all_found_ad_nodes.push(ad_nodes)) + .ok(); } - //let ad_nodes = all_found_ad_nodes.into_iter().flatten().collect(); + let all_found_ad_nodes = all_found_ad_nodes.into_iter().flatten().collect(); Ok(all_found_ad_nodes) } } @@ -539,20 +538,23 @@ impl Discv5 { &'static self, topic: Topic, ) -> impl Future> + 'static { + let find_future = self.find_closest_nodes_to_topic(topic.hash()); + let channel = self.clone_channel(); + async move { // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, // and send the regtopic to these nodes - let enrs = self - .find_closest_nodes_to_topic(topic.hash()) + let enrs = find_future .await .map_err(|e| RequestError::TopicMetrics(e.to_string()))?; // convert the ENR to a node_contact. 
for enr in enrs.into_iter() { let node_contact = NodeContact::from(enr); - - let channel = self.clone_channel(); - let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + let channel = channel + .as_ref() + .clone() + .map_err(|_| RequestError::ServiceNotStarted)?; let event = ServiceRequest::RegisterTopic(node_contact, topic.clone()); // send the request channel From 2c421974ef4b3a3b9a3367e9b6843033f46a3103 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 4 May 2022 19:10:52 +0200 Subject: [PATCH 101/391] Fix clippy warnings --- src/discv5.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 1af33d70e..c235af27b 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -514,7 +514,6 @@ impl Discv5 { let event = ServiceRequest::TopicQuery(node_contact, topic_hash, callback_send); let channel = channel .as_ref() - .clone() .map_err(|_| RequestError::ServiceNotStarted)?; // send the request @@ -538,6 +537,7 @@ impl Discv5 { &'static self, topic: Topic, ) -> impl Future> + 'static { + let find_future = self.find_closest_nodes_to_topic(topic.hash()); let channel = self.clone_channel(); @@ -553,7 +553,6 @@ impl Discv5 { let node_contact = NodeContact::from(enr); let channel = channel .as_ref() - .clone() .map_err(|_| RequestError::ServiceNotStarted)?; let event = ServiceRequest::RegisterTopic(node_contact, topic.clone()); // send the request From bbfd2bd264a8f3ff56fc9f50a1069101bd774594 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 4 May 2022 19:42:11 +0200 Subject: [PATCH 102/391] Fix wrong lifetime param bug --- src/discv5.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/discv5.rs b/src/discv5.rs index c235af27b..57b9f2831 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -534,7 +534,7 @@ impl Discv5 { } pub fn reg_topic_req( - &'static self, + &self, topic: Topic, ) -> impl Future> + 'static { From cd8da4fdd8280c65f69ff2b25986f26218e70b59 Mon Sep 17 00:00:00 2001 From: 
Emilia Hane Date: Wed, 4 May 2022 19:46:45 +0200 Subject: [PATCH 103/391] Run cargo fmt --- src/discv5.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/discv5.rs b/src/discv5.rs index 57b9f2831..a495916dd 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -537,7 +537,6 @@ impl Discv5 { &self, topic: Topic, ) -> impl Future> + 'static { - let find_future = self.find_closest_nodes_to_topic(topic.hash()); let channel = self.clone_channel(); From 9f8a9e78a5412bb3e7ee337a176d0bfb543ee8ba Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 5 May 2022 09:12:20 +0200 Subject: [PATCH 104/391] Simplify cli output for call to discv5's active_topics method --- src/advertisement/mod.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 788c290c6..43eda345b 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -1,10 +1,11 @@ use super::*; -use crate::Enr; +use crate::{enr::NodeId, Enr}; use core::time::Duration; use futures::prelude::*; use more_asserts::debug_unreachable; use std::{ collections::{HashMap, VecDeque}, + fmt, pin::Pin, task::{Context, Poll}, }; @@ -149,3 +150,20 @@ impl Ads { Ok(()) } } + +impl fmt::Display for Ads { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ads = self + .ads + .iter() + .map(|ad| { + let ad_node_ids = + ad.1.iter() + .map(|ad_node| ad_node.node_record.node_id()) + .collect::>(); + format!("Topic: {}, Advertised at: {:?}", ad.0, ad_node_ids) + }) + .collect::>(); + write!(f, "{:?}", ads) + } +} From c88f355e89736b44e06152ee6ac2715f7b2c2f95 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 5 May 2022 09:32:14 +0200 Subject: [PATCH 105/391] Turn node id output into human readable string --- src/advertisement/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 43eda345b..e237286f3 100644 --- a/src/advertisement/mod.rs +++ 
b/src/advertisement/mod.rs @@ -159,8 +159,8 @@ impl fmt::Display for Ads { .map(|ad| { let ad_node_ids = ad.1.iter() - .map(|ad_node| ad_node.node_record.node_id()) - .collect::>(); + .map(|ad_node| base64::encode(ad_node.node_record.node_id().raw())) + .collect::>(); format!("Topic: {}, Advertised at: {:?}", ad.0, ad_node_ids) }) .collect::>(); From 8196c99ab52310f5c6543e7bf3d2e9c5ed98cf1e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 6 May 2022 11:12:22 +0200 Subject: [PATCH 106/391] Document advertisment package --- src/advertisement/mod.rs | 20 +++++++- src/advertisement/ticket.rs | 98 +++++++++++++++++++++++++++---------- src/advertisement/topic.rs | 12 ++--- src/rpc.rs | 7 +-- src/service.rs | 7 +-- 5 files changed, 105 insertions(+), 39 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index e237286f3..66993fb76 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -1,5 +1,5 @@ use super::*; -use crate::{enr::NodeId, Enr}; +use crate::Enr; use core::time::Duration; use futures::prelude::*; use more_asserts::debug_unreachable; @@ -17,9 +17,13 @@ mod test; pub mod ticket; pub mod topic; +/// An AdNode is a node that occupies an ad slot on another node. #[derive(Debug, Clone)] pub struct AdNode { + /// The node being advertised. node_record: Enr, + /// The insert_time is used to retrieve the ticket_wait time for a given + /// topic. insert_time: Instant, } @@ -42,9 +46,13 @@ impl PartialEq for AdNode { } } +/// An AdTopic keeps track of when an AdNode is created. #[derive(Clone, Debug)] struct AdTopic { + /// The topic maps to the topic of an AdNode in Ads's ads. topic: TopicHash, + /// The insert_time is used to make sure and AdNode persists in Ads + /// only the ad_lifetime duration. insert_time: Instant, } @@ -54,12 +62,22 @@ impl AdTopic { } } +/// The Ads struct contains the locally adveritsed AdNodes. 
#[derive(Clone, Debug)] pub struct Ads { + /// The expirations makes sure that AdNodes are advertised only for the + /// ad_lifetime duration. expirations: VecDeque, + /// The ads store the AdNodes per TopicHash in FIFO order of expiration. ads: HashMap>, + /// The ad_lifetime is specified by the spec but can be modified for + /// testing purposes. ad_lifetime: Duration, + /// The max_ads_per_topic limit is up to the user although recommnedations + /// are given in the specs. max_ads_per_topic: usize, + /// The max_ads limit is up to the user although recommnedations are + /// given in the specs. max_ads: usize, } diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 156e32d40..d160fdb73 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -9,22 +9,30 @@ use more_asserts::debug_unreachable; use node_info::NodeContact; use std::cmp::Eq; -// Max tickets that are stored from one node for a topic (in the configured -// time period) +/// Max tickets that are stored from one node for a topic (in the configured +/// time period). const MAX_TICKETS_PER_NODE_TOPIC: u8 = 3; -// +/// The time window in which tickets are accepted for any given free ad slot. const REGISTRATION_WINDOW_IN_SECS: u64 = 10; -// Max nodes that are considered in the selection process for an ad slot. +/// Max nodes that are considered in the selection process for an ad slot. const MAX_REGISTRANTS_PER_AD_SLOT: usize = 50; - -const MAX_CACHE_TIME_IN_SECS: u64 = 15; - +/// The duration for which requests are stored. +const REQUEST_TIMEOUT_IN_SECS: u64 = 15; +/// Each REGTOPIC request can get both a TICKET response and REGCONFIRMATION +/// response. +const MAX_RESPONSES_PER_REGTOPIC: u8 = 2; + +/// A topic is active when it associated with the node id from a node it is +/// published on. #[derive(PartialEq, Eq, Hash, Clone)] pub struct ActiveTopic { + /// NodeId of the sender of the TICKET response. 
node_id: NodeId, + /// The topic hash as it is sent in the TICKET response topic: TopicHash, } + impl ActiveTopic { pub fn new(node_id: NodeId, topic: TopicHash) -> Self { ActiveTopic { node_id, topic } @@ -35,8 +43,12 @@ impl ActiveTopic { } } +/// A ticket is active when it is associated with the node contact of +/// the sender of the ticket. pub struct ActiveTicket { + /// Node Contact of the sender of the ticket. contact: NodeContact, + /// The ticket, an opaque object to the receiver. ticket: Vec, } @@ -54,17 +66,21 @@ impl ActiveTicket { } } -/// Tickets received from other nodes as response to REGTOPIC req +/// Tickets holds the tickets recieved in TICKET responses to locally +/// initiated REGTOPIC requests. pub struct Tickets { + /// Tickets maps one ActiveTicket per ActiveTopic. tickets: HashMapDelay, + /// TicketHistory sets a time limit to how many times the ActiveTicket + /// value in tickets can be updated within a given ticket_limiter_duration. ticket_history: TicketHistory, } impl Tickets { - pub fn new(ticket_cache_duration: Duration) -> Self { + pub fn new(ticket_limiter_duration: Duration) -> Self { Tickets { tickets: HashMapDelay::new(Duration::default()), - ticket_history: TicketHistory::new(ticket_cache_duration), + ticket_history: TicketHistory::new(ticket_limiter_duration), } } @@ -103,33 +119,45 @@ impl Stream for Tickets { } } -// The PendingTicket has an ActiveTopic that maps to a ticket in Tickets +/// An PendingTicket maps to a Ticket in Tickets upon insert. #[derive(Clone)] struct PendingTicket { + /// The ActiveTopic serves to match the Ticket to an entry in Tickets' + /// tickets HashMapDelay. active_topic: ActiveTopic, + /// The insert_time is used to check MAX_TICKETS_PER_NODE_TOPIC against + /// the ticket_limiter_duration. 
insert_time: Instant, } +/// TicketHistory keeps track of how many times a ticket was replaced for +/// an ActiveTopic within the time limit given by ticket_limiter_duration +/// and limits it to MAX_TICKETS_PER_NODE_TOPIC times. #[derive(Default)] struct TicketHistory { - ticket_cache: HashMap, + /// The ticket_count keeps track of how many tickets are stored for the + /// ActiveTopic. + ticket_count: HashMap, + /// Up to MAX_TICKETS_PER_NODE_TOPIC PendingTickets in expirations maps + /// to an ActiveTopic in ticket_count. expirations: VecDeque, - ticket_cache_duration: Duration, + /// The time a PendingTicket remains in expirations. + ticket_limiter_duration: Duration, } impl TicketHistory { - fn new(ticket_cache_duration: Duration) -> Self { + fn new(ticket_limiter_duration: Duration) -> Self { TicketHistory { - ticket_cache: HashMap::new(), + ticket_count: HashMap::new(), expirations: VecDeque::new(), - ticket_cache_duration, + ticket_limiter_duration, } } pub fn insert(&mut self, active_topic: ActiveTopic) -> Result<(), &str> { self.remove_expired(); let insert_time = Instant::now(); - let count = self.ticket_cache.entry(active_topic.clone()).or_default(); + let count = self.ticket_count.entry(active_topic.clone()).or_default(); if *count >= MAX_TICKETS_PER_NODE_TOPIC { debug!("Max 3 tickets per (NodeId, Topic) accepted in 15 minutes"); return Err("Ticket limit reached"); @@ -144,22 +172,22 @@ impl TicketHistory { fn remove_expired(&mut self) { let now = Instant::now(); - let ticket_cache_duration = self.ticket_cache_duration; - let ticket_cache = &mut self.ticket_cache; + let ticket_limiter_duration = self.ticket_limiter_duration; + let ticket_count = &mut self.ticket_count; let total_to_remove = self .expirations .iter() .take_while(|pending_ticket| { - now.saturating_duration_since(pending_ticket.insert_time) >= ticket_cache_duration + now.saturating_duration_since(pending_ticket.insert_time) >= ticket_limiter_duration }) .map(|pending_ticket| { - let count 
= ticket_cache + let count = ticket_count .entry(pending_ticket.active_topic.clone()) .or_default(); if *count > 1 { *count -= 1; } else { - ticket_cache.remove(&pending_ticket.active_topic); + ticket_count.remove(&pending_ticket.active_topic); } }) .count(); @@ -170,15 +198,26 @@ impl TicketHistory { } } +/// The RegistrationWindow is the time from when an ad slot becomes free +/// until no more registration attempts are accepted for the ad slot. #[derive(Clone)] struct RegistrationWindow { + /// The RegistrationWindow exists for a specific ad slot, so for a + /// specific topic. topic: TopicHash, + /// The open_time is used to make sure the RegistrationWindow closes + /// after REGISTRATION_WINDOW_IN_SECS. open_time: Instant, } +/// The TicketPools collects all the registration attempts for a free ad slot. #[derive(Default)] pub struct TicketPools { + /// The ticket_pools keeps track of all the registrants and their Tickets. One + /// ticket_pool per TopicHash can be open at a time. ticket_pools: HashMap>, + /// The expirations keeps track of when to close a ticket pool so the next one + /// can be opened. expirations: VecDeque, } @@ -234,9 +273,16 @@ impl Stream for TicketPools { } } +/// Since according to spec, a REGTOPIC request can receive both a TICKET and +/// then REGISTRATION_WINDOW_IN_SECS seconds later optionally also a +/// REGCONFIRMATION response, ActiveRegtopicRequests need to be handled separate +/// from ActiveRequests in Service. #[derive(Clone)] pub struct ActiveRegtopicRequest { + /// The RequestId identifies an ActiveRequest. req_id: RequestId, + /// The insert_time is used to make sure an ActiveRegtopicRequest persists + /// no longer than REQUEST_TIMEOUT_IN_SECS. insert_time: Instant, } @@ -249,6 +295,8 @@ impl ActiveRegtopicRequest { } } +/// The ActiveRegtopicRequests keeps ActiveRequests until the have matched +/// with MAX_RESPONSES_PER_REGTOPIC repsonses. 
#[derive(Default)] pub struct ActiveRegtopicRequests { requests: HashMap, @@ -286,9 +334,7 @@ impl ActiveRegtopicRequests { let now = Instant::now(); self.requests.insert(req_id.clone(), req); - // Each request id can be used twice, once for a TICKET response and - // once for a REGCONFIRMATION response - self.request_history.insert(req_id.clone(), 2); + self.request_history.insert(req_id.clone(), MAX_RESPONSES_PER_REGTOPIC); self.expirations .push_back(ActiveRegtopicRequest::new(req_id, now)); } @@ -298,7 +344,7 @@ impl ActiveRegtopicRequests { self.expirations .iter() .take_while(|req| { - req.insert_time.elapsed() >= Duration::from_secs(MAX_CACHE_TIME_IN_SECS) + req.insert_time.elapsed() >= Duration::from_secs(REQUEST_TIMEOUT_IN_SECS) }) .for_each(|req| { expired.push(req.clone()); diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 85571d01b..328bac4cc 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -61,17 +61,17 @@ impl Hasher for Sha256Hash { } } +/// A topic hashed by the hash algorithm implemented by the sending node. +/// TopicHash is used in place of a Vec in requests and responses. This +/// deviates from the wire protocol, it was necessary that the sender hashes +/// the topic as the hash is used to deteremine by XOR distance which nodes +/// to send the REGTOPIC request to. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct TopicHash { /// The topic hash. Stored as a fixed length byte array. hash: [u8; 32], } -// Topic Hash decoded into bytes needs to have length 32 bytes to encode it into a -// NodeId, which is necessary to make use of the XOR distance look-up of a topic. It -// makes sense to use a hashing algorithm which produces 32 bytes since the hash of -// any given topic string can then be reproduced by any client when making a topic -// query or publishing the same topic in proximity to others of its kind. 
impl TopicHash { pub fn from_raw(hash: [u8; 32]) -> TopicHash { TopicHash { hash } @@ -145,7 +145,7 @@ impl Topic { } // Each hash algortihm chosen to publish a topic with (as XOR -// metric key) is its own Topic +// metric key) is its own Topic. impl PartialEq for Topic { fn eq(&self, other: &Topic) -> bool { self.hash() == other.hash() diff --git a/src/rpc.rs b/src/rpc.rs index bb634358e..f7929193b 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -132,7 +132,7 @@ pub enum ResponseBody { ticket: Vec, /// The time in seconds to wait before attempting to register again. wait_time: u64, - /// The topic hash for which the opaque ticket is issued + /// The topic hash for which the opaque ticket is issued. topic: TopicHash, }, /// The REGCONFIRMATION response. @@ -686,9 +686,9 @@ impl Message { } } +/// A ticket object, outlined in the spec. #[derive(Debug, Clone)] pub struct Ticket { - //nonce: u64, src_node_id: NodeId, src_ip: IpAddr, topic: TopicHash, @@ -810,6 +810,8 @@ impl rlp::Decodable for Ticket { } } +/// Per topic, one registration attempt per node is stored at once. +/// Tickets that overlap based on these fields are considered equal. impl PartialEq for Ticket { fn eq(&self, other: &Self) -> bool { self.src_node_id == other.src_node_id @@ -820,7 +822,6 @@ impl PartialEq for Ticket { impl Ticket { pub fn new( - //nonce: u64, src_node_id: NodeId, src_ip: IpAddr, topic: TopicHash, diff --git a/src/service.rs b/src/service.rs index 75bdaf237..3175a9a80 100644 --- a/src/service.rs +++ b/src/service.rs @@ -195,7 +195,8 @@ pub struct Service { /// Keeps track of the number of responses received from a NODES response. active_nodes_responses: HashMap, - /// Keeps track of expected REGCONFIRMATION responses that may be received from a REGTOPIC request. + /// Keeps track of expected REGCONFIRMATION responses that may be received from a REGTOPIC + /// request. active_regtopic_requests: ActiveRegtopicRequests, /// A map of votes nodes have made about our external IP address. 
We accept the majority. @@ -222,7 +223,7 @@ pub struct Service { /// A channel that the service emits events on. event_stream: Option>, - /// Ads advertised for other nodes. + /// Ads advertised locally for other nodes. ads: Ads, /// Tickets received by other nodes. @@ -234,7 +235,7 @@ pub struct Service { /// Ads currently advertised on other nodes. active_topics: Ads, - /// Tickets pending registration + /// Locally issued tickets returned by nodes pending registration for free local ad slots. ticket_pools: TicketPools, } From 46d63cd63748567a6848a42dc9df63fb6e89e305 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 08:45:48 +0200 Subject: [PATCH 107/391] Run cargo fmt --- src/advertisement/mod.rs | 8 ++++---- src/advertisement/ticket.rs | 22 +++++++++++----------- src/advertisement/topic.rs | 2 +- src/service.rs | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 66993fb76..a9bfe47fa 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -22,7 +22,7 @@ pub mod topic; pub struct AdNode { /// The node being advertised. node_record: Enr, - /// The insert_time is used to retrieve the ticket_wait time for a given + /// The insert_time is used to retrieve the ticket_wait time for a given /// topic. insert_time: Instant, } @@ -51,7 +51,7 @@ impl PartialEq for AdNode { struct AdTopic { /// The topic maps to the topic of an AdNode in Ads's ads. topic: TopicHash, - /// The insert_time is used to make sure and AdNode persists in Ads + /// The insert_time is used to make sure and AdNode persists in Ads /// only the ad_lifetime duration. insert_time: Instant, } @@ -65,7 +65,7 @@ impl AdTopic { /// The Ads struct contains the locally adveritsed AdNodes. #[derive(Clone, Debug)] pub struct Ads { - /// The expirations makes sure that AdNodes are advertised only for the + /// The expirations makes sure that AdNodes are advertised only for the /// ad_lifetime duration. 
expirations: VecDeque, /// The ads store the AdNodes per TopicHash in FIFO order of expiration. @@ -73,7 +73,7 @@ pub struct Ads { /// The ad_lifetime is specified by the spec but can be modified for /// testing purposes. ad_lifetime: Duration, - /// The max_ads_per_topic limit is up to the user although recommnedations + /// The max_ads_per_topic limit is up to the user although recommnedations /// are given in the specs. max_ads_per_topic: usize, /// The max_ads limit is up to the user although recommnedations are diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index d160fdb73..227cd754b 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -18,7 +18,7 @@ const REGISTRATION_WINDOW_IN_SECS: u64 = 10; const MAX_REGISTRANTS_PER_AD_SLOT: usize = 50; /// The duration for which requests are stored. const REQUEST_TIMEOUT_IN_SECS: u64 = 15; -/// Each REGTOPIC request can get both a TICKET response and REGCONFIRMATION +/// Each REGTOPIC request can get both a TICKET response and REGCONFIRMATION /// response. const MAX_RESPONSES_PER_REGTOPIC: u8 = 2; @@ -32,7 +32,6 @@ pub struct ActiveTopic { topic: TopicHash, } - impl ActiveTopic { pub fn new(node_id: NodeId, topic: TopicHash) -> Self { ActiveTopic { node_id, topic } @@ -71,7 +70,7 @@ impl ActiveTicket { pub struct Tickets { /// Tickets maps one ActiveTicket per ActiveTopic. tickets: HashMapDelay, - /// TicketHistory sets a time limit to how many times the ActiveTicket + /// TicketHistory sets a time limit to how many times the ActiveTicket /// value in tickets can be updated within a given ticket_limiter_duration. 
ticket_history: TicketHistory, } @@ -130,7 +129,7 @@ struct PendingTicket { insert_time: Instant, } -/// TicketHistory keeps track of how many times a ticket was replaced for +/// TicketHistory keeps track of how many times a ticket was replaced for /// an ActiveTopic within the time limit given by ticket_limiter_duration /// and limits it to MAX_TICKETS_PER_NODE_TOPIC times. #[derive(Default)] @@ -138,7 +137,7 @@ struct TicketHistory { /// The ticket_count keeps track of how many tickets are stored for the /// ActiveTopic. ticket_count: HashMap, - /// Up to MAX_TICKETS_PER_NODE_TOPIC PendingTickets in expirations maps + /// Up to MAX_TICKETS_PER_NODE_TOPIC PendingTickets in expirations maps /// to an ActiveTopic in ticket_count. expirations: VecDeque, /// The time a PendingTicket remains in expirations. @@ -213,7 +212,7 @@ struct RegistrationWindow { /// The TicketPools collects all the registration attempts for a free ad slot. #[derive(Default)] pub struct TicketPools { - /// The ticket_pools keeps track of all the registrants and their Tickets. One + /// The ticket_pools keeps track of all the registrants and their Tickets. One /// ticket_pool per TopicHash can be open at a time. ticket_pools: HashMap>, /// The expirations keeps track of when to close a ticket pool so the next one @@ -273,15 +272,15 @@ impl Stream for TicketPools { } } -/// Since according to spec, a REGTOPIC request can receive both a TICKET and -/// then REGISTRATION_WINDOW_IN_SECS seconds later optionally also a -/// REGCONFIRMATION response, ActiveRegtopicRequests need to be handled separate +/// Since according to spec, a REGTOPIC request can receive both a TICKET and +/// then REGISTRATION_WINDOW_IN_SECS seconds later optionally also a +/// REGCONFIRMATION response, ActiveRegtopicRequests need to be handled separate /// from ActiveRequests in Service. #[derive(Clone)] pub struct ActiveRegtopicRequest { /// The RequestId identifies an ActiveRequest. 
req_id: RequestId, - /// The insert_time is used to make sure an ActiveRegtopicRequest persists + /// The insert_time is used to make sure an ActiveRegtopicRequest persists /// no longer than REQUEST_TIMEOUT_IN_SECS. insert_time: Instant, } @@ -334,7 +333,8 @@ impl ActiveRegtopicRequests { let now = Instant::now(); self.requests.insert(req_id.clone(), req); - self.request_history.insert(req_id.clone(), MAX_RESPONSES_PER_REGTOPIC); + self.request_history + .insert(req_id.clone(), MAX_RESPONSES_PER_REGTOPIC); self.expirations .push_back(ActiveRegtopicRequest::new(req_id, now)); } diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 328bac4cc..f7a3a5fe6 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -64,7 +64,7 @@ impl Hasher for Sha256Hash { /// A topic hashed by the hash algorithm implemented by the sending node. /// TopicHash is used in place of a Vec in requests and responses. This /// deviates from the wire protocol, it was necessary that the sender hashes -/// the topic as the hash is used to deteremine by XOR distance which nodes +/// the topic as the hash is used to deteremine by XOR distance which nodes /// to send the REGTOPIC request to. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct TopicHash { diff --git a/src/service.rs b/src/service.rs index 3175a9a80..d904a11fc 100644 --- a/src/service.rs +++ b/src/service.rs @@ -195,7 +195,7 @@ pub struct Service { /// Keeps track of the number of responses received from a NODES response. active_nodes_responses: HashMap, - /// Keeps track of expected REGCONFIRMATION responses that may be received from a REGTOPIC + /// Keeps track of expected REGCONFIRMATION responses that may be received from a REGTOPIC /// request. 
active_regtopic_requests: ActiveRegtopicRequests, From 6ed8b520b8cf544c340d45c977ab9fa6ba10a168 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 09:00:45 +0200 Subject: [PATCH 108/391] Change confusing naming --- src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/error.rs b/src/error.rs index fc19d9ae5..57ef78b87 100644 --- a/src/error.rs +++ b/src/error.rs @@ -112,7 +112,7 @@ pub enum RequestError { /// Failure generating random numbers during request. EntropyFailure(&'static str), /// Finding nodes closest to a topic hash failed. - TopicMetrics(String), + TopicDistance(String), } #[derive(Debug, Clone, PartialEq)] From 47c795a91b1903e825db17a8e3f596b46d160096 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 09:41:14 +0200 Subject: [PATCH 109/391] Add metric to track topics to (re)publish --- src/discv5.rs | 4 ++-- src/metrics.rs | 6 ++++++ src/service.rs | 4 +++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index a495916dd..c499e352a 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -501,7 +501,7 @@ impl Discv5 { // and send the topic query to these nodes let enrs = find_future .await - .map_err(|e| RequestError::TopicMetrics(e.to_string()))?; + .map_err(|e| RequestError::TopicDistance(e.to_string()))?; for enr in enrs.into_iter() { // convert the ENR to a node_contact. @@ -545,7 +545,7 @@ impl Discv5 { // and send the regtopic to these nodes let enrs = find_future .await - .map_err(|e| RequestError::TopicMetrics(e.to_string()))?; + .map_err(|e| RequestError::TopicDistance(e.to_string()))?; // convert the ENR to a node_contact. for enr in enrs.into_iter() { diff --git a/src/metrics.rs b/src/metrics.rs index 2e6f2fc9a..f456a9b06 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -16,6 +16,8 @@ pub struct InternalMetrics { pub bytes_sent: AtomicUsize, /// The number of bytes received. 
pub bytes_recv: AtomicUsize, + /// The number of topics to attempt advertising on other nodes. + pub topics_to_publish: AtomicUsize, } impl Default for InternalMetrics { @@ -26,6 +28,7 @@ impl Default for InternalMetrics { unsolicited_requests_per_window: AtomicUsize::new(0), bytes_sent: AtomicUsize::new(0), bytes_recv: AtomicUsize::new(0), + topics_to_publish: AtomicUsize::new(0), } } } @@ -55,6 +58,8 @@ pub struct Metrics { pub bytes_sent: usize, /// The number of bytes received. pub bytes_recv: usize, + /// The number of topics to attempt advertising on other nodes. + pub topics_to_publish: usize, } impl From<&METRICS> for Metrics { @@ -67,6 +72,7 @@ impl From<&METRICS> for Metrics { / internal_metrics.moving_window as f64, bytes_sent: internal_metrics.bytes_sent.load(Ordering::Relaxed), bytes_recv: internal_metrics.bytes_recv.load(Ordering::Relaxed), + topics_to_publish: internal_metrics.topics_to_publish.load(Ordering::Relaxed), } } } diff --git a/src/service.rs b/src/service.rs index d904a11fc..eecf7c726 100644 --- a/src/service.rs +++ b/src/service.rs @@ -28,6 +28,7 @@ use crate::{ self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable, NodeStatus, UpdateResult, }, + metrics::METRICS, node_info::{NodeAddress, NodeContact}, packet::MAX_PACKET_SIZE, query_pool::{ @@ -49,7 +50,7 @@ use std::{ collections::{HashMap, HashSet}, io::{Error, ErrorKind}, net::SocketAddr, - sync::Arc, + sync::{Arc, atomic::Ordering}, task::Poll, time::{Duration, Instant}, }; @@ -422,6 +423,7 @@ impl Service { ServiceRequest::RegisterTopic(node_contact, topic) => { let topic_hash = topic.hash(); self.topics.insert(topic_hash, topic); + METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); let local_enr = self.local_enr.read().clone(); self.reg_topic_request(node_contact, topic_hash, local_enr, None) } From 06155b9dec25d2fe3858ae8e85c7b129929eab0d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 09:44:44 +0200 Subject: 
[PATCH 110/391] Run cargo fmt --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index eecf7c726..2b0d58a3e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -50,7 +50,7 @@ use std::{ collections::{HashMap, HashSet}, io::{Error, ErrorKind}, net::SocketAddr, - sync::{Arc, atomic::Ordering}, + sync::{atomic::Ordering, Arc}, task::Poll, time::{Duration, Instant}, }; From a41cd7498678f8d0919b0a137795a0fd2fadb8ac Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 10:30:23 +0200 Subject: [PATCH 111/391] Allow removing topics from republishing set --- src/discv5.rs | 26 ++++++++++++++++++++++++-- src/service.rs | 14 ++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index c499e352a..bd9c19174 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -411,7 +411,7 @@ impl Discv5 { .collect() } - pub fn hashes(&self, topic: String) -> Vec<(TopicHash, String)> { + pub fn hashes(topic: String) -> Vec<(TopicHash, String)> { let sha256_topic = Topic::new(topic); vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] } @@ -533,10 +533,32 @@ impl Discv5 { } } + pub fn remove_topic( + &self, + topic: String, + ) -> impl Future, RequestError>> + 'static { + let topic = Topic::new(topic); + let channel = self.clone_channel(); + + async move { + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + let (callback_send, callback_recv) = oneshot::channel(); + let event = ServiceRequest::RemoveTopic(topic.hash(), callback_send); + channel + .send(event) + .await + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + callback_recv + .await + .map_err(|e| RequestError::ChannelFailed(e.to_string()))? 
+ } + } + pub fn reg_topic_req( &self, - topic: Topic, + topic: String, ) -> impl Future> + 'static { + let topic = Topic::new(topic); let find_future = self.find_closest_nodes_to_topic(topic.hash()); let channel = self.clone_channel(); diff --git a/src/service.rs b/src/service.rs index 2b0d58a3e..9368ed5a9 100644 --- a/src/service.rs +++ b/src/service.rs @@ -169,6 +169,10 @@ pub enum ServiceRequest { /// RegisterTopic publishes this node as an advertiser for a topic at given node RegisterTopic(NodeContact, Topic), ActiveTopics(oneshot::Sender>), + RemoveTopic( + TopicHash, + oneshot::Sender, RequestError>>, + ), } use crate::discv5::PERMIT_BAN_LIST; @@ -432,6 +436,16 @@ impl Service { error!("Failed to return active topics"); } } + ServiceRequest::RemoveTopic(topic_hash, callback) => { + let topic = if let Some(topic) = self.topics.remove(&topic_hash) { + Some(topic.topic()) + } else { + None + }; + if callback.send(Ok(topic)).is_err() { + error!("Failed to return the removed topic"); + } + } } } Some(event) = self.handler_recv.recv() => { From a97c845e5be50220d916255810e93ec56cf1d0b1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 13:16:33 +0200 Subject: [PATCH 112/391] Add metrics for topics --- src/advertisement/mod.rs | 8 ++++++++ src/discv5.rs | 6 ++++++ src/metrics.rs | 12 ++++++++++++ src/service.rs | 11 ++++++----- 4 files changed, 32 insertions(+), 5 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index a9bfe47fa..1fe72ecf0 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -100,6 +100,14 @@ impl Ads { }) } + pub fn is_empty(&self) -> bool { + self.expirations.is_empty() + } + + pub fn len(&self) -> usize { + self.expirations.len() + } + pub fn get_ad_nodes(&self, topic: TopicHash) -> impl Iterator + '_ { self.ads.get(&topic).into_iter().flatten() } diff --git a/src/discv5.rs b/src/discv5.rs index bd9c19174..3ab14584c 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -533,6 +533,8 @@ impl 
Discv5 { } } + /// Removes a topic we do not wish to keep advertising on other nodes, effective + /// from the next interval on. pub fn remove_topic( &self, topic: String, @@ -554,6 +556,7 @@ impl Discv5 { } } + /// Registers a topic for the first time. pub fn reg_topic_req( &self, topic: String, @@ -586,6 +589,7 @@ impl Discv5 { } } + /// Retrieves the topics that we have published on other nodes. pub fn active_topics(&self) -> impl Future> + 'static { // the service will verify if this node is contactable, we just send it and // await a response. @@ -609,6 +613,8 @@ impl Discv5 { } } + /// Finds the relevant nodes to publish the topics on, as far away from the topic as + /// the bits configured by the Discv5 topic_radius distance in the Discv5 config. pub fn find_closest_nodes_to_topic( &self, topic_hash: TopicHash, diff --git a/src/metrics.rs b/src/metrics.rs index f456a9b06..b4e69c201 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -18,6 +18,10 @@ pub struct InternalMetrics { pub bytes_recv: AtomicUsize, /// The number of topics to attempt advertising on other nodes. pub topics_to_publish: AtomicUsize, + /// The number of ads currently advertised on other nodes. + pub active_ads: AtomicUsize, + /// The number of ads currently advertised locally for other nodes. + pub hosted_ads: AtomicUsize, } impl Default for InternalMetrics { @@ -29,6 +33,8 @@ impl Default for InternalMetrics { bytes_sent: AtomicUsize::new(0), bytes_recv: AtomicUsize::new(0), topics_to_publish: AtomicUsize::new(0), + active_ads: AtomicUsize::new(0), + hosted_ads: AtomicUsize::new(0), } } } @@ -60,6 +66,10 @@ pub struct Metrics { pub bytes_recv: usize, /// The number of topics to attempt advertising on other nodes. pub topics_to_publish: usize, + /// The number of ads currently advertised on other nodes. + pub active_ads: usize, + /// The number of ads currently advertised locally for other nodes. 
+ pub hosted_ads: usize, } impl From<&METRICS> for Metrics { @@ -73,6 +83,8 @@ impl From<&METRICS> for Metrics { bytes_sent: internal_metrics.bytes_sent.load(Ordering::Relaxed), bytes_recv: internal_metrics.bytes_recv.load(Ordering::Relaxed), topics_to_publish: internal_metrics.topics_to_publish.load(Ordering::Relaxed), + active_ads: internal_metrics.active_ads.load(Ordering::Relaxed), + hosted_ads: internal_metrics.hosted_ads.load(Ordering::Relaxed), } } } diff --git a/src/service.rs b/src/service.rs index 9368ed5a9..4cfd259d2 100644 --- a/src/service.rs +++ b/src/service.rs @@ -437,11 +437,8 @@ impl Service { } } ServiceRequest::RemoveTopic(topic_hash, callback) => { - let topic = if let Some(topic) = self.topics.remove(&topic_hash) { - Some(topic.topic()) - } else { - None - }; + let topic = self.topics.remove(&topic_hash).map(|topic| topic.topic()); + METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); if callback.send(Ok(topic)).is_err() { error!("Failed to return the removed topic"); } @@ -558,6 +555,7 @@ impl Service { NodeContact::from(node_record).node_address().map(|node_address| { self.send_regconfirmation_response(node_address, req_id, topic); }).ok(); + METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); } } } @@ -1063,6 +1061,9 @@ impl Service { ResponseBody::RegisterConfirmation { topic } => { if let NodeContact::Enr(enr) = active_request.contact { self.active_topics.insert(*enr, topic).ok(); + METRICS + .active_ads + .store(self.active_topics.len(), Ordering::Relaxed); } } } From 25be08c86564fb6cdc1a96cb507b07236a38b2a9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 15:07:20 +0200 Subject: [PATCH 113/391] Add metric for observing how many regtopic requets are active --- src/advertisement/ticket.rs | 8 ++++++++ src/metrics.rs | 6 ++++++ src/service.rs | 6 ++++++ 3 files changed, 20 insertions(+) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 227cd754b..90341500f 100644 --- 
a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -304,6 +304,14 @@ pub struct ActiveRegtopicRequests { } impl ActiveRegtopicRequests { + pub fn is_empty(&self) -> bool { + self.expirations.is_empty() + } + + pub fn len(&self) -> usize { + self.expirations.len() + } + pub fn remove(&mut self, req_id: &RequestId) -> Option { if let Some(seen_count) = self.request_history.get_mut(req_id) { *seen_count += 1; diff --git a/src/metrics.rs b/src/metrics.rs index b4e69c201..18cf464a9 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -22,6 +22,8 @@ pub struct InternalMetrics { pub active_ads: AtomicUsize, /// The number of ads currently advertised locally for other nodes. pub hosted_ads: AtomicUsize, + /// The number of active regtopic requests awaiting a REGCONFIRMATION response. + pub active_regtopic_req: AtomicUsize, } impl Default for InternalMetrics { @@ -35,6 +37,7 @@ impl Default for InternalMetrics { topics_to_publish: AtomicUsize::new(0), active_ads: AtomicUsize::new(0), hosted_ads: AtomicUsize::new(0), + active_regtopic_req: AtomicUsize::new(0), } } } @@ -70,6 +73,8 @@ pub struct Metrics { pub active_ads: usize, /// The number of ads currently advertised locally for other nodes. pub hosted_ads: usize, + /// The number of active regtopic requests. 
+ pub active_regtopic_req: usize, } impl From<&METRICS> for Metrics { @@ -85,6 +90,7 @@ impl From<&METRICS> for Metrics { topics_to_publish: internal_metrics.topics_to_publish.load(Ordering::Relaxed), active_ads: internal_metrics.active_ads.load(Ordering::Relaxed), hosted_ads: internal_metrics.hosted_ads.load(Ordering::Relaxed), + active_regtopic_req: internal_metrics.active_regtopic_req.load(Ordering::Relaxed), } } } diff --git a/src/service.rs b/src/service.rs index 4cfd259d2..a5c666dbc 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1064,6 +1064,9 @@ impl Service { METRICS .active_ads .store(self.active_topics.len(), Ordering::Relaxed); + METRICS + .active_regtopic_req + .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } } } @@ -1179,6 +1182,9 @@ impl Service { callback: None, }); self.active_regtopic_requests.insert(req_id, active_request); + METRICS + .active_regtopic_req + .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } fn topic_query_request( From 75618f4ef60a418327195fe570c94ac97f89fc28 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 15:43:07 +0200 Subject: [PATCH 114/391] Remove sisyphean selection process --- src/service.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/service.rs b/src/service.rs index a5c666dbc..f00a3a9a1 100644 --- a/src/service.rs +++ b/src/service.rs @@ -45,9 +45,10 @@ use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; use futures::prelude::*; use parking_lot::RwLock; +use rand::Rng; use rpc::*; use std::{ - collections::{HashMap, HashSet}, + collections::HashMap, io::{Error, ErrorKind}, net::SocketAddr, sync::{atomic::Ordering, Arc}, @@ -536,22 +537,21 @@ impl Service { } Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { let enr = self.local_enr.read().clone(); + // When the ticket time expires a new regtopic requet is automatically sent + // to the ticket issuer. 
self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } _ = publish_topics.tick() => { + // Topics are republished at regular intervals. self.topics.clone().into_iter().for_each(|(topic_hash, _)| self.start_findnode_query(NodeId::new(&topic_hash.as_bytes()), None)); } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { - // Selection of node for free ad slot - let kbucket_keys = self.kbuckets.write().iter().map(|entry| *entry.node.key.preimage()).collect::>(); - let selection = ticket_pool.keys().filter(|node_id| !kbucket_keys.contains(node_id)).collect::>(); - let new_ad: Option<&(Enr, RequestId, Ticket)> = if selection.is_empty() { - ticket_pool.values().next() - } else { - selection.into_iter().next().map(|node_id| ticket_pool.get(node_id)).unwrap_or(None) - }; - if let Some((node_record, req_id, _ticket)) = new_ad.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { - self.ads.insert(node_record.clone(), topic).ok(); + // No particular selection is carried out, the choice of node to give the free ad + // slot to is random. 
+ let random_index = rand::thread_rng().gen_range(0..ticket_pool.len()); + let ticket_pool = ticket_pool.values().step_by(random_index).next(); + if let Some((node_record, req_id, _ticket)) = ticket_pool.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { + self.ads.insert(node_record.clone(), topic).ok(); NodeContact::from(node_record).node_address().map(|node_address| { self.send_regconfirmation_response(node_address, req_id, topic); }).ok(); From 0534b23a35c73f94a5c7500ad5b41b60d2ae7e52 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 15:58:14 +0200 Subject: [PATCH 115/391] Remove possible attack vector of letting tickets without expired wait time be considered for ad slots --- src/service.rs | 69 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 28 deletions(-) diff --git a/src/service.rs b/src/service.rs index f00a3a9a1..9793b13c9 100644 --- a/src/service.rs +++ b/src/service.rs @@ -546,8 +546,8 @@ impl Service { self.topics.clone().into_iter().for_each(|(topic_hash, _)| self.start_findnode_query(NodeId::new(&topic_hash.as_bytes()), None)); } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { - // No particular selection is carried out, the choice of node to give the free ad - // slot to is random. + // No particular selection is carried out at this stage of implementation, the choice of node to give + // the free ad slot to is random. 
let random_index = rand::thread_rng().gen_range(0..ticket_pool.len()); let ticket_pool = ticket_pool.values().step_by(random_index).next(); if let Some((node_record, req_id, _ticket)) = ticket_pool.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { @@ -730,50 +730,62 @@ impl Service { if enr.node_id() == node_address.node_id && enr.udp_socket() == Some(node_address.socket_addr) { - let wait_time = self.ads.ticket_wait_time(topic); + let wait_time = self + .ads + .ticket_wait_time(topic) + .unwrap_or(Duration::from_secs(0)); let new_ticket = Ticket::new( node_address.node_id, node_address.socket_addr.ip(), topic, tokio::time::Instant::now(), - wait_time.unwrap_or(Duration::from_secs(0)), + wait_time, ); + // According to spec, a ticket should always be issued upon receiving a REGTOPIC request. self.send_ticket_response( node_address, id.clone(), new_ticket.clone(), - wait_time.unwrap_or(Duration::from_secs(0)), + wait_time, ); - if !ticket.is_empty() { - let decoded_enr = self - .local_enr - .write() - .to_base64() - .parse::() - .map_err(|e| { - error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) - }); - if let Ok(decoded_enr) = decoded_enr { - if let Some(ticket_key) = decoded_enr.get("ticket_key") { - let decrypted_ticket = { - let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); - let payload = Payload { - msg: &ticket, - aad: b"", - }; - aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) + // If the wait time has expired, the TICKET is added to the matching ticket pool. If this is + // the first REGTOPIC request from a given node for a given topic, the newly created ticket + // is used to add the registration attempt to to the ticket pool. + if wait_time <= Duration::from_secs(0) { + if !ticket.is_empty() { + let decoded_enr = self + .local_enr + .write() + .to_base64() + .parse::() + .map_err(|e| { + error!( + "Failed to decrypt ticket in REGTOPIC request. 
Error: {}", + e + ) + }); + if let Ok(decoded_enr) = decoded_enr { + if let Some(ticket_key) = decoded_enr.get("ticket_key") { + let decrypted_ticket = { + let aead = + Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", + }; + aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) .map_err(|e| { error!( "Failed to decrypt ticket in REGTOPIC request. Error: {}", e ) }) - }; - if let Ok(decrypted_ticket) = decrypted_ticket { - Ticket::decode(&decrypted_ticket) + }; + if let Ok(decrypted_ticket) = decrypted_ticket { + Ticket::decode(&decrypted_ticket) .map_err(|e| { error!( "Failed to decode ticket in REGTOPIC request. Error: {}", @@ -790,11 +802,12 @@ impl Service { } }) .ok(); + } } } + } else { + self.ticket_pools.insert(enr, id, new_ticket); } - } else { - self.ticket_pools.insert(enr, id, new_ticket); } } } From 6974a625f2f9681751acd7259309a6c71ebfd4c0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 16:32:46 +0200 Subject: [PATCH 116/391] Fill in missing docs --- src/config.rs | 3 +++ src/service.rs | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/src/config.rs b/src/config.rs index b859f51fd..3c20f0897 100644 --- a/src/config.rs +++ b/src/config.rs @@ -91,6 +91,9 @@ pub struct Discv5Config { /// will last indefinitely. Default is 1 hour. pub ban_duration: Option, + /// The max length in bits that the suffix of the topic hash is allowed to vary from the node ids that + /// REGTOPIC and TOPICQUERY requests are sent to. Setting it to 256 means that the requests are sent to + /// all of the nodes in the kbuckets. pub topic_radius: u64, /// A custom executor which can spawn the discv5 tasks. 
This must be a tokio runtime, with diff --git a/src/service.rs b/src/service.rs index 9793b13c9..2bfeeca08 100644 --- a/src/service.rs +++ b/src/service.rs @@ -822,6 +822,9 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); + // A REGTOPIC request can receive both a TICKET and then also possibly a REGCONFIRMATION + // response. If no active request exists in active_requests, the response may still be a + // REGCONFIRMATION response. let active_request = if let Some(active_request) = self.active_requests.remove(&id) { Some(active_request) } else { @@ -1164,6 +1167,7 @@ impl Service { self.send_rpc_request(active_request); } + /// Requests a node to advertise the sending node for a given topic hash. fn reg_topic_request( &mut self, contact: NodeContact, @@ -1200,6 +1204,7 @@ impl Service { .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } + /// Queries a node for the ads that node currently advertises for a given topic. fn topic_query_request( &mut self, contact: NodeContact, @@ -1217,6 +1222,7 @@ impl Service { self.send_rpc_request(active_request); } + /// The response sent to every REGTOPIC request, as according to spec. fn send_ticket_response( &mut self, node_address: NodeAddress, @@ -1262,6 +1268,8 @@ impl Service { .ok(); } + /// The response sent to a node which is selected out of a ticket pool of registrants + /// for a free ad slot. fn send_regconfirmation_response( &mut self, node_address: NodeAddress, @@ -1282,6 +1290,8 @@ impl Service { .send(HandlerIn::Response(node_address, Box::new(response))); } + /// Answer to a topic query containing the nodes currently advertised for the + /// requested topic if any. 
fn send_topic_query_response( &mut self, node_address: NodeAddress, From d750122f3b4ae813c4122e0243914043b66a97a1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 8 May 2022 16:40:20 +0200 Subject: [PATCH 117/391] Run cargo fmt --- src/config.rs | 4 ++-- src/service.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/config.rs b/src/config.rs index 3c20f0897..8f456ffec 100644 --- a/src/config.rs +++ b/src/config.rs @@ -91,8 +91,8 @@ pub struct Discv5Config { /// will last indefinitely. Default is 1 hour. pub ban_duration: Option, - /// The max length in bits that the suffix of the topic hash is allowed to vary from the node ids that - /// REGTOPIC and TOPICQUERY requests are sent to. Setting it to 256 means that the requests are sent to + /// The max length in bits that the suffix of the topic hash is allowed to vary from the node ids that + /// REGTOPIC and TOPICQUERY requests are sent to. Setting it to 256 means that the requests are sent to /// all of the nodes in the kbuckets. pub topic_radius: u64, diff --git a/src/service.rs b/src/service.rs index 2bfeeca08..c8f83db8a 100644 --- a/src/service.rs +++ b/src/service.rs @@ -822,7 +822,7 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); - // A REGTOPIC request can receive both a TICKET and then also possibly a REGCONFIRMATION + // A REGTOPIC request can receive both a TICKET and then also possibly a REGCONFIRMATION // response. If no active request exists in active_requests, the response may still be a // REGCONFIRMATION response. let active_request = if let Some(active_request) = self.active_requests.remove(&id) { @@ -1268,7 +1268,7 @@ impl Service { .ok(); } - /// The response sent to a node which is selected out of a ticket pool of registrants + /// The response sent to a node which is selected out of a ticket pool of registrants /// for a free ad slot. 
fn send_regconfirmation_response( &mut self, From 5495b9207c0c7ef86f51ae1788a298ce993ebbab Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 Jun 2022 10:30:27 +0200 Subject: [PATCH 118/391] Add a kbucket table per topic for ad registration --- src/service.rs | 324 +++++++++++++++++++++++++++---------------------- 1 file changed, 176 insertions(+), 148 deletions(-) diff --git a/src/service.rs b/src/service.rs index c8f83db8a..52b03cc1b 100644 --- a/src/service.rs +++ b/src/service.rs @@ -238,6 +238,9 @@ pub struct Service { /// Topics to advertise on other nodes. topics: HashMap, + /// KBuckets per topic hash. + topics_kbuckets: HashMap>, + /// Ads currently advertised on other nodes. active_topics: Ads, @@ -368,6 +371,7 @@ impl Service { ads, tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashMap::new(), + topics_kbuckets: HashMap::new(), active_topics, ticket_pools: TicketPools::default(), exit, @@ -387,178 +391,202 @@ impl Service { loop { tokio::select! { - _ = &mut self.exit => { - if let Some(exit) = self.handler_exit.take() { - let _ = exit.send(()); - info!("Discv5 Service shutdown"); + _ = &mut self.exit => { + if let Some(exit) = self.handler_exit.take() { + let _ = exit.send(()); + info!("Discv5 Service shutdown"); + } + return; } - return; - } - Some(service_request) = self.discv5_recv.recv() => { - match service_request { - ServiceRequest::StartQuery(query, callback) => { - match query { - QueryKind::FindNode { target_node } => { - self.start_findnode_query(target_node, Some(callback)); - } - QueryKind::Predicate { target_node, target_peer_no, predicate } => { - self.start_predicate_query(target_node, target_peer_no, predicate, Some(callback)); + Some(service_request) = self.discv5_recv.recv() => { + match service_request { + ServiceRequest::StartQuery(query, callback) => { + match query { + QueryKind::FindNode { target_node } => { + self.start_findnode_query(target_node, Some(callback)); + } + QueryKind::Predicate { target_node, 
target_peer_no, predicate } => { + self.start_predicate_query(target_node, target_peer_no, predicate, Some(callback)); + } } } - } - ServiceRequest::FindEnr(node_contact, callback) => { - self.request_enr(node_contact, Some(callback)); - } - ServiceRequest::Talk(node_contact, protocol, request, callback) => { - self.talk_request(node_contact, protocol, request, callback); - } - ServiceRequest::RequestEventStream(callback) => { - // the channel size needs to be large to handle many discovered peers - // if we are reporting them on the event stream. - let channel_size = if self.config.report_discovered_peers { 100 } else { 30 }; - let (event_stream, event_stream_recv) = mpsc::channel(channel_size); - self.event_stream = Some(event_stream); - if callback.send(event_stream_recv).is_err() { - error!("Failed to return the event stream channel"); + ServiceRequest::FindEnr(node_contact, callback) => { + self.request_enr(node_contact, Some(callback)); } - } - ServiceRequest::TopicQuery(node_contact, topic_hash, callback) => { - self.topic_query_request(node_contact, topic_hash, callback); - } - ServiceRequest::RegisterTopic(node_contact, topic) => { - let topic_hash = topic.hash(); - self.topics.insert(topic_hash, topic); - METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); - let local_enr = self.local_enr.read().clone(); - self.reg_topic_request(node_contact, topic_hash, local_enr, None) - } - ServiceRequest::ActiveTopics(callback) => { - if callback.send(Ok(self.active_topics.clone())).is_err() { - error!("Failed to return active topics"); + ServiceRequest::Talk(node_contact, protocol, request, callback) => { + self.talk_request(node_contact, protocol, request, callback); } - } - ServiceRequest::RemoveTopic(topic_hash, callback) => { - let topic = self.topics.remove(&topic_hash).map(|topic| topic.topic()); - METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); - if callback.send(Ok(topic)).is_err() { - error!("Failed to return the 
removed topic"); + ServiceRequest::RequestEventStream(callback) => { + // the channel size needs to be large to handle many discovered peers + // if we are reporting them on the event stream. + let channel_size = if self.config.report_discovered_peers { 100 } else { 30 }; + let (event_stream, event_stream_recv) = mpsc::channel(channel_size); + self.event_stream = Some(event_stream); + if callback.send(event_stream_recv).is_err() { + error!("Failed to return the event stream channel"); + } } - } - } - } - Some(event) = self.handler_recv.recv() => { - match event { - HandlerOut::Established(enr, direction) => { - self.inject_session_established(enr,direction); - } - HandlerOut::Request(node_address, request) => { - self.handle_rpc_request(node_address, *request); + ServiceRequest::TopicQuery(node_contact, topic_hash, callback) => { + self.topic_query_request(node_contact, topic_hash, callback); } - HandlerOut::Response(node_address, response) => { - self.handle_rpc_response(node_address, *response); + ServiceRequest::RegisterTopic(node_contact, topic) => { + let topic_hash = topic.hash(); + if self.topics.insert(topic_hash, topic).is_some() { + warn!("This topic is already being advertised"); + } else { + // NOTE: Currently we don't expose custom filter support in the configuration. Users can + // optionally use the IP filter via the ip_limit configuration parameter. In the future, we + // may expose this functionality to the users if there is demand for it. 
+ let (table_filter, bucket_filter) = if self.config.ip_limit { + ( + Some(Box::new(kbucket::IpTableFilter) as Box>), + Some(Box::new(kbucket::IpBucketFilter) as Box>), + ) + } else { + (None, None) + }; + + let kbuckets = KBucketsTable::new( + self.local_enr.read().node_id().into(), + Duration::from_secs(60), + self.config.incoming_bucket_limit, + table_filter, + bucket_filter, + ); + self.topics_kbuckets.insert(topic_hash, kbuckets); + } + + METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + //let local_enr = self.local_enr.read().clone(); + //self.reg_topic_request(node_contact, topic_hash, local_enr, None) } - HandlerOut::WhoAreYou(whoareyou_ref) => { - // check what our latest known ENR is for this node. - if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id) { - let _ = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, Some(known_enr))); - } else { - // do not know of this peer - debug!("NodeId unknown, requesting ENR. {}", whoareyou_ref.0); - let _ = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, None)); + ServiceRequest::ActiveTopics(callback) => { + if callback.send(Ok(self.active_topics.clone())).is_err() { + error!("Failed to return active topics"); + } } - } - HandlerOut::RequestFailed(request_id, error) => { - if let RequestError::Timeout = error { - debug!("RPC Request timed out. 
id: {}", request_id); - } else { - warn!("RPC Request failed: id: {}, error {:?}", request_id, error); + ServiceRequest::RemoveTopic(topic_hash, callback) => { + let topic = self.topics.remove(&topic_hash).map(|topic| topic.topic()); + METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + if callback.send(Ok(topic)).is_err() { + error!("Failed to return the removed topic"); + } } - self.rpc_failure(request_id, error); } } - } - event = Service::bucket_maintenance_poll(&self.kbuckets) => { - self.send_event(event); - } - query_event = Service::query_event_poll(&mut self.queries) => { - match query_event { - QueryEvent::Waiting(query_id, node_id, request_body) => { - self.send_rpc_query(query_id, node_id, *request_body); - } - // Note: Currently the distinction between a timed-out query and a finished - // query is superfluous, however it may be useful in future versions. - QueryEvent::Finished(query) | QueryEvent::TimedOut(query) => { - let id = query.id(); - let mut result = query.into_result(); - // obtain the ENR's for the resulting nodes - let mut found_enrs = Vec::new(); - for node_id in result.closest_peers { - if let Some(position) = result.target.untrusted_enrs.iter().position(|enr| enr.node_id() == node_id) { - let enr = result.target.untrusted_enrs.swap_remove(position); - found_enrs.push(enr); - } else if let Some(enr) = self.find_enr(&node_id) { - // look up from the routing table - found_enrs.push(enr); + Some(event) = self.handler_recv.recv() => { + match event { + HandlerOut::Established(enr, direction) => { + self.inject_session_established(enr,direction); + } + HandlerOut::Request(node_address, request) => { + self.handle_rpc_request(node_address, *request); + } + HandlerOut::Response(node_address, response) => { + self.handle_rpc_response(node_address, *response); } - else { - warn!("ENR not present in queries results"); + HandlerOut::WhoAreYou(whoareyou_ref) => { + // check what our latest known ENR is for this node. 
+ if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id) { + let _ = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, Some(known_enr))); + } else { + // do not know of this peer + debug!("NodeId unknown, requesting ENR. {}", whoareyou_ref.0); + let _ = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, None)); } } + HandlerOut::RequestFailed(request_id, error) => { + if let RequestError::Timeout = error { + debug!("RPC Request timed out. id: {}", request_id); + } else { + warn!("RPC Request failed: id: {}, error {:?}", request_id, error); + } + self.rpc_failure(request_id, error); + } + } + } + event = Service::bucket_maintenance_poll(&self.kbuckets) => { + self.send_event(event); + } + query_event = Service::query_event_poll(&mut self.queries) => { + match query_event { + QueryEvent::Waiting(query_id, node_id, request_body) => { + self.send_rpc_query(query_id, node_id, *request_body); + } + // Note: Currently the distinction between a timed-out query and a finished + // query is superfluous, however it may be useful in future versions. + QueryEvent::Finished(query) | QueryEvent::TimedOut(query) => { + let id = query.id(); + let mut result = query.into_result(); + // obtain the ENR's for the resulting nodes + let mut found_enrs = Vec::new(); + for node_id in result.closest_peers { + if let Some(position) = result.target.untrusted_enrs.iter().position(|enr| enr.node_id() == node_id) { + let enr = result.target.untrusted_enrs.swap_remove(position); + found_enrs.push(enr); + } else if let Some(enr) = self.find_enr(&node_id) { + // look up from the routing table + found_enrs.push(enr); + } + else { + warn!("ENR not present in queries results"); + } + } - if let Some(callback) = result.target.callback { - if callback.send(found_enrs).is_err() { - warn!("Callback dropped for query {}. Results dropped", *id); + if let Some(callback) = result.target.callback { + if callback.send(found_enrs).is_err() { + warn!("Callback dropped for query {}. 
Results dropped", *id); + } + } else { + let QueryType::FindNode(node_id) = result.target.query_type; + let topic = TopicHash::from_raw(node_id.raw()); + if self.topics.contains_key(&topic){ + let local_enr = self.local_enr.read().clone(); + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), None)); + } } - } else { - let QueryType::FindNode(node_id) = result.target.query_type; - let topic = TopicHash::from_raw(node_id.raw()); - if self.topics.contains_key(&topic){ - let local_enr = self.local_enr.read().clone(); - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), None)); - } } } } - } - Some(Ok(node_id)) = self.peers_to_ping.next() => { - // If the node is in the routing table, Ping it and re-queue the node. - let key = kbucket::Key::from(node_id); - let enr = { - if let kbucket::Entry::Present(entry, _) = self.kbuckets.write().entry(&key) { - // The peer is in the routing table, ping it and re-queue the ping - self.peers_to_ping.insert(node_id); - Some(entry.value().clone()) - } else { None } - }; + Some(Ok(node_id)) = self.peers_to_ping.next() => { + // If the node is in the routing table, Ping it and re-queue the node. + let key = kbucket::Key::from(node_id); + let enr = { + if let kbucket::Entry::Present(entry, _) = self.kbuckets.write().entry(&key) { + // The peer is in the routing table, ping it and re-queue the ping + self.peers_to_ping.insert(node_id); + Some(entry.value().clone()) + } else { None } + }; - if let Some(enr) = enr { - self.send_ping(enr); + if let Some(enr) = enr { + self.send_ping(enr); + } } - } - Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { - let enr = self.local_enr.read().clone(); - // When the ticket time expires a new regtopic requet is automatically sent - // to the ticket issuer. 
- self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); - } - _ = publish_topics.tick() => { - // Topics are republished at regular intervals. - self.topics.clone().into_iter().for_each(|(topic_hash, _)| self.start_findnode_query(NodeId::new(&topic_hash.as_bytes()), None)); - } - Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { - // No particular selection is carried out at this stage of implementation, the choice of node to give - // the free ad slot to is random. - let random_index = rand::thread_rng().gen_range(0..ticket_pool.len()); - let ticket_pool = ticket_pool.values().step_by(random_index).next(); - if let Some((node_record, req_id, _ticket)) = ticket_pool.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { - self.ads.insert(node_record.clone(), topic).ok(); - NodeContact::from(node_record).node_address().map(|node_address| { - self.send_regconfirmation_response(node_address, req_id, topic); - }).ok(); - METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); + Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { + let enr = self.local_enr.read().clone(); + // When the ticket time expires a new regtopic requet is automatically sent + // to the ticket issuer. + self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); + } + _ = publish_topics.tick() => { + // Topics are republished at regular intervals. + self.topics.clone().into_iter().for_each(|(topic_hash, _)| self.start_findnode_query(NodeId::new(&topic_hash.as_bytes()), None)); + } + Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { + // No particular selection is carried out at this stage of implementation, the choice of node to give + // the free ad slot to is random. 
+ let random_index = rand::thread_rng().gen_range(0..ticket_pool.len()); + let ticket_pool = ticket_pool.values().step_by(random_index).next(); + if let Some((node_record, req_id, _ticket)) = ticket_pool.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { + self.ads.insert(node_record.clone(), topic).ok(); + NodeContact::from(node_record).node_address().map(|node_address| { + self.send_regconfirmation_response(node_address, req_id, topic); + }).ok(); + METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); + } } } - } } } From 31aa8417a954bacdd297795e6246f6b5199d893a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 Jun 2022 12:02:44 +0200 Subject: [PATCH 119/391] Separate topics and standard kbuckets on service level --- src/discv5.rs | 27 +-- src/handler/mod.rs | 4 + src/service.rs | 408 ++++++++++++++++++++++++-------------------- src/service/test.rs | 1 + 4 files changed, 239 insertions(+), 201 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 3ab14584c..04779c49d 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -562,29 +562,18 @@ impl Discv5 { topic: String, ) -> impl Future> + 'static { let topic = Topic::new(topic); - let find_future = self.find_closest_nodes_to_topic(topic.hash()); let channel = self.clone_channel(); async move { - // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, - // and send the regtopic to these nodes - let enrs = find_future + let channel = channel + .as_ref() + .map_err(|_| RequestError::ServiceNotStarted)?; + let event = ServiceRequest::RegisterTopic(topic.clone()); + // send the request + channel + .send(event) .await - .map_err(|e| RequestError::TopicDistance(e.to_string()))?; - - // convert the ENR to a node_contact. 
- for enr in enrs.into_iter() { - let node_contact = NodeContact::from(enr); - let channel = channel - .as_ref() - .map_err(|_| RequestError::ServiceNotStarted)?; - let event = ServiceRequest::RegisterTopic(node_contact, topic.clone()); - // send the request - channel - .send(event) - .await - .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; - } + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; Ok(()) } } diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 91d2446ee..024d404df 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -27,6 +27,7 @@ //! Messages from the a node on the network come by [`Socket`] and get the form of a [`HandlerOut`] //! and can be forwarded to the application layer via the send channel. use crate::{ + advertisement::topic::TopicHash, config::Discv5Config, discv5::PERMIT_BAN_LIST, error::{Discv5Error, RequestError}, @@ -110,6 +111,9 @@ pub enum HandlerOut { /// node and received messages from it's `SocketAddr` matching it's ENR fields. Established(Enr, ConnectionDirection), + /// A session has been established for the purpose of publishing advertisements. + EstablishedTopic(Enr, ConnectionDirection, TopicHash), + /// A Request has been received from a node on the network. Request(NodeAddress, Box), diff --git a/src/service.rs b/src/service.rs index 52b03cc1b..e8887d7cb 100644 --- a/src/service.rs +++ b/src/service.rs @@ -44,6 +44,7 @@ use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; use futures::prelude::*; +use more_asserts::debug_unreachable; use parking_lot::RwLock; use rand::Rng; use rpc::*; @@ -168,7 +169,7 @@ pub enum ServiceRequest { oneshot::Sender, RequestError>>, ), /// RegisterTopic publishes this node as an advertiser for a topic at given node - RegisterTopic(NodeContact, Topic), + RegisterTopic(Topic), ActiveTopics(oneshot::Sender>), RemoveTopic( TopicHash, @@ -391,202 +392,203 @@ impl Service { loop { tokio::select! 
{ - _ = &mut self.exit => { - if let Some(exit) = self.handler_exit.take() { - let _ = exit.send(()); - info!("Discv5 Service shutdown"); - } - return; + _ = &mut self.exit => { + if let Some(exit) = self.handler_exit.take() { + let _ = exit.send(()); + info!("Discv5 Service shutdown"); } - Some(service_request) = self.discv5_recv.recv() => { - match service_request { - ServiceRequest::StartQuery(query, callback) => { - match query { - QueryKind::FindNode { target_node } => { - self.start_findnode_query(target_node, Some(callback)); - } - QueryKind::Predicate { target_node, target_peer_no, predicate } => { - self.start_predicate_query(target_node, target_peer_no, predicate, Some(callback)); - } + return; + } + Some(service_request) = self.discv5_recv.recv() => { + match service_request { + ServiceRequest::StartQuery(query, callback) => { + match query { + QueryKind::FindNode { target_node } => { + self.start_findnode_query(target_node, Some(callback)); } - } - ServiceRequest::FindEnr(node_contact, callback) => { - self.request_enr(node_contact, Some(callback)); - } - ServiceRequest::Talk(node_contact, protocol, request, callback) => { - self.talk_request(node_contact, protocol, request, callback); - } - ServiceRequest::RequestEventStream(callback) => { - // the channel size needs to be large to handle many discovered peers - // if we are reporting them on the event stream. 
- let channel_size = if self.config.report_discovered_peers { 100 } else { 30 }; - let (event_stream, event_stream_recv) = mpsc::channel(channel_size); - self.event_stream = Some(event_stream); - if callback.send(event_stream_recv).is_err() { - error!("Failed to return the event stream channel"); + QueryKind::Predicate { target_node, target_peer_no, predicate } => { + self.start_predicate_query(target_node, target_peer_no, predicate, Some(callback)); } } - ServiceRequest::TopicQuery(node_contact, topic_hash, callback) => { - self.topic_query_request(node_contact, topic_hash, callback); + } + ServiceRequest::FindEnr(node_contact, callback) => { + self.request_enr(node_contact, Some(callback)); + } + ServiceRequest::Talk(node_contact, protocol, request, callback) => { + self.talk_request(node_contact, protocol, request, callback); + } + ServiceRequest::RequestEventStream(callback) => { + // the channel size needs to be large to handle many discovered peers + // if we are reporting them on the event stream. + let channel_size = if self.config.report_discovered_peers { 100 } else { 30 }; + let (event_stream, event_stream_recv) = mpsc::channel(channel_size); + self.event_stream = Some(event_stream); + if callback.send(event_stream_recv).is_err() { + error!("Failed to return the event stream channel"); } - ServiceRequest::RegisterTopic(node_contact, topic) => { - let topic_hash = topic.hash(); - if self.topics.insert(topic_hash, topic).is_some() { - warn!("This topic is already being advertised"); + } + ServiceRequest::TopicQuery(node_contact, topic_hash, callback) => { + self.topic_query_request(node_contact, topic_hash, callback); + } + ServiceRequest::RegisterTopic(topic) => { + let topic_hash = topic.hash(); + if self.topics.insert(topic_hash, topic).is_some() { + warn!("This topic is already being advertised"); + } else { + // NOTE: Currently we don't expose custom filter support in the configuration. 
Users can + // optionally use the IP filter via the ip_limit configuration parameter. In the future, we + // may expose this functionality to the users if there is demand for it. + let (table_filter, bucket_filter) = if self.config.ip_limit { + ( + Some(Box::new(kbucket::IpTableFilter) as Box>), + Some(Box::new(kbucket::IpBucketFilter) as Box>), + ) } else { - // NOTE: Currently we don't expose custom filter support in the configuration. Users can - // optionally use the IP filter via the ip_limit configuration parameter. In the future, we - // may expose this functionality to the users if there is demand for it. - let (table_filter, bucket_filter) = if self.config.ip_limit { - ( - Some(Box::new(kbucket::IpTableFilter) as Box>), - Some(Box::new(kbucket::IpBucketFilter) as Box>), - ) - } else { - (None, None) - }; - - let kbuckets = KBucketsTable::new( - self.local_enr.read().node_id().into(), - Duration::from_secs(60), - self.config.incoming_bucket_limit, - table_filter, - bucket_filter, - ); - self.topics_kbuckets.insert(topic_hash, kbuckets); - } - - METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); - //let local_enr = self.local_enr.read().clone(); - //self.reg_topic_request(node_contact, topic_hash, local_enr, None) + (None, None) + }; + + let kbuckets = KBucketsTable::new( + self.local_enr.read().node_id().into(), + Duration::from_secs(60), + self.config.incoming_bucket_limit, + table_filter, + bucket_filter, + ); + self.topics_kbuckets.insert(topic_hash, kbuckets); } - ServiceRequest::ActiveTopics(callback) => { - if callback.send(Ok(self.active_topics.clone())).is_err() { - error!("Failed to return active topics"); - } + + METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + } + ServiceRequest::ActiveTopics(callback) => { + if callback.send(Ok(self.active_topics.clone())).is_err() { + error!("Failed to return active topics"); } - ServiceRequest::RemoveTopic(topic_hash, callback) => { - let topic = 
self.topics.remove(&topic_hash).map(|topic| topic.topic()); - METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); - if callback.send(Ok(topic)).is_err() { - error!("Failed to return the removed topic"); - } + } + ServiceRequest::RemoveTopic(topic_hash, callback) => { + let topic = self.topics.remove(&topic_hash).map(|topic| topic.topic()); + METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + if callback.send(Ok(topic)).is_err() { + error!("Failed to return the removed topic"); } } } - Some(event) = self.handler_recv.recv() => { - match event { - HandlerOut::Established(enr, direction) => { - self.inject_session_established(enr,direction); + } + Some(event) = self.handler_recv.recv() => { + match event { + HandlerOut::Established(enr, direction) => { + self.inject_session_established(enr, direction, None); + } + HandlerOut::EstablishedTopic(enr, direction, topic_hash) => { + self.inject_session_established(enr, direction, Some(topic_hash)); + } + HandlerOut::Request(node_address, request) => { + self.handle_rpc_request(node_address, *request); } - HandlerOut::Request(node_address, request) => { - self.handle_rpc_request(node_address, *request); - } - HandlerOut::Response(node_address, response) => { - self.handle_rpc_response(node_address, *response); - } - HandlerOut::WhoAreYou(whoareyou_ref) => { - // check what our latest known ENR is for this node. - if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id) { - let _ = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, Some(known_enr))); - } else { - // do not know of this peer - debug!("NodeId unknown, requesting ENR. {}", whoareyou_ref.0); - let _ = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, None)); - } + HandlerOut::Response(node_address, response) => { + self.handle_rpc_response(node_address, *response); } - HandlerOut::RequestFailed(request_id, error) => { - if let RequestError::Timeout = error { - debug!("RPC Request timed out. 
id: {}", request_id); - } else { - warn!("RPC Request failed: id: {}, error {:?}", request_id, error); - } - self.rpc_failure(request_id, error); + HandlerOut::WhoAreYou(whoareyou_ref) => { + // check what our latest known ENR is for this node. + if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id) { + let _ = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, Some(known_enr))); + } else { + // do not know of this peer + debug!("NodeId unknown, requesting ENR. {}", whoareyou_ref.0); + let _ = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, None)); } } - } - event = Service::bucket_maintenance_poll(&self.kbuckets) => { - self.send_event(event); - } - query_event = Service::query_event_poll(&mut self.queries) => { - match query_event { - QueryEvent::Waiting(query_id, node_id, request_body) => { - self.send_rpc_query(query_id, node_id, *request_body); + HandlerOut::RequestFailed(request_id, error) => { + if let RequestError::Timeout = error { + debug!("RPC Request timed out. id: {}", request_id); + } else { + warn!("RPC Request failed: id: {}, error {:?}", request_id, error); } - // Note: Currently the distinction between a timed-out query and a finished - // query is superfluous, however it may be useful in future versions. 
- QueryEvent::Finished(query) | QueryEvent::TimedOut(query) => { - let id = query.id(); - let mut result = query.into_result(); - // obtain the ENR's for the resulting nodes - let mut found_enrs = Vec::new(); - for node_id in result.closest_peers { - if let Some(position) = result.target.untrusted_enrs.iter().position(|enr| enr.node_id() == node_id) { - let enr = result.target.untrusted_enrs.swap_remove(position); - found_enrs.push(enr); - } else if let Some(enr) = self.find_enr(&node_id) { - // look up from the routing table - found_enrs.push(enr); - } - else { - warn!("ENR not present in queries results"); - } + self.rpc_failure(request_id, error); + } + } + } + event = Service::bucket_maintenance_poll(&self.kbuckets) => { + self.send_event(event); + } + query_event = Service::query_event_poll(&mut self.queries) => { + match query_event { + QueryEvent::Waiting(query_id, node_id, request_body) => { + self.send_rpc_query(query_id, node_id, *request_body); + } + // Note: Currently the distinction between a timed-out query and a finished + // query is superfluous, however it may be useful in future versions. + QueryEvent::Finished(query) | QueryEvent::TimedOut(query) => { + let id = query.id(); + let mut result = query.into_result(); + // obtain the ENR's for the resulting nodes + let mut found_enrs = Vec::new(); + for node_id in result.closest_peers { + if let Some(position) = result.target.untrusted_enrs.iter().position(|enr| enr.node_id() == node_id) { + let enr = result.target.untrusted_enrs.swap_remove(position); + found_enrs.push(enr); + } else if let Some(enr) = self.find_enr(&node_id) { + // look up from the routing table + found_enrs.push(enr); + } + else { + warn!("ENR not present in queries results"); } + } - if let Some(callback) = result.target.callback { - if callback.send(found_enrs).is_err() { - warn!("Callback dropped for query {}. 
Results dropped", *id); - } - } else { - let QueryType::FindNode(node_id) = result.target.query_type; - let topic = TopicHash::from_raw(node_id.raw()); - if self.topics.contains_key(&topic){ - let local_enr = self.local_enr.read().clone(); - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), None)); - } + if let Some(callback) = result.target.callback { + if callback.send(found_enrs).is_err() { + warn!("Callback dropped for query {}. Results dropped", *id); } + } else { + let QueryType::FindNode(node_id) = result.target.query_type; + let topic = TopicHash::from_raw(node_id.raw()); + if self.topics.contains_key(&topic){ + let local_enr = self.local_enr.read().clone(); + found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), None)); + } } } } - Some(Ok(node_id)) = self.peers_to_ping.next() => { - // If the node is in the routing table, Ping it and re-queue the node. - let key = kbucket::Key::from(node_id); - let enr = { - if let kbucket::Entry::Present(entry, _) = self.kbuckets.write().entry(&key) { - // The peer is in the routing table, ping it and re-queue the ping - self.peers_to_ping.insert(node_id); - Some(entry.value().clone()) - } else { None } - }; + } + Some(Ok(node_id)) = self.peers_to_ping.next() => { + // If the node is in the routing table, Ping it and re-queue the node. + let key = kbucket::Key::from(node_id); + let enr = { + if let kbucket::Entry::Present(entry, _) = self.kbuckets.write().entry(&key) { + // The peer is in the routing table, ping it and re-queue the ping + self.peers_to_ping.insert(node_id); + Some(entry.value().clone()) + } else { None } + }; - if let Some(enr) = enr { - self.send_ping(enr); - } - } - Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { - let enr = self.local_enr.read().clone(); - // When the ticket time expires a new regtopic requet is automatically sent - // to the ticket issuer. 
- self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); + if let Some(enr) = enr { + self.send_ping(enr); } - _ = publish_topics.tick() => { - // Topics are republished at regular intervals. - self.topics.clone().into_iter().for_each(|(topic_hash, _)| self.start_findnode_query(NodeId::new(&topic_hash.as_bytes()), None)); - } - Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { - // No particular selection is carried out at this stage of implementation, the choice of node to give - // the free ad slot to is random. - let random_index = rand::thread_rng().gen_range(0..ticket_pool.len()); - let ticket_pool = ticket_pool.values().step_by(random_index).next(); - if let Some((node_record, req_id, _ticket)) = ticket_pool.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { - self.ads.insert(node_record.clone(), topic).ok(); - NodeContact::from(node_record).node_address().map(|node_address| { - self.send_regconfirmation_response(node_address, req_id, topic); - }).ok(); - METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); - } + } + Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { + let enr = self.local_enr.read().clone(); + // When the ticket time expires a new regtopic requet is automatically sent + // to the ticket issuer. + self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); + } + _ = publish_topics.tick() => { + // Topics are republished at regular intervals. + self.topics.clone().into_iter().for_each(|(topic_hash, _)| self.start_findnode_query(NodeId::new(&topic_hash.as_bytes()), None)); + } + Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { + // No particular selection is carried out at this stage of implementation, the choice of node to give + // the free ad slot to is random. 
+ let random_index = rand::thread_rng().gen_range(0..ticket_pool.len()); + let ticket_pool = ticket_pool.values().step_by(random_index).next(); + if let Some((node_record, req_id, _ticket)) = ticket_pool.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { + self.ads.insert(node_record.clone(), topic).ok(); + NodeContact::from(node_record).node_address().map(|node_address| { + self.send_regconfirmation_response(node_address, req_id, topic); + }).ok(); + METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); } } + } } } @@ -1074,7 +1076,7 @@ impl Service { }; self.send_rpc_request(active_request); } - self.connection_updated(node_id, ConnectionStatus::PongReceived(enr)); + self.connection_updated(node_id, ConnectionStatus::PongReceived(enr), None); } } ResponseBody::Talk { response } => { @@ -1586,11 +1588,27 @@ impl Service { /// Update the connection status of a node in the routing table. /// This tracks whether or not we should be pinging peers. Disconnected peers are removed from /// the queue and newly added peers to the routing table are added to the queue. 
- fn connection_updated(&mut self, node_id: NodeId, new_status: ConnectionStatus) { + fn connection_updated( + &mut self, + node_id: NodeId, + new_status: ConnectionStatus, + topic_hash: Option, + ) { // Variables to that may require post-processing let mut ping_peer = None; let mut event_to_send = None; + let kbuckets_topic = if let Some(topic_hash) = topic_hash { + if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { + Some(kbuckets) + } else { + debug_unreachable!("A kbuckets table should exist for topic hash"); + None + } + } else { + None + }; + let key = kbucket::Key::from(node_id); match new_status { ConnectionStatus::Connected(enr, direction) => { @@ -1599,7 +1617,12 @@ impl Service { state: ConnectionState::Connected, direction, }; - match self.kbuckets.write().insert_or_update(&key, enr, status) { + let insert_result = if let Some(kbuckets) = kbuckets_topic { + kbuckets.insert_or_update(&key, enr, status) + } else { + self.kbuckets.write().insert_or_update(&key, enr, status) + }; + match insert_result { InsertResult::Inserted => { // We added this peer to the table debug!("New connected node added to routing table: {}", node_id); @@ -1651,12 +1674,17 @@ impl Service { } } ConnectionStatus::Disconnected => { + let update_result = if let Some(kbuckets) = kbuckets_topic { + kbuckets.update_node_status(&key, ConnectionState::Disconnected, None) + } else { + self.kbuckets.write().update_node_status( + &key, + ConnectionState::Disconnected, + None, + ) + }; // If the node has disconnected, remove any ping timer for the node. - match self.kbuckets.write().update_node_status( - &key, - ConnectionState::Disconnected, - None, - ) { + match update_result { UpdateResult::Failed(reason) => match reason { FailureReason::KeyNonExistant => {} others => { @@ -1699,7 +1727,12 @@ impl Service { /// The equivalent of libp2p `inject_connected()` for a udp session. We have no stream, but a /// session key-pair has been negotiated. 
- fn inject_session_established(&mut self, enr: Enr, direction: ConnectionDirection) { + fn inject_session_established( + &mut self, + enr: Enr, + direction: ConnectionDirection, + topic_hash: Option, + ) { // Ignore sessions with non-contactable ENRs if enr.udp_socket().is_none() { return; @@ -1710,7 +1743,11 @@ impl Service { "Session established with Node: {}, direction: {}", node_id, direction ); - self.connection_updated(node_id, ConnectionStatus::Connected(enr, direction)); + self.connection_updated( + node_id, + ConnectionStatus::Connected(enr, direction), + topic_hash, + ); } /// A session could not be established or an RPC request timed-out (after a few retries, if @@ -1798,7 +1835,14 @@ impl Service { } } - self.connection_updated(node_id, ConnectionStatus::Disconnected); + match active_request.request_body { + RequestBody::RegisterTopic { + topic, + enr: _, + ticket: _, + } => self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)), + _ => self.connection_updated(node_id, ConnectionStatus::Disconnected, None), + } } } diff --git a/src/service/test.rs b/src/service/test.rs index 3c127331a..c9187793b 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -101,6 +101,7 @@ async fn build_service( ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashMap::new(), + topics_kbuckets: HashMap::new(), active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), ticket_pools: TicketPools::default(), exit, From 848b8a4fd46befbdf7d7c4c1fd9358e8bdc134aa Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 Jun 2022 15:04:02 +0200 Subject: [PATCH 120/391] Separate topics kbuckets from standard kbuckets on REGTOPIC sender side --- src/handler/mod.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 024d404df..3d18642e4 100644 --- a/src/handler/mod.rs +++ 
b/src/handler/mod.rs @@ -97,7 +97,7 @@ pub enum HandlerIn { /// A Random packet has been received and we have requested the application layer to inform /// us what the highest known ENR is for this node. - /// The `WhoAreYouRef` is sent out in the `HandlerResponse::WhoAreYou` event and should + /// The `WhoAreYouRef` is sent to handler via the `HandlerIn::WhoAreYou` event and should /// be returned here to submit the application's response. WhoAreYou(WhoAreYouRef, Option), } @@ -121,7 +121,7 @@ pub enum HandlerOut { Response(NodeAddress, Box), /// An unknown source has requested information from us. Return the reference with the known - /// ENR of this node (if known). See the `HandlerRequest::WhoAreYou` variant. + /// ENR of this node (if known). See the `HandlerOut::WhoAreYou` variant. WhoAreYou(WhoAreYouRef), /// An RPC request failed. @@ -154,7 +154,7 @@ pub struct Challenge { } /// A request to a node that we are waiting for a response. -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct RequestCall { contact: NodeContact, /// The raw discv5 packet sent. @@ -525,7 +525,7 @@ impl Handler { } } - /// This is called in response to a `HandlerResponse::WhoAreYou` event. The applications finds the + /// This is called in response to a `HandlerIn::WhoAreYou` event. The applications finds the /// highest known ENR for a node then we respond to the node with a WHOAREYOU packet. async fn send_challenge(&mut self, wru_ref: WhoAreYouRef, remote_enr: Option) { let node_address = wru_ref.0; @@ -687,13 +687,17 @@ impl Handler { request_call.handshake_sent = true; request_call.initiating_session = false; // Reinsert the request_call - self.insert_active_request(request_call); + self.insert_active_request(request_call.clone()); // Send the actual packet to the send task. 
self.send(node_address.clone(), auth_packet).await; // Notify the application that the session has been established + let kbucket_addition = match request_call.request.body { + RequestBody::RegisterTopic{topic, enr: _, ticket: _} => HandlerOut::EstablishedTopic(*enr, connection_direction, topic), + _ => HandlerOut::Established(*enr, connection_direction), + }; self.service_send - .send(HandlerOut::Established(*enr, connection_direction)) + .send(kbucket_addition) .await .unwrap_or_else(|e| warn!("Error with sending channel: {}", e)); } From 092a85c243660bc726e1422b1aa80efbe7c98c63 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 Jun 2022 19:02:29 +0200 Subject: [PATCH 121/391] Add placeholder logic for register topics --- src/handler/mod.rs | 6 +++++- src/service.rs | 29 +++++++++++++++++++++++++---- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 3d18642e4..accdb9ced 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -693,7 +693,11 @@ impl Handler { // Notify the application that the session has been established let kbucket_addition = match request_call.request.body { - RequestBody::RegisterTopic{topic, enr: _, ticket: _} => HandlerOut::EstablishedTopic(*enr, connection_direction, topic), + RequestBody::RegisterTopic { + topic, + enr: _, + ticket: _, + } => HandlerOut::EstablishedTopic(*enr, connection_direction, topic), _ => HandlerOut::Established(*enr, connection_direction), }; self.service_send diff --git a/src/service.rs b/src/service.rs index e8887d7cb..817e1db6f 100644 --- a/src/service.rs +++ b/src/service.rs @@ -455,9 +455,9 @@ impl Service { bucket_filter, ); self.topics_kbuckets.insert(topic_hash, kbuckets); + METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + self.register_topic(topic_hash); } - - METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); } ServiceRequest::ActiveTopics(callback) => { if 
callback.send(Ok(self.active_topics.clone())).is_err() { @@ -572,8 +572,8 @@ impl Service { self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } _ = publish_topics.tick() => { - // Topics are republished at regular intervals. - self.topics.clone().into_iter().for_each(|(topic_hash, _)| self.start_findnode_query(NodeId::new(&topic_hash.as_bytes()), None)); + // Topics are republished at regular intervals. + self.topics.clone().keys().for_each(|topic_hash| self.register_topic(*topic_hash)); } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { // No particular selection is carried out at this stage of implementation, the choice of node to give @@ -592,6 +592,27 @@ impl Service { } } + fn register_topic(&mut self, topic_hash: TopicHash) { + // Placeholder for ad distribution logic, X random nodes from bucket at furthest distance + // are sent REGTOPICs, then decreasing by half for each distance range approaching 0 (topic id). + if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { + kbuckets + .iter() + .map(|entry| entry.node.value.clone()) + .for_each(|remote_enr| { + let local_enr = self.local_enr.read().clone(); + self.reg_topic_request( + NodeContact::from(remote_enr), + topic_hash, + local_enr, + None, + ) + }); + } else { + debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); + } + } + /// Internal function that starts a query. 
fn start_findnode_query( &mut self, From 539c2cbd9b34c6e75981403d3c41336a9d9b600e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 10 Jun 2022 12:55:29 +0200 Subject: [PATCH 122/391] Set kbuckets distance from topic hash --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 817e1db6f..b0075b2ef 100644 --- a/src/service.rs +++ b/src/service.rs @@ -448,7 +448,7 @@ impl Service { }; let kbuckets = KBucketsTable::new( - self.local_enr.read().node_id().into(), + NodeId::new(&topic_hash.as_bytes()).into(), Duration::from_secs(60), self.config.incoming_bucket_limit, table_filter, From d912fcb9fde62f4c1a1dd1dc4cb63e4dfa5202a2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 10 Jun 2022 17:37:03 +0200 Subject: [PATCH 123/391] Fill topic kbuckets from standard kbuckets upon insert --- src/service.rs | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index b0075b2ef..ec66e4408 100644 --- a/src/service.rs +++ b/src/service.rs @@ -447,13 +447,30 @@ impl Service { (None, None) }; - let kbuckets = KBucketsTable::new( + let mut kbuckets = KBucketsTable::new( NodeId::new(&topic_hash.as_bytes()).into(), Duration::from_secs(60), self.config.incoming_bucket_limit, table_filter, bucket_filter, ); + self.kbuckets.write().iter().for_each(|entry| { + match kbuckets.insert_or_update( + entry.node.key, + entry.node.value.clone(), + NodeStatus { + state: ConnectionState::Disconnected, + direction: ConnectionDirection::Incoming, + }, + ) { + InsertResult::Failed(FailureReason::BucketFull) => error!("Table full"), + InsertResult::Failed(FailureReason::BucketFilter) => error!("Failed bucket filter"), + InsertResult::Failed(FailureReason::TableFilter) => error!("Failed table filter"), + InsertResult::Failed(FailureReason::InvalidSelfUpdate) => error!("Invalid self update"), + InsertResult::Failed(_) => error!("Failed to insert ENR"), + _ => {}, + } 
+ }); self.topics_kbuckets.insert(topic_hash, kbuckets); METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); self.register_topic(topic_hash); @@ -573,7 +590,7 @@ impl Service { } _ = publish_topics.tick() => { // Topics are republished at regular intervals. - self.topics.clone().keys().for_each(|topic_hash| self.register_topic(*topic_hash)); + //self.topics.clone().keys().for_each(|topic_hash| self.register_topic(*topic_hash)); } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { // No particular selection is carried out at this stage of implementation, the choice of node to give From fb0312446ed739780ab285e9b37fdf857831bc1b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 10 Jun 2022 18:02:57 +0200 Subject: [PATCH 124/391] Add kbuckets for topic query --- src/service.rs | 61 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/src/service.rs b/src/service.rs index ec66e4408..ac3f70839 100644 --- a/src/service.rs +++ b/src/service.rs @@ -428,7 +428,47 @@ impl Service { } } ServiceRequest::TopicQuery(node_contact, topic_hash, callback) => { - self.topic_query_request(node_contact, topic_hash, callback); + // If we look up the topic hash for the first time we initialise its kbuckets. + if !self.topics_kbuckets.contains_key(&topic_hash) { + // NOTE: Currently we don't expose custom filter support in the configuration. Users can + // optionally use the IP filter via the ip_limit configuration parameter. In the future, we + // may expose this functionality to the users if there is demand for it. 
+ let (table_filter, bucket_filter) = if self.config.ip_limit { + ( + Some(Box::new(kbucket::IpTableFilter) as Box>), + Some(Box::new(kbucket::IpBucketFilter) as Box>), + ) + } else { + (None, None) + }; + + let mut kbuckets = KBucketsTable::new( + NodeId::new(&topic_hash.as_bytes()).into(), + Duration::from_secs(60), + self.config.incoming_bucket_limit, + table_filter, + bucket_filter, + ); + self.kbuckets.write().iter().for_each(|entry| { + match kbuckets.insert_or_update( + entry.node.key, + entry.node.value.clone(), + NodeStatus { + state: ConnectionState::Disconnected, + direction: ConnectionDirection::Incoming, + }, + ) { + InsertResult::Failed(FailureReason::BucketFull) => error!("Table full"), + InsertResult::Failed(FailureReason::BucketFilter) => error!("Failed bucket filter"), + InsertResult::Failed(FailureReason::TableFilter) => error!("Failed table filter"), + InsertResult::Failed(FailureReason::InvalidSelfUpdate) => error!("Invalid self update"), + InsertResult::Failed(_) => error!("Failed to insert ENR"), + _ => {}, + } + }); + self.topics_kbuckets.insert(topic_hash, kbuckets); + } + self.topic_query(topic_hash, callback); } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); @@ -609,17 +649,28 @@ impl Service { } } + fn topic_query(&mut self, topic_hash: TopicHash, callback: oneshot::Sender, RequestError>>) { + if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { + kbuckets + .iter() + .for_each(|entry| { + self.topic_query_request(NodeContact::from(entry.node.value.clone()), topic_hash, callback); + }); + } else { + debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); + } + } + fn register_topic(&mut self, topic_hash: TopicHash) { // Placeholder for ad distribution logic, X random nodes from bucket at furthest distance - // are sent REGTOPICs, then decreasing by half for each distance range approaching 0 (topic id). 
+ // are sent REGTOPICs, then decreasing by some number for each distance range approaching 0 (topic id). if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { kbuckets .iter() - .map(|entry| entry.node.value.clone()) - .for_each(|remote_enr| { + .for_each(|entry| { let local_enr = self.local_enr.read().clone(); self.reg_topic_request( - NodeContact::from(remote_enr), + NodeContact::from(entry.node.value.clone()), topic_hash, local_enr, None, From 7e2b06ef097980294a8d057d150feae503b31249 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 10 Jun 2022 19:03:47 +0200 Subject: [PATCH 125/391] Send TopicQuery to topic buckets --- src/discv5.rs | 50 +++++++++++--------------- src/query_pool.rs | 17 +++++++++ src/service.rs | 76 +++++++++++++++++++++++++-------------- src/service/query_info.rs | 8 +++-- 4 files changed, 93 insertions(+), 58 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 04779c49d..c3f38ff90 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -497,37 +497,27 @@ impl Discv5 { async move { let mut all_found_ad_nodes = Vec::new(); - // Use find_topic to find the Enrs the shortest XOR distance from the topic hash, - // and send the topic query to these nodes - let enrs = find_future + + // the service will verify if this node is contactable, we just send it and + // await a response. + let (callback_send, callback_recv) = oneshot::channel(); + + let event = ServiceRequest::TopicQuery(topic_hash, callback_send); + let channel = channel + .as_ref() + .map_err(|_| RequestError::ServiceNotStarted)?; + + // send the request + channel + .send(event) .await - .map_err(|e| RequestError::TopicDistance(e.to_string()))?; - - for enr in enrs.into_iter() { - // convert the ENR to a node_contact. - let node_contact = NodeContact::from(enr); - - // the service will verify if this node is contactable, we just send it and - // await a response. 
- let (callback_send, callback_recv) = oneshot::channel(); - - let event = ServiceRequest::TopicQuery(node_contact, topic_hash, callback_send); - let channel = channel - .as_ref() - .map_err(|_| RequestError::ServiceNotStarted)?; - - // send the request - channel - .send(event) - .await - .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; - // await the response - callback_recv - .await - .map_err(|e| RequestError::ChannelFailed(e.to_string()))? - .map(|ad_nodes| all_found_ad_nodes.push(ad_nodes)) - .ok(); - } + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + // await the response + callback_recv + .await + .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + .map(|ad_nodes| all_found_ad_nodes.push(ad_nodes)) + .ok(); let all_found_ad_nodes = all_found_ad_nodes.into_iter().flatten().collect(); Ok(all_found_ad_nodes) } diff --git a/src/query_pool.rs b/src/query_pool.rs index ad3ba4632..4e60cdae6 100644 --- a/src/query_pool.rs +++ b/src/query_pool.rs @@ -115,6 +115,22 @@ where self.add(peer_iter, target) } + /// Adds a query to the pool that iterates towards the closest peers to the target. 
+ pub fn add_topic_query( + &mut self, + config: TopicQueryConfig, + target: TTarget, + peers: I, + ) -> QueryId + where + I: IntoIterator>, + { + let target_key = target.key(); + let topic_query = TopicQuery::with_config(config, target_key, peers); + let peer_iter = QueryPeerIter::Topic(topic_query); + self.add(peer_iter, target) + } + fn add(&mut self, peer_iter: QueryPeerIter, target: TTarget) -> QueryId { let id = QueryId(self.next_id); self.next_id = self.next_id.wrapping_add(1); @@ -207,6 +223,7 @@ pub struct Query { enum QueryPeerIter { FindNode(FindNodeQuery), Predicate(PredicateQuery), + Topic(TopicQuery), } impl Query diff --git a/src/service.rs b/src/service.rs index ac3f70839..aca7b5541 100644 --- a/src/service.rs +++ b/src/service.rs @@ -163,11 +163,7 @@ pub enum ServiceRequest { /// discovered nodes as it traverses the DHT. RequestEventStream(oneshot::Sender>), /// Queries given node for nodes advertising a topic hash - TopicQuery( - NodeContact, - TopicHash, - oneshot::Sender, RequestError>>, - ), + TopicQuery(TopicHash, oneshot::Sender>), /// RegisterTopic publishes this node as an advertiser for a topic at given node RegisterTopic(Topic), ActiveTopics(oneshot::Sender>), @@ -427,7 +423,7 @@ impl Service { error!("Failed to return the event stream channel"); } } - ServiceRequest::TopicQuery(node_contact, topic_hash, callback) => { + ServiceRequest::TopicQuery(topic_hash, callback) => { // If we look up the topic hash for the first time we initialise its kbuckets. if !self.topics_kbuckets.contains_key(&topic_hash) { // NOTE: Currently we don't expose custom filter support in the configuration. 
Users can @@ -468,7 +464,7 @@ impl Service { }); self.topics_kbuckets.insert(topic_hash, kbuckets); } - self.topic_query(topic_hash, callback); + self.start_topic_query(topic_hash, Some(callback)); } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); @@ -649,36 +645,64 @@ impl Service { } } - fn topic_query(&mut self, topic_hash: TopicHash, callback: oneshot::Sender, RequestError>>) { + fn register_topic(&mut self, topic_hash: TopicHash) { + // Placeholder for ad distribution logic, X random nodes from bucket at furthest distance + // are sent REGTOPICs, then decreasing by some number for each distance range approaching 0 (topic id). if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { - kbuckets - .iter() - .for_each(|entry| { - self.topic_query_request(NodeContact::from(entry.node.value.clone()), topic_hash, callback); - }); + kbuckets.iter().for_each(|entry| { + let local_enr = self.local_enr.read().clone(); + self.reg_topic_request( + NodeContact::from(entry.node.value.clone()), + topic_hash, + local_enr, + None, + ) + }); } else { debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); } } - fn register_topic(&mut self, topic_hash: TopicHash) { + /// Internal function that starts a query. + fn start_topic_query( + &mut self, + topic_hash: TopicHash, + callback: Option>>, + ) { + let mut target = QueryInfo { + query_type: QueryType::Topic(NodeId::new(&topic_hash.as_bytes())), + untrusted_enrs: Default::default(), + // Placeholder, how many ads to request per peer + distances_to_request: DISTANCES_TO_REQUEST_PER_PEER, + callback, + }; + // Placeholder for ad distribution logic, X random nodes from bucket at furthest distance // are sent REGTOPICs, then decreasing by some number for each distance range approaching 0 (topic id). 
- if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { - kbuckets - .iter() - .for_each(|entry| { - let local_enr = self.local_enr.read().clone(); - self.reg_topic_request( - NodeContact::from(entry.node.value.clone()), - topic_hash, - local_enr, - None, - ) - }); + let mut target_peers = Vec::new(); + if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { + for entry in kbuckets.iter() { + // Add the known ENR's to the untrusted list + target.untrusted_enrs.push(*entry.node.value); + // Add the key to the list for the query + target_peers.push(entry.node.key); + } } else { debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); } + + if target_peers.is_empty() { + warn!("No known_closest_peers found. Return empty result without sending query."); + if let Some(callback) = target.callback { + if callback.send(vec![]).is_err() { + warn!("Failed to callback"); + } + } + } else { + let query_config = FindTopicQueryConfig::new_from_config(&self.config); + self.queries + .add_topic_query(query_config, target, target_peers); + } } /// Internal function that starts a query. diff --git a/src/service/query_info.rs b/src/service/query_info.rs index 5e6a139d5..04a59897f 100644 --- a/src/service/query_info.rs +++ b/src/service/query_info.rs @@ -1,4 +1,4 @@ -use crate::{kbucket::Key, rpc::RequestBody, Enr}; +use crate::{advertisement::topic::TopicHash, kbucket::Key, rpc::RequestBody, Enr}; use enr::NodeId; use sha2::digest::generic_array::GenericArray; use smallvec::SmallVec; @@ -25,6 +25,7 @@ pub struct QueryInfo { pub enum QueryType { /// The user requested a `FIND_NODE` query to be performed. It should be reported when finished. 
FindNode(NodeId), + Topic(NodeId), } impl QueryInfo { @@ -36,6 +37,9 @@ impl QueryInfo { .ok_or("Requested a node find itself")?; RequestBody::FindNode { distances } } + QueryType::Topic(key) => RequestBody::TopicQuery { + topic: TopicHash::from_raw(key.raw()), + }, }; Ok(request) @@ -45,7 +49,7 @@ impl QueryInfo { impl crate::query_pool::TargetKey for QueryInfo { fn key(&self) -> Key { match self.query_type { - QueryType::FindNode(ref node_id) => { + QueryType::FindNode(ref node_id) | QueryType::Topic(ref node_id) => { Key::new_raw(*node_id, *GenericArray::from_slice(&node_id.raw())) } } From 43a49275b0dd38d729697a76aa1f11b362b3b493 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 13 Jun 2022 22:55:54 +0200 Subject: [PATCH 126/391] Disconnect TopicQuery from iterative query pool --- src/query_pool.rs | 17 ------- src/service.rs | 103 +++++++++++++++++++++++--------------- src/service/query_info.rs | 8 +-- src/service/test.rs | 1 + 4 files changed, 65 insertions(+), 64 deletions(-) diff --git a/src/query_pool.rs b/src/query_pool.rs index 4e60cdae6..ad3ba4632 100644 --- a/src/query_pool.rs +++ b/src/query_pool.rs @@ -115,22 +115,6 @@ where self.add(peer_iter, target) } - /// Adds a query to the pool that iterates towards the closest peers to the target. 
- pub fn add_topic_query( - &mut self, - config: TopicQueryConfig, - target: TTarget, - peers: I, - ) -> QueryId - where - I: IntoIterator>, - { - let target_key = target.key(); - let topic_query = TopicQuery::with_config(config, target_key, peers); - let peer_iter = QueryPeerIter::Topic(topic_query); - self.add(peer_iter, target) - } - fn add(&mut self, peer_iter: QueryPeerIter, target: TTarget) -> QueryId { let id = QueryId(self.next_id); self.next_id = self.next_id.wrapping_add(1); @@ -223,7 +207,6 @@ pub struct Query { enum QueryPeerIter { FindNode(FindNodeQuery), Predicate(PredicateQuery), - Topic(TopicQuery), } impl Query diff --git a/src/service.rs b/src/service.rs index aca7b5541..0c8fca646 100644 --- a/src/service.rs +++ b/src/service.rs @@ -43,17 +43,18 @@ use aes_gcm::{ use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; -use futures::prelude::*; +use futures::{stream::futures_unordered::FuturesUnordered, prelude::*}; use more_asserts::debug_unreachable; use parking_lot::RwLock; use rand::Rng; use rpc::*; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, io::{Error, ErrorKind}, net::SocketAddr, + pin::Pin, sync::{atomic::Ordering, Arc}, - task::Poll, + task::{Context, Poll}, time::{Duration, Instant}, }; use tokio::{ @@ -243,6 +244,39 @@ pub struct Service { /// Locally issued tickets returned by nodes pending registration for free local ad slots. ticket_pools: TicketPools, + + /// + active_topic_queries: FuturesUnordered, +} +pub struct ActiveTopicQuery { + queried_peers: HashMap, + num_results: usize, + // If the same ad enr is returned by two peers it is not counted. 
+ results: HashSet, + time_out: Duration, + start: Instant, + callback: oneshot::Sender>, +} + +pub enum TopicQueryState { + Finished(HashSet), + Unsatisifed(Vec), +} + +impl Future for ActiveTopicQuery { + type Output = TopicQueryState; + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.results.len() >= self.num_results || self.start.elapsed() >= self.time_out { + Poll::Ready(TopicQueryState::Finished(self.results)) + } else { + let peers = self.queried_peers.into_iter().filter(|(peer, return_status)| *return_status).map(|(peer, _)| peer).collect::>(); + if peers.len() >= self.queried_peers.len() { + Poll::Ready(TopicQueryState::Unsatisifed(peers)) + } else { + Poll::Pending + } + } + } } /// Active RPC request awaiting a response from the handler. @@ -371,6 +405,7 @@ impl Service { topics_kbuckets: HashMap::new(), active_topics, ticket_pools: TicketPools::default(), + active_topic_queries: FuturesUnordered::new(), exit, config: config.clone(), }; @@ -464,7 +499,7 @@ impl Service { }); self.topics_kbuckets.insert(topic_hash, kbuckets); } - self.start_topic_query(topic_hash, Some(callback)); + self.send_topic_queries(topic_hash, callback); } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); @@ -509,7 +544,7 @@ impl Service { }); self.topics_kbuckets.insert(topic_hash, kbuckets); METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); - self.register_topic(topic_hash); + self.send_register_topics(topic_hash); } } ServiceRequest::ActiveTopics(callback) => { @@ -645,7 +680,7 @@ impl Service { } } - fn register_topic(&mut self, topic_hash: TopicHash) { + fn send_register_topics(&mut self, topic_hash: TopicHash) { // Placeholder for ad distribution logic, X random nodes from bucket at furthest distance // are sent REGTOPICs, then decreasing by some number for each distance range approaching 0 (topic id). 
if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { @@ -664,45 +699,24 @@ impl Service { } /// Internal function that starts a query. - fn start_topic_query( + fn send_topic_queries( &mut self, topic_hash: TopicHash, - callback: Option>>, + callback: oneshot::Sender>, ) { - let mut target = QueryInfo { - query_type: QueryType::Topic(NodeId::new(&topic_hash.as_bytes())), - untrusted_enrs: Default::default(), - // Placeholder, how many ads to request per peer - distances_to_request: DISTANCES_TO_REQUEST_PER_PEER, - callback, - }; - // Placeholder for ad distribution logic, X random nodes from bucket at furthest distance // are sent REGTOPICs, then decreasing by some number for each distance range approaching 0 (topic id). - let mut target_peers = Vec::new(); - if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { - for entry in kbuckets.iter() { - // Add the known ENR's to the untrusted list - target.untrusted_enrs.push(*entry.node.value); - // Add the key to the list for the query - target_peers.push(entry.node.key); - } + if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { + let peers = kbuckets.iter().filter_map(|entry| !self.active_topic_query_peers.entry(&topic_hash).contains_entry.node.value.clone()).for_each(|entry| { + let local_enr = self.local_enr.read().clone(); + self.topic_query_request( + NodeContact::from(entry.node.value.clone()), + topic_hash, + ) + }); } else { debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); } - - if target_peers.is_empty() { - warn!("No known_closest_peers found. Return empty result without sending query."); - if let Some(callback) = target.callback { - if callback.send(vec![]).is_err() { - warn!("Failed to callback"); - } - } - } else { - let query_config = FindTopicQueryConfig::new_from_config(&self.config); - self.queries - .add_topic_query(query_config, target, target_peers); - } } /// Internal function that starts a query. 
@@ -1098,7 +1112,7 @@ impl Service { "Nodes Response: {} of {} received", current_response.count, total ); - // if there are more requests coming, store the nodes and wait for + // if there are more responses coming, store the nodes and wait for // another response // We allow for implementations to send at a minimum 3 nodes per response. // We allow for the number of nodes to be returned as the maximum we emit. @@ -1133,7 +1147,15 @@ impl Service { // ensure any mapping is removed in this rare case self.active_nodes_responses.remove(&node_id); - self.discovered(&node_id, nodes, active_request.query_id); + match active_request.request_body { + RequestBody::TopicQuery{ topic } => { + if let Some(results) = self.topic_queries_results.get_mut(topic_hash) { + results.insert(nodes); + } + }, + RequestBody::FindNode{ .. } => self.discovered(&node_id, nodes, active_request.query_id), + _ => debug_unreachable!("Only TOPICQUERY and FINDNODE requests expect NODES response") + } } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); @@ -1352,7 +1374,6 @@ impl Service { &mut self, contact: NodeContact, topic: TopicHash, - callback: oneshot::Sender, RequestError>>, ) { let request_body = RequestBody::TopicQuery { topic }; @@ -1360,7 +1381,7 @@ impl Service { contact, request_body, query_id: None, - callback: Some(CallbackResponse::Topic(callback)), + callback: None, }; self.send_rpc_request(active_request); } diff --git a/src/service/query_info.rs b/src/service/query_info.rs index 04a59897f..5e6a139d5 100644 --- a/src/service/query_info.rs +++ b/src/service/query_info.rs @@ -1,4 +1,4 @@ -use crate::{advertisement::topic::TopicHash, kbucket::Key, rpc::RequestBody, Enr}; +use crate::{kbucket::Key, rpc::RequestBody, Enr}; use enr::NodeId; use sha2::digest::generic_array::GenericArray; use smallvec::SmallVec; @@ -25,7 +25,6 @@ pub struct QueryInfo { pub enum QueryType { /// The user requested a `FIND_NODE` query to be performed. 
It should be reported when finished. FindNode(NodeId), - Topic(NodeId), } impl QueryInfo { @@ -37,9 +36,6 @@ impl QueryInfo { .ok_or("Requested a node find itself")?; RequestBody::FindNode { distances } } - QueryType::Topic(key) => RequestBody::TopicQuery { - topic: TopicHash::from_raw(key.raw()), - }, }; Ok(request) @@ -49,7 +45,7 @@ impl QueryInfo { impl crate::query_pool::TargetKey for QueryInfo { fn key(&self) -> Key { match self.query_type { - QueryType::FindNode(ref node_id) | QueryType::Topic(ref node_id) => { + QueryType::FindNode(ref node_id) => { Key::new_raw(*node_id, *GenericArray::from_slice(&node_id.raw())) } } diff --git a/src/service/test.rs b/src/service/test.rs index c9187793b..17e3463e7 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -104,6 +104,7 @@ async fn build_service( topics_kbuckets: HashMap::new(), active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), ticket_pools: TicketPools::default(), + active_topic_queries: FuturesUnordered::new(), exit, config, } From eabdef8d785a373208784dde73d299f3ca373ef7 Mon Sep 17 00:00:00 2001 From: Diva M Date: Mon, 13 Jun 2022 16:28:53 -0500 Subject: [PATCH 127/391] fix udp -> udp4 --- src/advertisement/test.rs | 16 ++++++++-------- src/service/test.rs | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index c0c927dc4..080256602 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -12,7 +12,7 @@ async fn insert_same_node() { let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); @@ -36,12 +36,12 @@ async fn insert_ad_and_get_nodes() { let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = 
CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let port = 5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); - let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr_2 = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); @@ -90,7 +90,7 @@ async fn ticket_wait_time_duration() { let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let mut ads = Ads::new(Duration::from_secs(3), 1, 3).unwrap(); @@ -112,12 +112,12 @@ async fn ticket_wait_time_full_table() { let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let port = 5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); - let enr_2 = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr_2 = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let mut ads = Ads::new(Duration::from_secs(3), 2, 3).unwrap(); @@ -150,12 +150,12 @@ async fn ticket_wait_time_full_topic() { let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let port = 5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); - let enr_2 = 
EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr_2 = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let mut ads = Ads::new(Duration::from_secs(3), 2, 4).unwrap(); diff --git a/src/service/test.rs b/src/service/test.rs index 6324d1e6c..2a496655d 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -181,7 +181,7 @@ async fn encrypt_decrypt_ticket() { let ip: IpAddr = "127.0.0.1".parse().unwrap(); let enr = EnrBuilder::new("v4") .ip(ip) - .udp(10006) + .udp4(10006) .build(&enr_key) .unwrap(); @@ -211,7 +211,7 @@ async fn encrypt_decrypt_ticket() { let port = 6666; let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").ip(ip).udp(port).build(&key).unwrap(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let node_id = enr.node_id(); let ticket = Ticket::new( From 2077d69e87724bb3f9790b01d15d856559cb1cb8 Mon Sep 17 00:00:00 2001 From: Diva M Date: Mon, 13 Jun 2022 16:33:46 -0500 Subject: [PATCH 128/391] fix udp_socket -> udp4_socket --- src/service/test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/test.rs b/src/service/test.rs index 2a496655d..dbb104196 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -185,7 +185,7 @@ async fn encrypt_decrypt_ticket() { .build(&enr_key) .unwrap(); - let socket_addr = enr.udp_socket().unwrap(); + let socket_addr = enr.udp4_socket().unwrap(); let service = build_service( Arc::new(RwLock::new(enr)), From 9de3708e29aeef32c87a13da50c13d072c739fd2 Mon Sep 17 00:00:00 2001 From: Diva M Date: Mon, 13 Jun 2022 16:35:24 -0500 Subject: [PATCH 129/391] fix clippy --- src/rpc.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index df3857de1..7e3505bd3 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,8 +1,4 @@ use crate::advertisement::topic::TopicHash; -use aes_gcm::{ - aead::{generic_array::GenericArray, Aead, 
NewAead, Payload}, - Aes128Gcm, -}; use enr::{CombinedKey, Enr, NodeId}; use rlp::{DecoderError, Rlp, RlpStream}; use std::{ @@ -874,6 +870,10 @@ impl Ticket { mod tests { use super::*; use enr::EnrBuilder; + use aes_gcm::{ + aead::{generic_array::GenericArray, Aead, NewAead, Payload}, + Aes128Gcm, + }; #[test] fn ref_test_encode_request_ping() { From 58eb48674c3529172ce0d3b508387a09b53e4d51 Mon Sep 17 00:00:00 2001 From: Diva M Date: Mon, 13 Jun 2022 16:36:25 -0500 Subject: [PATCH 130/391] fmt --- src/rpc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc.rs b/src/rpc.rs index 7e3505bd3..e9f03cffa 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -869,11 +869,11 @@ impl Ticket { #[cfg(test)] mod tests { use super::*; - use enr::EnrBuilder; use aes_gcm::{ aead::{generic_array::GenericArray, Aead, NewAead, Payload}, Aes128Gcm, }; + use enr::EnrBuilder; #[test] fn ref_test_encode_request_ping() { From f850a049f057a4ef6b62fcda1de95bf48bf1b61b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 14 Jun 2022 16:28:51 +0200 Subject: [PATCH 131/391] Query new peers if not enough results for topic query --- src/config.rs | 5 ++ src/discv5.rs | 3 - src/handler/mod.rs | 6 +- src/service.rs | 206 ++++++++++++++++++++++++++++++-------------- src/service/test.rs | 7 +- 5 files changed, 152 insertions(+), 75 deletions(-) diff --git a/src/config.rs b/src/config.rs index 2a28b3c64..f8c58a0ee 100644 --- a/src/config.rs +++ b/src/config.rs @@ -101,6 +101,9 @@ pub struct Discv5Config { /// all of the nodes in the kbuckets. pub topic_radius: u64, + pub topic_query_timeout: Duration, + pub topics_num_results: usize, + /// A custom executor which can spawn the discv5 tasks. This must be a tokio runtime, with /// timing support. By default, the executor that created the discv5 struct will be used. 
pub executor: Option>, @@ -142,6 +145,8 @@ impl Default for Discv5Config { permit_ban_list: PermitBanList::default(), ban_duration: Some(Duration::from_secs(3600)), // 1 hour topic_radius: 256, + topic_query_timeout: Duration::from_secs(60), + topics_num_results: 16, ip_mode: IpMode::default(), executor: None, } diff --git a/src/discv5.rs b/src/discv5.rs index e29f1508b..f912f73e3 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -507,8 +507,6 @@ impl Discv5 { &self, topic_hash: TopicHash, ) -> impl Future, RequestError>> + 'static { - let find_future = self.find_closest_nodes_to_topic(topic_hash); - let ip_mode = self.config.ip_mode; let channel = self.clone_channel(); async move { @@ -569,7 +567,6 @@ impl Discv5 { ) -> impl Future> + 'static { let topic = Topic::new(topic); let channel = self.clone_channel(); - let ip_mode = self.config.ip_mode; async move { let channel = channel diff --git a/src/handler/mod.rs b/src/handler/mod.rs index c2313a81b..adb95bee8 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -693,11 +693,11 @@ impl Handler { topic, enr: _, ticket: _, - } => HandlerOut::EstablishedTopic(*enr, connection_direction, topic), - _ => HandlerOut::Established(*enr, connection_direction), + } => HandlerOut::EstablishedTopic(enr, connection_direction, topic), + _ => HandlerOut::Established(enr, connection_direction), }; self.service_send - .send(HandlerOut::Established(enr, connection_direction)) + .send(kbucket_addition) .await .unwrap_or_else(|e| warn!("Error with sending channel: {}", e)); } diff --git a/src/service.rs b/src/service.rs index 790133b73..7390a0a75 100644 --- a/src/service.rs +++ b/src/service.rs @@ -43,13 +43,13 @@ use aes_gcm::{ use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; -use futures::{stream::futures_unordered::FuturesUnordered, prelude::*}; +use futures::prelude::*; use more_asserts::debug_unreachable; use parking_lot::RwLock; use rand::Rng; use rpc::*; use std::{ - collections::{HashMap, 
HashSet}, + collections::{hash_map::Entry, HashMap}, io::{Error, ErrorKind}, net::SocketAddr, pin::Pin, @@ -167,7 +167,7 @@ pub enum ServiceRequest { /// discovered nodes as it traverses the DHT. RequestEventStream(oneshot::Sender>), /// Queries given node for nodes advertising a topic hash - TopicQuery(TopicHash, oneshot::Sender>), + TopicQuery(TopicHash, oneshot::Sender, RequestError>>), /// RegisterTopic publishes this node as an advertiser for a topic at given node RegisterTopic(Topic), ActiveTopics(oneshot::Sender>), @@ -249,36 +249,64 @@ pub struct Service { ticket_pools: TicketPools, /// - active_topic_queries: FuturesUnordered, + active_topic_queries: ActiveTopicQueries, } + pub struct ActiveTopicQuery { + // A NodeId mapped to false is waiting for a response or failed request. queried_peers: HashMap, - num_results: usize, - // If the same ad enr is returned by two peers it is not counted. - results: HashSet, - time_out: Duration, + // An ad returned by multiple peers is only included once. 
+ results: HashMap, + callback: Option, RequestError>>>, start: Instant, - callback: oneshot::Sender>, +} + +pub struct ActiveTopicQueries { + queries: HashMap, + time_out: Duration, + num_results: usize, +} + +impl ActiveTopicQueries { + pub fn new(time_out: Duration, num_results: usize) -> Self { + ActiveTopicQueries { + queries: HashMap::new(), + time_out, + num_results, + } + } } pub enum TopicQueryState { - Finished(HashSet), - Unsatisifed(Vec), + Finished(TopicHash), + TimedOut(TopicHash), + Unsatisfied(TopicHash), } -impl Future for ActiveTopicQuery { - type Output = TopicQueryState; - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - if self.results.len() >= self.num_results || self.start.elapsed() >= self.time_out { - Poll::Ready(TopicQueryState::Finished(self.results)) - } else { - let peers = self.queried_peers.into_iter().filter(|(peer, return_status)| *return_status).map(|(peer, _)| peer).collect::>(); - if peers.len() >= self.queried_peers.len() { - Poll::Ready(TopicQueryState::Unsatisifed(peers)) +impl Stream for ActiveTopicQueries { + type Item = TopicQueryState; + fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + for (topic_hash, query) in self.queries.iter() { + if query.results.len() >= self.num_results { + return Poll::Ready(Some(TopicQueryState::Finished(*topic_hash))); + } else if query.start.elapsed() >= self.time_out { + warn!( + "TOPICQUERY timed out. Only {} ads found for topic hash.", + query.results.len() + ); + return Poll::Ready(Some(TopicQueryState::TimedOut(*topic_hash))); } else { - Poll::Pending + let exhausted_peers = query + .queried_peers + .iter() + .filter(|(_peer, return_status)| **return_status) + .count(); + if exhausted_peers >= query.queried_peers.len() { + return Poll::Ready(Some(TopicQueryState::Unsatisfied(*topic_hash))); + } } } + Poll::Pending } } @@ -300,8 +328,6 @@ pub enum CallbackResponse { Enr(oneshot::Sender>), /// A response from a TALK request. 
Talk(oneshot::Sender, RequestError>>), - /// A response to a Topic Query. - Topic(oneshot::Sender, RequestError>>), } /// For multiple responses to a FindNodes request, this keeps track of the request count @@ -408,7 +434,10 @@ impl Service { topics_kbuckets: HashMap::new(), active_topics, ticket_pools: TicketPools::default(), - active_topic_queries: FuturesUnordered::new(), + active_topic_queries: ActiveTopicQueries::new( + config.topic_query_timeout, + config.topics_num_results, + ), exit, config: config.clone(), }; @@ -463,7 +492,7 @@ impl Service { } ServiceRequest::TopicQuery(topic_hash, callback) => { // If we look up the topic hash for the first time we initialise its kbuckets. - if !self.topics_kbuckets.contains_key(&topic_hash) { + if let Entry::Vacant(_) = self.topics_kbuckets.entry(topic_hash) { // NOTE: Currently we don't expose custom filter support in the configuration. Users can // optionally use the IP filter via the ip_limit configuration parameter. In the future, we // may expose this functionality to the users if there is demand for it. @@ -502,7 +531,7 @@ impl Service { }); self.topics_kbuckets.insert(topic_hash, kbuckets); } - self.send_topic_queries(topic_hash, callback); + self.send_topic_queries(topic_hash, Some(callback)); } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); @@ -638,8 +667,7 @@ impl Service { let QueryType::FindNode(node_id) = result.target.query_type; let topic = TopicHash::from_raw(node_id.raw()); if self.topics.contains_key(&topic){ - let local_enr = self.local_enr.read().clone(); - found_enrs.into_iter().for_each(|enr| self.reg_topic_request(NodeContact::from(enr), topic, local_enr.clone(), None)); + // add to topic kbuckets? 
} } } @@ -662,13 +690,14 @@ impl Service { } Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { let enr = self.local_enr.read().clone(); - // When the ticket time expires a new regtopic requet is automatically sent + // When the ticket time expires a new regtopic request is automatically sent // to the ticket issuer. self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } _ = publish_topics.tick() => { // Topics are republished at regular intervals. - //self.topics.clone().keys().for_each(|topic_hash| self.register_topic(*topic_hash)); + + self.topics_kbuckets.clone().keys().for_each(|topic_hash| self.send_register_topics(*topic_hash)); } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { // No particular selection is carried out at this stage of implementation, the choice of node to give @@ -683,22 +712,36 @@ impl Service { METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); } } + Some(topic_query_progress) = self.active_topic_queries.next() => { + match topic_query_progress { + TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) => { + if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { + if let Some(callback) = query.callback { + if callback.send(Ok(query.results.into_values().collect::>())).is_err() { + warn!("Callback dropped for topic query {}. Results dropped", topic_hash); + } + } + } + }, + TopicQueryState::Unsatisfied(topic_hash) => { + self.send_topic_queries(topic_hash, None); + } + } + } } } } fn send_register_topics(&mut self, topic_hash: TopicHash) { - // Placeholder for ad distribution logic, X random nodes from bucket at furthest distance - // are sent REGTOPICs, then decreasing by some number for each distance range approaching 0 (topic id). 
- if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { - kbuckets.iter().for_each(|entry| { + if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { + kbuckets.get().clone().iter().for_each(|entry| { let local_enr = self.local_enr.read().clone(); - self.reg_topic_request( - NodeContact::from(entry.node.value.clone()), - topic_hash, - local_enr, - None, - ) + if let Ok(node_contact) = + NodeContact::try_from_enr(entry.node.value.clone(), self.config.ip_mode) + .map_err(|e| error!("Failed to send REGTOPIC to peer. Error: {:?}", e)) + { + self.reg_topic_request(node_contact, topic_hash, local_enr, None) + } }); } else { debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); @@ -709,18 +752,40 @@ impl Service { fn send_topic_queries( &mut self, topic_hash: TopicHash, - callback: oneshot::Sender>, + callback: Option, RequestError>>>, ) { - // Placeholder for ad distribution logic, X random nodes from bucket at furthest distance - // are sent REGTOPICs, then decreasing by some number for each distance range approaching 0 (topic id). 
- if let Some(kbuckets) = self.topics_kbuckets.clone().get_mut(&topic_hash) { - let peers = kbuckets.iter().filter_map(|entry| !self.active_topic_query_peers.entry(&topic_hash).contains_entry.node.value.clone()).for_each(|entry| { - let local_enr = self.local_enr.read().clone(); - self.topic_query_request( - NodeContact::from(entry.node.value.clone()), - topic_hash, - ) + let query = self + .active_topic_queries + .queries + .entry(topic_hash) + .or_insert(ActiveTopicQuery { + queried_peers: HashMap::new(), + results: HashMap::new(), + callback, + start: Instant::now(), }); + let queried_peers = query.queried_peers.clone(); + if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { + let mut peers = kbuckets.get().clone(); + let new_query_peers = peers + .iter() + .filter(|entry| !queried_peers.contains_key(entry.node.key.preimage())) + .map(|entry| { + query + .queried_peers + .entry(*entry.node.key.preimage()) + .or_default(); + entry.node.value + }) + .collect::>(); + for enr in new_query_peers { + if let Ok(node_contact) = + NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) + .map_err(|e| error!("Failed to send TOPICQUERY to peer. Error: {:?}", e)) + { + self.topic_query_request(node_contact, topic_hash); + } + } } else { debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); } @@ -1167,13 +1232,19 @@ impl Service { self.active_nodes_responses.remove(&node_id); match active_request.request_body { - RequestBody::TopicQuery{ topic } => { - if let Some(results) = self.topic_queries_results.get_mut(topic_hash) { - results.insert(nodes); + RequestBody::TopicQuery { topic } => { + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { + nodes.into_iter().for_each(|enr| { + query.results.insert(enr.node_id(), enr); + }); } - }, - RequestBody::FindNode{ .. 
} => self.discovered(&node_id, nodes, active_request.query_id), - _ => debug_unreachable!("Only TOPICQUERY and FINDNODE requests expect NODES response") + } + RequestBody::FindNode { .. } => { + self.discovered(&node_id, nodes, active_request.query_id) + } + _ => debug_unreachable!( + "Only TOPICQUERY and FINDNODE requests expect NODES response" + ), } } ResponseBody::Pong { enr_seq, ip, port } => { @@ -1428,11 +1499,7 @@ impl Service { } /// Queries a node for the ads that node currently advertises for a given topic. - fn topic_query_request( - &mut self, - contact: NodeContact, - topic: TopicHash, - ) { + fn topic_query_request(&mut self, contact: NodeContact, topic: TopicHash) { let request_body = RequestBody::TopicQuery { topic }; let active_request = ActiveRequest { @@ -1989,12 +2056,6 @@ impl Service { .unwrap_or_else(|_| debug!("Couldn't send TALK error response to user")); return; } - Some(CallbackResponse::Topic(callback)) => { - callback - .send(Err(error)) - .unwrap_or_else(|_| debug!("Couldn't send TOPIC error response to user")); - return; - } None => { // no callback to send too } @@ -2034,6 +2095,17 @@ impl Service { } } } + RequestBody::TopicQuery { topic } => { + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { + if let Some(exhausted) = query.queried_peers.get_mut(&node_id) { + *exhausted = true; + debug!( + "Failed TOPICQUERY request: {} for node: {}, reason {:?} ", + active_request.request_body, active_request.contact, error + ); + } + } + } // for all other requests, if any are queries, mark them as failures. 
_ => { if let Some(query_id) = active_request.query_id { diff --git a/src/service/test.rs b/src/service/test.rs index d6d0987c3..d3e4d5e78 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -100,7 +100,10 @@ async fn build_service( topics_kbuckets: HashMap::new(), active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), ticket_pools: TicketPools::default(), - active_topic_queries: FuturesUnordered::new(), + active_topic_queries: ActiveTopicQueries::new( + config.topic_query_timeout, + config.topics_num_results, + ), exit, config, } @@ -192,7 +195,7 @@ async fn encrypt_decrypt_ticket() { let service = build_service( Arc::new(RwLock::new(enr)), Arc::new(RwLock::new(enr_key)), - socket_addr, + socket_addr.into(), false, ) .await; From 989102fd7a3572722faab9056410e849eade932d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 15 Jun 2022 00:03:08 +0200 Subject: [PATCH 132/391] Extend kbucket maintenance to topics --- src/service.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/service.rs b/src/service.rs index 7390a0a75..f036878fe 100644 --- a/src/service.rs +++ b/src/service.rs @@ -45,7 +45,7 @@ use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; use futures::prelude::*; use more_asserts::debug_unreachable; -use parking_lot::RwLock; +use parking_lot::{RwLock, RawRwLock}; use rand::Rng; use rpc::*; use std::{ @@ -348,6 +348,11 @@ impl Default for NodesResponse { } } +pub enum KBuckets { + Primary(Arc>>), + Topics(KBucketsTable), +} + impl Service { /// Builds the `Service` main struct. /// @@ -631,9 +636,12 @@ impl Service { } } } - event = Service::bucket_maintenance_poll(&self.kbuckets) => { + event = Service::bucket_maintenance_poll(KBuckets::Primary(self.kbuckets.clone())) => { self.send_event(event); } + event = Service::bucket_maintenance_poll(KBuckets::Topics(self.topics_kbuckets.clone().into_values().next().unwrap())) => { + debug!("Topics KBuckets updated. 
Event {:?}", event); + } query_event = Service::query_event_poll(&mut self.queries) => { match query_event { QueryEvent::Waiting(query_id, node_id, request_body) => { @@ -2139,11 +2147,14 @@ impl Service { /// A future that maintains the routing table and inserts nodes when required. This returns the /// `Discv5Event::NodeInserted` variant if a new node has been inserted into the routing table. async fn bucket_maintenance_poll( - kbuckets: &Arc>>, + mut kbuckets: KBuckets, ) -> Discv5Event { future::poll_fn(move |_cx| { // Drain applied pending entries from the routing table. - if let Some(entry) = kbuckets.write().take_applied_pending() { + if let Some(entry) = match kbuckets { + KBuckets::Primary(ref kbuckets) => kbuckets.write().take_applied_pending(), + KBuckets::Topics(ref mut kbuckets) => kbuckets.take_applied_pending(), + } { let event = Discv5Event::NodeInserted { node_id: entry.inserted.into_preimage(), replaced: entry.evicted.map(|n| n.key.into_preimage()), From cd5edbfd17c76c1009c57475b1327ce74bca2cdf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 15 Jun 2022 16:41:28 +0200 Subject: [PATCH 133/391] Simplify syntax --- src/service.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/service.rs b/src/service.rs index f036878fe..cef0db4e2 100644 --- a/src/service.rs +++ b/src/service.rs @@ -777,14 +777,14 @@ impl Service { let mut peers = kbuckets.get().clone(); let new_query_peers = peers .iter() - .filter(|entry| !queried_peers.contains_key(entry.node.key.preimage())) - .map(|entry| { + .filter_map(|entry| (!queried_peers.contains_key(entry.node.key.preimage())) + .then(|| { query .queried_peers .entry(*entry.node.key.preimage()) .or_default(); entry.node.value - }) + })) .collect::>(); for enr in new_query_peers { if let Ok(node_contact) = From 79aa98783199a2b42cd083399112b0fcc9060e38 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 15 Jun 2022 17:10:48 +0200 Subject: [PATCH 134/391] Allow for nodes response(s), a 
ticket and a regconfiramtion per REGTOPIC request --- src/handler/mod.rs | 27 ++++++++++++++++----------- src/service.rs | 2 +- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index adb95bee8..8d5735cec 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -39,6 +39,7 @@ use crate::{ }; use enr::{CombinedKey, NodeId}; use futures::prelude::*; +use more_asserts::debug_unreachable; use parking_lot::RwLock; use std::{ collections::HashMap, @@ -1004,7 +1005,17 @@ impl Handler { // This is a multi-response Nodes response if let Some(remaining_responses) = request_call.remaining_responses.as_mut() { *remaining_responses -= 1; - if remaining_responses != &0 { + let reinsert = match request_call.request.body { + RequestBody::FindNode{ .. } | RequestBody::TopicQuery{ .. } => remaining_responses > &mut 0, + // The request is reinserted for either another nodes response, a ticket or a + // register confirmation response that may come, otherwise the request times out. + RequestBody::RegisterTopic{ .. } => remaining_responses >= &mut 0, + _ => { + debug_unreachable!("Only FINDNODE, TOPICQUERY and REGISTERTOPIC expect nodes response"); + false + }, + }; + if reinsert { // more responses remaining, add back the request and send the response // add back the request and send the response self.active_requests @@ -1034,6 +1045,10 @@ impl Handler { return; } } + } else if let ResponseBody::Ticket { .. } = response.body { + // The request is reinserted for either a nodes response or a register + // confirmation response that may come, otherwise the request times out. + self.active_requests.insert(node_address.clone(), request_call); } // Remove the expected response @@ -1051,16 +1066,6 @@ impl Handler { warn!("Failed to inform of response {}", e) } self.send_next_request(node_address).await; - } else if let ResponseBody::RegisterConfirmation { .. 
} = response.body { - let _ = self - .service_send - .send(HandlerOut::Response( - node_address.clone(), - Box::new(response), - )) - .await; - self.send_next_request(node_address.clone()).await; - trace!("REGCONFIRMATION response from node: {}", node_address); } else { // This is likely a late response and we have already failed the request. These get // dropped here. diff --git a/src/service.rs b/src/service.rs index cef0db4e2..c867a44d0 100644 --- a/src/service.rs +++ b/src/service.rs @@ -45,7 +45,7 @@ use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; use futures::prelude::*; use more_asserts::debug_unreachable; -use parking_lot::{RwLock, RawRwLock}; +use parking_lot::RwLock; use rand::Rng; use rpc::*; use std::{ From ae41cbb1c7ff90ba50304f996d0141526939ded1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 15 Jun 2022 17:11:27 +0200 Subject: [PATCH 135/391] Runt cargo fmt --- src/handler/mod.rs | 13 ++++++++----- src/service.rs | 21 ++++++++++----------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 8d5735cec..2a533a907 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1006,14 +1006,16 @@ impl Handler { if let Some(remaining_responses) = request_call.remaining_responses.as_mut() { *remaining_responses -= 1; let reinsert = match request_call.request.body { - RequestBody::FindNode{ .. } | RequestBody::TopicQuery{ .. } => remaining_responses > &mut 0, - // The request is reinserted for either another nodes response, a ticket or a + RequestBody::FindNode { .. } | RequestBody::TopicQuery { .. } => { + remaining_responses > &mut 0 + } + // The request is reinserted for either another nodes response, a ticket or a // register confirmation response that may come, otherwise the request times out. - RequestBody::RegisterTopic{ .. } => remaining_responses >= &mut 0, + RequestBody::RegisterTopic { .. 
} => remaining_responses >= &mut 0, _ => { debug_unreachable!("Only FINDNODE, TOPICQUERY and REGISTERTOPIC expect nodes response"); false - }, + } }; if reinsert { // more responses remaining, add back the request and send the response @@ -1048,7 +1050,8 @@ impl Handler { } else if let ResponseBody::Ticket { .. } = response.body { // The request is reinserted for either a nodes response or a register // confirmation response that may come, otherwise the request times out. - self.active_requests.insert(node_address.clone(), request_call); + self.active_requests + .insert(node_address.clone(), request_call); } // Remove the expected response diff --git a/src/service.rs b/src/service.rs index c867a44d0..6c19f2471 100644 --- a/src/service.rs +++ b/src/service.rs @@ -777,14 +777,15 @@ impl Service { let mut peers = kbuckets.get().clone(); let new_query_peers = peers .iter() - .filter_map(|entry| (!queried_peers.contains_key(entry.node.key.preimage())) - .then(|| { - query - .queried_peers - .entry(*entry.node.key.preimage()) - .or_default(); - entry.node.value - })) + .filter_map(|entry| { + (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { + query + .queried_peers + .entry(*entry.node.key.preimage()) + .or_default(); + entry.node.value + }) + }) .collect::>(); for enr in new_query_peers { if let Ok(node_contact) = @@ -2146,9 +2147,7 @@ impl Service { /// A future that maintains the routing table and inserts nodes when required. This returns the /// `Discv5Event::NodeInserted` variant if a new node has been inserted into the routing table. - async fn bucket_maintenance_poll( - mut kbuckets: KBuckets, - ) -> Discv5Event { + async fn bucket_maintenance_poll(mut kbuckets: KBuckets) -> Discv5Event { future::poll_fn(move |_cx| { // Drain applied pending entries from the routing table. 
if let Some(entry) = match kbuckets { From cfec37a63bf69db313e3947eb56893fd078cdb03 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 15 Jun 2022 18:44:54 +0200 Subject: [PATCH 136/391] Add maintenance of all topic kbuckets --- src/discv5.rs | 6 ++++++ src/service.rs | 46 ++++++++++++++++++++++++++++++++-------------- 2 files changed, 38 insertions(+), 14 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index f912f73e3..51320a0ec 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -65,6 +65,12 @@ pub enum Discv5Event { node_id: NodeId, replaced: Option, }, + /// A new node has been added to a topic hash kbucket. + NodeInsertedTopic { + node_id: NodeId, + replaced: Option, + topic_hash: TopicHash, + }, /// Our local ENR IP address has been updated. SocketUpdated(SocketAddr), /// A node has initiated a talk request. diff --git a/src/service.rs b/src/service.rs index 6c19f2471..1f33d54e4 100644 --- a/src/service.rs +++ b/src/service.rs @@ -43,7 +43,7 @@ use aes_gcm::{ use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; -use futures::prelude::*; +use futures::{future::select_all, prelude::*}; use more_asserts::debug_unreachable; use parking_lot::RwLock; use rand::Rng; @@ -348,11 +348,6 @@ impl Default for NodesResponse { } } -pub enum KBuckets { - Primary(Arc>>), - Topics(KBucketsTable), -} - impl Service { /// Builds the `Service` main struct. /// @@ -636,11 +631,11 @@ impl Service { } } } - event = Service::bucket_maintenance_poll(KBuckets::Primary(self.kbuckets.clone())) => { + event = Service::bucket_maintenance_poll(&self.kbuckets) => { self.send_event(event); } - event = Service::bucket_maintenance_poll(KBuckets::Topics(self.topics_kbuckets.clone().into_values().next().unwrap())) => { - debug!("Topics KBuckets updated. 
Event {:?}", event); + event = Service::bucket_maintenance_poll_topics(self.topics_kbuckets.iter_mut()) => { + self.send_event(event); } query_event = Service::query_event_poll(&mut self.queries) => { match query_event { @@ -2147,13 +2142,12 @@ impl Service { /// A future that maintains the routing table and inserts nodes when required. This returns the /// `Discv5Event::NodeInserted` variant if a new node has been inserted into the routing table. - async fn bucket_maintenance_poll(mut kbuckets: KBuckets) -> Discv5Event { + async fn bucket_maintenance_poll( + kbuckets: &Arc>>, + ) -> Discv5Event { future::poll_fn(move |_cx| { // Drain applied pending entries from the routing table. - if let Some(entry) = match kbuckets { - KBuckets::Primary(ref kbuckets) => kbuckets.write().take_applied_pending(), - KBuckets::Topics(ref mut kbuckets) => kbuckets.take_applied_pending(), - } { + if let Some(entry) = kbuckets.write().take_applied_pending() { let event = Discv5Event::NodeInserted { node_id: entry.inserted.into_preimage(), replaced: entry.evicted.map(|n| n.key.into_preimage()), @@ -2165,6 +2159,30 @@ impl Service { .await } + /// A future that maintains the topic kbuckets and inserts nodes when required. This returns the + /// `Discv5Event::NodeInsertedTopics` variants. + async fn bucket_maintenance_poll_topics( + kbuckets: impl Iterator)>, + ) -> Discv5Event { + // Drain applied pending entries from the routing table. 
+ let mut update_kbuckets_futures = Vec::new(); + for (topic_hash, topic_kbuckets) in kbuckets { + update_kbuckets_futures.push(future::poll_fn(move |_cx| { + if let Some(entry) = (*topic_kbuckets).take_applied_pending() { + let event = Discv5Event::NodeInsertedTopic { + node_id: entry.inserted.into_preimage(), + replaced: entry.evicted.map(|n| n.key.into_preimage()), + topic_hash: *topic_hash, + }; + return Poll::Ready(event); + } + Poll::Pending + })); + } + let (event, _, _) = select_all(update_kbuckets_futures).await; + event + } + /// A future the maintains active queries. This returns completed and timed out queries, as /// well as queries which need to be driven further with extra requests. async fn query_event_poll(queries: &mut QueryPool) -> QueryEvent { From aae0753a4830d0dce13246c44f56d636c3b6280d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 15 Jun 2022 19:26:50 +0200 Subject: [PATCH 137/391] Fix find_node example --- examples/find_nodes.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/find_nodes.rs b/examples/find_nodes.rs index 658158116..9a6cd2b35 100644 --- a/examples/find_nodes.rs +++ b/examples/find_nodes.rs @@ -193,6 +193,7 @@ async fn main() { Discv5Event::Discovered(enr) => info!("Enr discovered {}", enr), Discv5Event::EnrAdded { enr, replaced: _ } => info!("Enr added {}", enr), Discv5Event::NodeInserted { node_id, replaced: _ } => info!("Node inserted {}", node_id), + Discv5Event::NodeInsertedTopic { node_id, replaced: _, topic_hash } => info!("Node inserted {} in topic hash {} kbucket", node_id, topic_hash), Discv5Event::SocketUpdated(addr) => info!("Socket updated {}", addr), Discv5Event::TalkRequest(_) => info!("Talk request received"), }; From b87ad5c09bf5e114c495b0bdd0eef874a924ee45 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 15 Jun 2022 19:39:11 +0200 Subject: [PATCH 138/391] Fix clippy warnings --- src/advertisement/test.rs | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 
deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 080256602..5efaf8a98 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -18,12 +18,12 @@ async fn insert_same_node() { let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); - ads.insert(enr.clone(), topic.clone()).unwrap(); + ads.insert(enr.clone(), topic).unwrap(); // Since 2 seconds haven't passed assert_eq!( - ads.insert(enr.clone(), topic.clone()).map_err(|e| e), - Err("Node already advertising this topic".into()) + ads.insert(enr.clone(), topic).map_err(|e| e), + Err("Node already advertising this topic") ); tokio::time::sleep(Duration::from_secs(2)).await; @@ -49,19 +49,19 @@ async fn insert_ad_and_get_nodes() { let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add an ad for topic from enr - ads.insert(enr.clone(), topic.clone()).unwrap(); + ads.insert(enr.clone(), topic).unwrap(); // The ad hasn't expired and duplicates are not allowed assert_eq!( - ads.insert(enr.clone(), topic.clone()).map_err(|e| e), - Err("Node already advertising this topic".into()) + ads.insert(enr.clone(), topic).map_err(|e| e), + Err("Node already advertising this topic") ); // Add an ad for topic from enr_2 - ads.insert(enr_2.clone(), topic.clone()).unwrap(); + ads.insert(enr_2.clone(), topic).unwrap(); // Add an ad for topic_2 from enr - ads.insert(enr.clone(), topic_2.clone()).unwrap(); + ads.insert(enr.clone(), topic_2).unwrap(); let nodes: Vec<&Enr> = ads .get_ad_nodes(topic) @@ -97,10 +97,10 @@ async fn ticket_wait_time_duration() { let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); // Add an add for topic - ads.insert(enr.clone(), topic.clone()).unwrap(); + ads.insert(enr, topic).unwrap(); assert_gt!( - ads.ticket_wait_time(topic.clone()), + ads.ticket_wait_time(topic), Some(Duration::from_secs(2)) ); assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); @@ -125,16 +125,16 @@ async fn 
ticket_wait_time_full_table() { let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add 2 ads for topic - ads.insert(enr.clone(), topic.clone()).unwrap(); - ads.insert(enr_2.clone(), topic.clone()).unwrap(); + ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr_2.clone(), topic).unwrap(); tokio::time::sleep(Duration::from_secs(2)).await; // Add an ad for topic_2 - ads.insert(enr.clone(), topic_2.clone()).unwrap(); + ads.insert(enr.clone(), topic_2).unwrap(); // Now max_ads in table is reached so the second ad for topic_2 has to wait - assert_ne!(ads.ticket_wait_time(topic_2.clone()), None); + assert_ne!(ads.ticket_wait_time(topic_2), None); tokio::time::sleep(Duration::from_secs(3)).await; @@ -163,20 +163,20 @@ async fn ticket_wait_time_full_topic() { let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add 2 ads for topic - ads.insert(enr.clone(), topic.clone()).unwrap(); - ads.insert(enr_2.clone(), topic.clone()).unwrap(); + ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr_2.clone(), topic).unwrap(); // Now max_ads_per_topic is reached for topic - assert_ne!(ads.ticket_wait_time(topic.clone()), None); + assert_ne!(ads.ticket_wait_time(topic), None); // Add a topic_2 ad - ads.insert(enr, topic_2.clone()).unwrap(); + ads.insert(enr, topic_2).unwrap(); // The table isn't full so topic_2 ads don't have to wait assert_eq!(ads.ticket_wait_time(topic_2), None); // But for topic they do until the first ads have expired - assert_ne!(ads.ticket_wait_time(topic.clone()), None); + assert_ne!(ads.ticket_wait_time(topic), None); tokio::time::sleep(Duration::from_secs(3)).await; assert_eq!(ads.ticket_wait_time(topic), None); From 767675275b6399cc7f7873f582cd090b540b3bba Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 16 Jun 2022 22:07:52 +0200 Subject: [PATCH 139/391] Return closest nodes to topics --- src/advertisement/test.rs | 5 +- src/config.rs | 4 +- src/handler/mod.rs | 16 +++--- src/service.rs | 101 
++++++++++++++++++++++++++++++++++---- src/service/test.rs | 2 +- 5 files changed, 103 insertions(+), 25 deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 5efaf8a98..1e4eb10a6 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -99,10 +99,7 @@ async fn ticket_wait_time_duration() { // Add an add for topic ads.insert(enr, topic).unwrap(); - assert_gt!( - ads.ticket_wait_time(topic), - Some(Duration::from_secs(2)) - ); + assert_gt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(2))); assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); } diff --git a/src/config.rs b/src/config.rs index f8c58a0ee..b3eedc9cb 100644 --- a/src/config.rs +++ b/src/config.rs @@ -102,7 +102,7 @@ pub struct Discv5Config { pub topic_radius: u64, pub topic_query_timeout: Duration, - pub topics_num_results: usize, + pub topic_query_peers: usize, /// A custom executor which can spawn the discv5 tasks. This must be a tokio runtime, with /// timing support. By default, the executor that created the discv5 struct will be used. 
@@ -146,7 +146,7 @@ impl Default for Discv5Config { ban_duration: Some(Duration::from_secs(3600)), // 1 hour topic_radius: 256, topic_query_timeout: Duration::from_secs(60), - topics_num_results: 16, + topic_query_peers: 10, ip_mode: IpMode::default(), executor: None, } diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 2a533a907..410e54be9 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -39,7 +39,6 @@ use crate::{ }; use enr::{CombinedKey, NodeId}; use futures::prelude::*; -use more_asserts::debug_unreachable; use parking_lot::RwLock; use std::{ collections::HashMap, @@ -694,7 +693,10 @@ impl Handler { topic, enr: _, ticket: _, - } => HandlerOut::EstablishedTopic(enr, connection_direction, topic), + } + | RequestBody::TopicQuery { topic } => { + HandlerOut::EstablishedTopic(enr, connection_direction, topic) + } _ => HandlerOut::Established(enr, connection_direction), }; self.service_send @@ -1006,16 +1008,14 @@ impl Handler { if let Some(remaining_responses) = request_call.remaining_responses.as_mut() { *remaining_responses -= 1; let reinsert = match request_call.request.body { - RequestBody::FindNode { .. } | RequestBody::TopicQuery { .. } => { - remaining_responses > &mut 0 - } // The request is reinserted for either another nodes response, a ticket or a // register confirmation response that may come, otherwise the request times out. RequestBody::RegisterTopic { .. } => remaining_responses >= &mut 0, - _ => { - debug_unreachable!("Only FINDNODE, TOPICQUERY and REGISTERTOPIC expect nodes response"); - false + RequestBody::TopicQuery { .. 
} => { + // remove from some map of NODES and AD NODES + remaining_responses >= &mut 0 } + _ => remaining_responses > &mut 0, }; if reinsert { // more responses remaining, add back the request and send the response diff --git a/src/service.rs b/src/service.rs index 1f33d54e4..7d9f771ac 100644 --- a/src/service.rs +++ b/src/service.rs @@ -436,7 +436,7 @@ impl Service { ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, - config.topics_num_results, + config.max_nodes_response, ), exit, config: config.clone(), @@ -666,12 +666,6 @@ impl Service { if callback.send(found_enrs).is_err() { warn!("Callback dropped for query {}. Results dropped", *id); } - } else { - let QueryType::FindNode(node_id) = result.target.query_type; - let topic = TopicHash::from_raw(node_id.raw()); - if self.topics.contains_key(&topic){ - // add to topic kbuckets? - } } } } @@ -978,6 +972,13 @@ impl Service { if enr.node_id() == node_address.node_id && enr.udp4_socket().map(SocketAddr::V4) == Some(node_address.socket_addr) { + self.send_topic_nodes_response( + topic, + node_address.clone(), + id.clone(), + "REGTOPIC".into(), + ); + let wait_time = self .ads .ticket_wait_time(topic) @@ -1060,6 +1061,12 @@ impl Service { } } RequestBody::TopicQuery { topic } => { + self.send_topic_nodes_response( + topic, + node_address.clone(), + id.clone(), + "REGTOPIC".into(), + ); self.send_topic_query_response(node_address, id, topic); } } @@ -1243,6 +1250,40 @@ impl Service { }); } } + RequestBody::RegisterTopic { + topic, + enr: _, + ticket: _, + } => { + if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { + for enr in nodes { + let peer_key: kbucket::Key = enr.node_id().into(); + match kbuckets.insert_or_update( + &peer_key, + enr, + NodeStatus { + state: ConnectionState::Disconnected, + direction: ConnectionDirection::Incoming, + }, + ) { + InsertResult::Failed(FailureReason::BucketFull) => { + error!("Table full") + } + 
InsertResult::Failed(FailureReason::BucketFilter) => { + error!("Failed bucket filter") + } + InsertResult::Failed(FailureReason::TableFilter) => { + error!("Failed table filter") + } + InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { + error!("Invalid self update") + } + InsertResult::Failed(_) => error!("Failed to insert ENR"), + _ => {} + } + } + } + } RequestBody::FindNode { .. } => { self.discovered(&node_id, nodes, active_request.query_id) } @@ -1599,6 +1640,46 @@ impl Service { self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); } + fn send_topic_nodes_response( + &mut self, + topic: TopicHash, + node_address: NodeAddress, + id: RequestId, + req_type: String, + ) { + let local_key: kbucket::Key = self.local_enr.read().node_id().into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + let distance_to_topic = local_key.log2_distance(&topic_key); + + let mut closest_peers: Vec = Vec::new(); + let closest_peers_length = closest_peers.len(); + if let Some(distance) = distance_to_topic { + self.kbuckets + .write() + .nodes_by_distances(&[distance], self.config.max_nodes_response) + .iter() + .for_each(|entry| closest_peers.push(entry.node.value.clone())); + + if closest_peers_length < self.config.max_nodes_response { + for entry in self + .kbuckets + .write() + .nodes_by_distances( + &[distance - 1, distance + 1], + self.config.max_nodes_response - closest_peers_length, + ) + .iter() + { + if closest_peers_length > self.config.max_nodes_response { + break; + } + closest_peers.push(entry.node.value.clone()); + } + } + } + self.send_nodes_response(closest_peers, node_address, id, &req_type); + } + /// Sends a NODES response, given a list of found ENR's. This function splits the nodes up /// into multiple responses to ensure the response stays below the maximum packet size. 
fn send_find_nodes_response( @@ -1645,7 +1726,7 @@ impl Service { nodes_to_send: Vec, node_address: NodeAddress, rpc_id: RequestId, - query: &str, + req_type: &str, ) { // if there are no nodes, send an empty response if nodes_to_send.is_empty() { @@ -1658,7 +1739,7 @@ impl Service { }; trace!( "Sending empty {} response to: {}", - query, + req_type, node_address.node_id ); if let Err(e) = self @@ -1718,7 +1799,7 @@ impl Service { for response in responses { trace!( "Sending {} response to: {}. Response: {} ", - query, + req_type, node_address, response ); diff --git a/src/service/test.rs b/src/service/test.rs index d3e4d5e78..d361aa543 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -102,7 +102,7 @@ async fn build_service( ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, - config.topics_num_results, + config.max_nodes_response, ), exit, config, From 5e0ad32b7b28c56063441b199cfb779fc6b6611d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 16 Jun 2022 23:56:55 +0200 Subject: [PATCH 140/391] Fix broken tests with select_all panic --- src/service.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/service.rs b/src/service.rs index 7d9f771ac..fe95e86e6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -634,7 +634,7 @@ impl Service { event = Service::bucket_maintenance_poll(&self.kbuckets) => { self.send_event(event); } - event = Service::bucket_maintenance_poll_topics(self.topics_kbuckets.iter_mut()) => { + Some(event) = Service::bucket_maintenance_poll_topics(self.topics_kbuckets.iter_mut()) => { self.send_event(event); } query_event = Service::query_event_poll(&mut self.queries) => { @@ -2244,7 +2244,7 @@ impl Service { /// `Discv5Event::NodeInsertedTopics` variants. async fn bucket_maintenance_poll_topics( kbuckets: impl Iterator)>, - ) -> Discv5Event { + ) -> Option { // Drain applied pending entries from the routing table. 
let mut update_kbuckets_futures = Vec::new(); for (topic_hash, topic_kbuckets) in kbuckets { @@ -2260,8 +2260,12 @@ impl Service { Poll::Pending })); } - let (event, _, _) = select_all(update_kbuckets_futures).await; - event + if update_kbuckets_futures.is_empty() { + return None; + } else { + let (event, _, _) = select_all(update_kbuckets_futures).await; + Some(event) + } } /// A future the maintains active queries. This returns completed and timed out queries, as From eb4ce72da7875e91d88d420a361e293626b567d6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 17 Jun 2022 00:15:11 +0200 Subject: [PATCH 141/391] Fix clippy warnings --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index fe95e86e6..cca96106c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2261,7 +2261,7 @@ impl Service { })); } if update_kbuckets_futures.is_empty() { - return None; + None } else { let (event, _, _) = select_all(update_kbuckets_futures).await; Some(event) From c5879f862e0b622008a189ddca2020dc5632b5f4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 17 Jun 2022 14:07:27 +0200 Subject: [PATCH 142/391] Send regtopics to fixed num nodes per kbucket --- src/config.rs | 2 - src/discv5.rs | 2 +- src/kbucket/key.rs | 2 +- src/service.rs | 152 ++++++++++++++++++++++++++++++--------------- 4 files changed, 105 insertions(+), 53 deletions(-) diff --git a/src/config.rs b/src/config.rs index b3eedc9cb..af485aa1e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -102,7 +102,6 @@ pub struct Discv5Config { pub topic_radius: u64, pub topic_query_timeout: Duration, - pub topic_query_peers: usize, /// A custom executor which can spawn the discv5 tasks. This must be a tokio runtime, with /// timing support. By default, the executor that created the discv5 struct will be used. 
@@ -146,7 +145,6 @@ impl Default for Discv5Config { ban_duration: Some(Duration::from_secs(3600)), // 1 hour topic_radius: 256, topic_query_timeout: Duration::from_secs(60), - topic_query_peers: 10, ip_mode: IpMode::default(), executor: None, } diff --git a/src/discv5.rs b/src/discv5.rs index 51320a0ec..d3ba743b2 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -548,7 +548,7 @@ impl Discv5 { pub fn remove_topic( &self, topic: String, - ) -> impl Future, RequestError>> + 'static { + ) -> impl Future> + 'static { let topic = Topic::new(topic); let channel = self.clone_channel(); diff --git a/src/kbucket/key.rs b/src/kbucket/key.rs index f27765afc..ae97f6645 100644 --- a/src/kbucket/key.rs +++ b/src/kbucket/key.rs @@ -108,7 +108,7 @@ impl From for Key { } /// A distance between two `Key`s. -#[derive(Copy, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Debug, Hash)] pub struct Distance(pub(super) U256); #[cfg(test)] diff --git a/src/service.rs b/src/service.rs index cca96106c..3d9e6e971 100644 --- a/src/service.rs +++ b/src/service.rs @@ -57,10 +57,7 @@ use std::{ task::{Context, Poll}, time::{Duration, Instant}, }; -use tokio::{ - sync::{mpsc, oneshot}, - time::interval, -}; +use tokio::sync::{mpsc, oneshot}; use tracing::{debug, error, info, trace, warn}; mod ip_vote; @@ -171,10 +168,7 @@ pub enum ServiceRequest { /// RegisterTopic publishes this node as an advertiser for a topic at given node RegisterTopic(Topic), ActiveTopics(oneshot::Sender>), - RemoveTopic( - TopicHash, - oneshot::Sender, RequestError>>, - ), + RemoveTopic(TopicHash, oneshot::Sender>), } use crate::discv5::PERMIT_BAN_LIST; @@ -233,11 +227,9 @@ pub struct Service { /// Ads advertised locally for other nodes. ads: Ads, - /// Tickets received by other nodes. - tickets: Tickets, - - /// Topics to advertise on other nodes. 
- topics: HashMap, + /// Topics tracks registration attempts of the topics to advertise on + /// other nodes. + topics: HashMap>>, /// KBuckets per topic hash. topics_kbuckets: HashMap>, @@ -245,13 +237,21 @@ pub struct Service { /// Ads currently advertised on other nodes. active_topics: Ads, + /// Tickets received by other nodes. + tickets: Tickets, + /// Locally issued tickets returned by nodes pending registration for free local ad slots. ticket_pools: TicketPools, - /// + /// Locally initiated topic query requests in process. active_topic_queries: ActiveTopicQueries, } +pub enum RegistrationState { + Confirmed(Instant), + Ticket, +} + pub struct ActiveTopicQuery { // A NodeId mapped to false is waiting for a response or failed request. queried_peers: HashMap, @@ -429,10 +429,10 @@ impl Service { discv5_recv, event_stream: None, ads, - tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashMap::new(), topics_kbuckets: HashMap::new(), active_topics, + tickets: Tickets::new(Duration::from_secs(60 * 15)), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, @@ -451,8 +451,6 @@ impl Service { /// The main execution loop of the discv5 serviced. async fn start(&mut self) { - let mut publish_topics = interval(Duration::from_secs(60 * 15)); - loop { tokio::select! { _ = &mut self.exit => { @@ -535,7 +533,7 @@ impl Service { } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); - if self.topics.insert(topic_hash, topic).is_some() { + if self.topics.insert(topic_hash, HashMap::new()).is_some() { warn!("This topic is already being advertised"); } else { // NOTE: Currently we don't expose custom filter support in the configuration. 
Users can @@ -585,10 +583,11 @@ impl Service { } } ServiceRequest::RemoveTopic(topic_hash, callback) => { - let topic = self.topics.remove(&topic_hash).map(|topic| topic.topic()); - METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); - if callback.send(Ok(topic)).is_err() { - error!("Failed to return the removed topic"); + if self.topics.remove(&topic_hash).is_some() { + METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + if callback.send(Ok(base64::encode(topic_hash.as_bytes()))).is_err() { + error!("Failed to return the removed topic"); + } } } } @@ -602,11 +601,11 @@ impl Service { self.inject_session_established(enr, direction, Some(topic_hash)); } HandlerOut::Request(node_address, request) => { - self.handle_rpc_request(node_address, *request); - } + self.handle_rpc_request(node_address, *request); + } HandlerOut::Response(node_address, response) => { - self.handle_rpc_response(node_address, *response); - } + self.handle_rpc_response(node_address, *response); + } HandlerOut::WhoAreYou(whoareyou_ref) => { // check what our latest known ENR is for this node. if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id) { @@ -691,11 +690,6 @@ impl Service { // to the ticket issuer. self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } - _ = publish_topics.tick() => { - // Topics are republished at regular intervals. - - self.topics_kbuckets.clone().keys().for_each(|topic_hash| self.send_register_topics(*topic_hash)); - } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { // No particular selection is carried out at this stage of implementation, the choice of node to give // the free ad slot to is random. 
@@ -731,15 +725,51 @@ impl Service { fn send_register_topics(&mut self, topic_hash: TopicHash) { if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { - kbuckets.get().clone().iter().for_each(|entry| { + let all_buckets_reg_attempts = self.topics.entry(topic_hash).or_default(); + // Remove expired ads + let mut new_reg_peers = Vec::new(); + for reg_attempts in all_buckets_reg_attempts.values_mut() { + reg_attempts.retain(|_, reg_attempt| { + if let RegistrationState::Confirmed(insert_time) = reg_attempt { + insert_time.elapsed() >= Duration::from_secs(15 * 60) + } else { + false + } + }); + let reg_attempts_count = reg_attempts.len(); + if reg_attempts_count < self.config.max_nodes_response { + let mut peers = Vec::new(); + let _ = kbuckets.get().clone().iter().map(|entry| { + let peer = entry.node.value.clone(); + if let Entry::Vacant(_) = reg_attempts.entry(peer.node_id()) { + peers.push(peer); + } + }); + + if !peers.is_empty() { + let max_nodes_response = self.config.max_nodes_response; + + new_reg_peers = peers + .into_iter() + .map_while(|peer| { + if reg_attempts_count < max_nodes_response { + Some(peer) + } else { + None + } + }) + .collect(); + } + } + } + for peer in new_reg_peers { let local_enr = self.local_enr.read().clone(); - if let Ok(node_contact) = - NodeContact::try_from_enr(entry.node.value.clone(), self.config.ip_mode) - .map_err(|e| error!("Failed to send REGTOPIC to peer. Error: {:?}", e)) + if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) + .map_err(|e| error!("Failed to send REGTOPIC to peer. 
Error: {:?}", e)) { - self.reg_topic_request(node_contact, topic_hash, local_enr, None) + self.reg_topic_request(node_contact, topic_hash, local_enr.clone(), None); } - }); + } } else { debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); } @@ -1397,7 +1427,7 @@ impl Service { wait_time, topic, } => { - if wait_time <= MAX_WAIT_TIME_TICKET { + if wait_time <= MAX_WAIT_TIME_TICKET && wait_time > 0 { self.tickets .insert( active_request.contact, @@ -1407,16 +1437,41 @@ impl Service { ) .ok(); } + + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let registration_attempts = self.topics.entry(topic).or_default(); + registration_attempts + .entry(distance) + .or_default() + .entry(node_id) + .or_insert(RegistrationState::Ticket); + self.send_register_topics(topic); + } } ResponseBody::RegisterConfirmation { topic } => { if let Some(enr) = active_request.contact.enr() { - self.active_topics.insert(enr, topic).ok(); - METRICS - .active_ads - .store(self.active_topics.len(), Ordering::Relaxed); - METRICS - .active_regtopic_req - .store(self.active_regtopic_requests.len(), Ordering::Relaxed); + let now = Instant::now(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let registration_attempts = self.topics.entry(topic).or_default(); + registration_attempts + .entry(distance) + .or_default() + .entry(node_id) + .or_insert(RegistrationState::Confirmed(now)); + + let _ = self.active_topics.insert(enr, topic); + + METRICS + .active_ads + .store(self.active_topics.len(), Ordering::Relaxed); + METRICS + .active_regtopic_req + .store(self.active_regtopic_requests.len(), Ordering::Relaxed); + } } } } @@ -1652,7 +1707,6 @@ impl Service { let distance_to_topic = 
local_key.log2_distance(&topic_key); let mut closest_peers: Vec = Vec::new(); - let closest_peers_length = closest_peers.len(); if let Some(distance) = distance_to_topic { self.kbuckets .write() @@ -1660,17 +1714,17 @@ impl Service { .iter() .for_each(|entry| closest_peers.push(entry.node.value.clone())); - if closest_peers_length < self.config.max_nodes_response { + if closest_peers.len() < self.config.max_nodes_response { for entry in self .kbuckets .write() .nodes_by_distances( &[distance - 1, distance + 1], - self.config.max_nodes_response - closest_peers_length, + self.config.max_nodes_response - closest_peers.len(), ) .iter() { - if closest_peers_length > self.config.max_nodes_response { + if closest_peers.len() > self.config.max_nodes_response { break; } closest_peers.push(entry.node.value.clone()); From cf650cf6b53e701741f055d3f9df23cae4e1547b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 17 Jun 2022 14:14:40 +0200 Subject: [PATCH 143/391] Remove unused derived trait impl --- src/kbucket/key.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kbucket/key.rs b/src/kbucket/key.rs index ae97f6645..f27765afc 100644 --- a/src/kbucket/key.rs +++ b/src/kbucket/key.rs @@ -108,7 +108,7 @@ impl From for Key { } /// A distance between two `Key`s. 
-#[derive(Copy, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Debug, Hash)] +#[derive(Copy, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Debug)] pub struct Distance(pub(super) U256); #[cfg(test)] From 4009438a4ffa655df90dd13d47531539834bd77d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 17 Jun 2022 14:30:34 +0200 Subject: [PATCH 144/391] Minor fixes --- src/service.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/service.rs b/src/service.rs index 3d9e6e971..cde71456f 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1006,7 +1006,7 @@ impl Service { topic, node_address.clone(), id.clone(), - "REGTOPIC".into(), + "REGTOPIC", ); let wait_time = self @@ -1095,7 +1095,7 @@ impl Service { topic, node_address.clone(), id.clone(), - "REGTOPIC".into(), + "TOPICQUERY", ); self.send_topic_query_response(node_address, id, topic); } @@ -1700,7 +1700,7 @@ impl Service { topic: TopicHash, node_address: NodeAddress, id: RequestId, - req_type: String, + req_type: &str, ) { let local_key: kbucket::Key = self.local_enr.read().node_id().into(); let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); @@ -1800,7 +1800,7 @@ impl Service { .handler_send .send(HandlerIn::Response(node_address, Box::new(response))) { - warn!("Failed to send empty FINDNODES response {}", e) + warn!("Failed to send empty {} response {}", req_type, e) } } else { // build the NODES response @@ -1861,7 +1861,7 @@ impl Service { node_address.clone(), Box::new(response), )) { - warn!("Failed to send FINDNODES response {}", e) + warn!("Failed to send {} response {}", req_type, e) } } } From 2f2167fe80707145b9d9b322ecd6718bd670e1bb Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 17 Jun 2022 15:14:05 +0200 Subject: [PATCH 145/391] Send topic queries to a number of nodes at a time --- src/service.rs | 43 +++++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/src/service.rs b/src/service.rs index 
cde71456f..cf7c1268e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -280,7 +280,7 @@ impl ActiveTopicQueries { pub enum TopicQueryState { Finished(TopicHash), TimedOut(TopicHash), - Unsatisfied(TopicHash), + Unsatisfied(TopicHash, usize), } impl Stream for ActiveTopicQueries { @@ -301,8 +301,13 @@ impl Stream for ActiveTopicQueries { .iter() .filter(|(_peer, return_status)| **return_status) .count(); + // If all peers have responded or failed the request and we still did not + // obtain enough results, the query is in TopicQueryState::Unsatisfied. if exhausted_peers >= query.queried_peers.len() { - return Poll::Ready(Some(TopicQueryState::Unsatisfied(*topic_hash))); + return Poll::Ready(Some(TopicQueryState::Unsatisfied( + *topic_hash, + query.results.len(), + ))); } } } @@ -529,7 +534,7 @@ impl Service { }); self.topics_kbuckets.insert(topic_hash, kbuckets); } - self.send_topic_queries(topic_hash, Some(callback)); + self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); @@ -714,8 +719,8 @@ impl Service { } } }, - TopicQueryState::Unsatisfied(topic_hash) => { - self.send_topic_queries(topic_hash, None); + TopicQueryState::Unsatisfied(topic_hash, num_query_peers) => { + self.send_topic_queries(topic_hash, num_query_peers, None); } } } @@ -779,6 +784,7 @@ impl Service { fn send_topic_queries( &mut self, topic_hash: TopicHash, + num_query_peers: usize, callback: Option, RequestError>>>, ) { let query = self @@ -794,18 +800,23 @@ impl Service { let queried_peers = query.queried_peers.clone(); if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { let mut peers = kbuckets.get().clone(); - let new_query_peers = peers - .iter() - .filter_map(|entry| { - (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { - query - .queried_peers - .entry(*entry.node.key.preimage()) - .or_default(); - entry.node.value - }) + let mut 
new_query_peers_iter = peers.iter().filter_map(|entry| { + (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { + query + .queried_peers + .entry(*entry.node.key.preimage()) + .or_default(); + entry.node.value }) - .collect::>(); + }); + let mut new_query_peers = Vec::new(); + while new_query_peers.len() < num_query_peers { + // Start querying nodes further away, starting at distance 256 + if let Some(enr) = new_query_peers_iter.next() { + new_query_peers.push(enr); + } + } + for enr in new_query_peers { if let Ok(node_contact) = NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) From e0324159ffc1145877cb98cba6dbe5b4c293f661 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 17 Jun 2022 18:45:10 +0200 Subject: [PATCH 146/391] Insert rev bug --- src/service.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index cf7c1268e..42ac39a73 100644 --- a/src/service.rs +++ b/src/service.rs @@ -800,7 +800,8 @@ impl Service { let queried_peers = query.queried_peers.clone(); if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { let mut peers = kbuckets.get().clone(); - let mut new_query_peers_iter = peers.iter().filter_map(|entry| { + // Start querying nodes further away, starting at distance 256 + let mut new_query_peers_iter = peers.iter().rev().filter_map(|entry| { (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { query .queried_peers @@ -811,11 +812,11 @@ impl Service { }); let mut new_query_peers = Vec::new(); while new_query_peers.len() < num_query_peers { - // Start querying nodes further away, starting at distance 256 if let Some(enr) = new_query_peers_iter.next() { new_query_peers.push(enr); } } + let _ = new_query_peers.iter().rev().count(); for enr in new_query_peers { if let Ok(node_contact) = From f7a3d36d01da6a8e3fac97d763e4049f05687c75 Mon Sep 17 00:00:00 2001 From: Diva M Date: Fri, 17 Jun 2022 12:05:36 -0500 Subject: [PATCH 147/391] fix 
double ended iterator --- src/kbucket.rs | 2 +- src/kbucket/bucket.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kbucket.rs b/src/kbucket.rs index 1b7a3e8ac..1926c0872 100644 --- a/src/kbucket.rs +++ b/src/kbucket.rs @@ -500,7 +500,7 @@ where } /// Returns an iterator over all the entries in the routing table. - pub fn iter(&mut self) -> impl Iterator> { + pub fn iter(&mut self) -> impl DoubleEndedIterator> { let applied_pending = &mut self.applied_pending; self.buckets.iter_mut().flat_map(move |table| { if let Some(applied) = table.apply_pending() { diff --git a/src/kbucket/bucket.rs b/src/kbucket/bucket.rs index 17a6ab295..9aaa7e8cb 100644 --- a/src/kbucket/bucket.rs +++ b/src/kbucket/bucket.rs @@ -273,7 +273,7 @@ where } /// Returns an iterator over the nodes in the bucket, together with their status. - pub fn iter(&self) -> impl Iterator> { + pub fn iter(&self) -> impl DoubleEndedIterator> { self.nodes.iter() } From 31e53861ec1d93192c478ff4f4cac852765318df Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 20 Jun 2022 13:26:56 +0200 Subject: [PATCH 148/391] Choose ticket based on cummulative wait time --- src/advertisement/ticket.rs | 40 +++++++++++-- src/rpc.rs | 43 ++++++++++---- src/service.rs | 115 +++++++++++++++++++----------------- src/service/test.rs | 1 + 4 files changed, 130 insertions(+), 69 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 90341500f..19524e8bc 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -118,7 +118,7 @@ impl Stream for Tickets { } } -/// An PendingTicket maps to a Ticket in Tickets upon insert. +/// An PendingTicket maps to a Ticket received by another node in Tickets upon insert. #[derive(Clone)] struct PendingTicket { /// The ActiveTopic serves to match the Ticket to an entry in Tickets' @@ -209,12 +209,41 @@ struct RegistrationWindow { open_time: Instant, } +/// The tickets that will be considered for an ad slot. 
+pub struct PoolTicket { + enr: Enr, + req_id: RequestId, + ticket: Ticket, +} + +impl PoolTicket { + pub fn new(enr: Enr, req_id: RequestId, ticket: Ticket) -> Self { + PoolTicket { + enr, + req_id, + ticket, + } + } + + pub fn node_record(&self) -> &Enr { + &self.enr + } + + pub fn req_id(&self) -> &RequestId { + &self.req_id + } + + pub fn ticket(&self) -> &Ticket { + &self.ticket + } +} + /// The TicketPools collects all the registration attempts for a free ad slot. #[derive(Default)] pub struct TicketPools { /// The ticket_pools keeps track of all the registrants and their Tickets. One /// ticket_pool per TopicHash can be open at a time. - ticket_pools: HashMap>, + ticket_pools: HashMap>, /// The expirations keeps track of when to close a ticket pool so the next one /// can be opened. expirations: VecDeque, @@ -234,7 +263,10 @@ impl TicketPools { open_time, }); } - pool.insert(node_record.node_id(), (node_record, req_id, ticket)); + pool.insert( + node_record.node_id(), + PoolTicket::new(node_record, req_id, ticket), + ); } } } @@ -242,7 +274,7 @@ impl TicketPools { } impl Stream for TicketPools { - type Item = Result<(TopicHash, HashMap), String>; + type Item = Result<(TopicHash, HashMap), String>; fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { let ticket_pool = self.expirations.front(); if let Some(reg_window) = ticket_pool { diff --git a/src/rpc.rs b/src/rpc.rs index e9f03cffa..4dc79af3c 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -690,7 +690,7 @@ pub struct Ticket { topic: TopicHash, req_time: Instant, wait_time: Duration, - //cum_wait: Option, + cum_wait: Duration, } impl rlp::Encodable for Ticket { @@ -770,10 +770,10 @@ impl rlp::Decodable for Ticket { let req_time = { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { - let s_bytes = decoded_list.remove(0).data()?; - let mut s = [0u8; 8]; - s.copy_from_slice(s_bytes); - let secs = u64::from_be_bytes(s); + let secs_data = decoded_list.remove(0).data()?; 
+ let mut secs_bytes = [0u8; 8]; + secs_bytes.copy_from_slice(secs_data); + let secs = u64::from_be_bytes(secs_bytes); let req_time_since_unix = Duration::from_secs(secs); let time_since_req = time_since_unix - req_time_since_unix; if let Some(req_time) = Instant::now().checked_sub(time_since_req) { @@ -789,10 +789,18 @@ impl rlp::Decodable for Ticket { }; let wait_time = { - let s_bytes = decoded_list.remove(0).data()?; - let mut s = [0u8; 8]; - s.copy_from_slice(s_bytes); - let secs = u64::from_be_bytes(s); + let secs_data = decoded_list.remove(0).data()?; + let mut secs_bytes = [0u8; 8]; + secs_bytes.copy_from_slice(secs_data); + let secs = u64::from_be_bytes(secs_bytes); + Duration::from_secs(secs) + }; + + let cum_wait = { + let secs_data = decoded_list.remove(0).data()?; + let mut secs_bytes = [0u8; 8]; + secs_bytes.copy_from_slice(secs_data); + let secs = u64::from_be_bytes(secs_bytes); Duration::from_secs(secs) }; @@ -802,6 +810,7 @@ impl rlp::Decodable for Ticket { topic, req_time, wait_time, + cum_wait, }) } } @@ -823,7 +832,7 @@ impl Ticket { topic: TopicHash, req_time: Instant, wait_time: Duration, - //cum_wait: Option, + cum_wait: Duration, ) -> Self { Ticket { //nonce, @@ -832,7 +841,7 @@ impl Ticket { topic, req_time, wait_time, - //cum_wait, + cum_wait, } } @@ -848,6 +857,14 @@ impl Ticket { self.wait_time } + pub fn cum_wait(&self) -> Duration { + self.cum_wait + } + + pub fn set_cum_wait(&mut self, prev_cum_wait: Duration) { + self.cum_wait = prev_cum_wait + self.wait_time; + } + pub fn encode(&self) -> Vec { let mut buf = Vec::new(); let mut s = RlpStream::new(); @@ -1148,6 +1165,7 @@ mod tests { TopicHash::from_raw([1u8; 32]), Instant::now(), Duration::from_secs(11), + Duration::from_secs(25), ); let ticket = ticket.encode(); @@ -1183,6 +1201,7 @@ mod tests { TopicHash::from_raw([1u8; 32]), Instant::now(), Duration::from_secs(11), + Duration::from_secs(25), ); let encoded = ticket.encode(); @@ -1207,6 +1226,7 @@ mod tests { 
TopicHash::from_raw([1u8; 32]), Instant::now(), Duration::from_secs(11), + Duration::from_secs(25), ); let ticket_key: [u8; 16] = rand::random(); @@ -1256,6 +1276,7 @@ mod tests { TopicHash::from_raw([1u8; 32]), Instant::now(), Duration::from_secs(11), + Duration::from_secs(25), ); let ticket = ticket.encode(); diff --git a/src/service.rs b/src/service.rs index 42ac39a73..fb5ea20f7 100644 --- a/src/service.rs +++ b/src/service.rs @@ -46,7 +46,6 @@ use fnv::FnvHashMap; use futures::{future::select_all, prelude::*}; use more_asserts::debug_unreachable; use parking_lot::RwLock; -use rand::Rng; use rpc::*; use std::{ collections::{hash_map::Entry, HashMap}, @@ -696,14 +695,11 @@ impl Service { self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { - // No particular selection is carried out at this stage of implementation, the choice of node to give - // the free ad slot to is random. - let random_index = rand::thread_rng().gen_range(0..ticket_pool.len()); - let ticket_pool = ticket_pool.values().step_by(random_index).next(); - if let Some((node_record, req_id, _ticket)) = ticket_pool.map(|(node_record, req_id, ticket)| (node_record.clone(), req_id.clone(), ticket)) { - self.ads.insert(node_record.clone(), topic).ok(); - NodeContact::try_from_enr(node_record, self.config.ip_mode).map(|contact| { - self.send_regconfirmation_response(contact.node_address(), req_id, topic); + // Select ticket with longest cummulative wait time. 
+ if let Some(pool_ticket) = ticket_pool.values().max_by_key(|pool_ticket| pool_ticket.ticket().cum_wait()) { + self.ads.insert(pool_ticket.node_record().clone(), topic).ok(); + NodeContact::try_from_enr(pool_ticket.node_record().clone(), self.config.ip_mode).map(|contact| { + self.send_regconfirmation_response(contact.node_address(), pool_ticket.req_id().clone(), topic); }).ok(); METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); } @@ -1021,62 +1017,48 @@ impl Service { "REGTOPIC", ); + // The current wait time for a given topic. let wait_time = self .ads .ticket_wait_time(topic) .unwrap_or(Duration::from_secs(0)); - let new_ticket = Ticket::new( + let mut new_ticket = Ticket::new( node_address.node_id, node_address.socket_addr.ip(), topic, tokio::time::Instant::now(), wait_time, + Duration::from_secs(0), ); - // According to spec, a ticket should always be issued upon receiving a REGTOPIC request. - self.send_ticket_response( - node_address, - id.clone(), - new_ticket.clone(), - wait_time, - ); - - // If the wait time has expired, the TICKET is added to the matching ticket pool. If this is - // the first REGTOPIC request from a given node for a given topic, the newly created ticket - // is used to add the registration attempt to to the ticket pool. - if wait_time <= Duration::from_secs(0) { - if !ticket.is_empty() { - let decoded_enr = self - .local_enr - .write() - .to_base64() - .parse::() - .map_err(|e| { - error!( - "Failed to decrypt ticket in REGTOPIC request. 
Error: {}", - e - ) - }); - if let Ok(decoded_enr) = decoded_enr { - if let Some(ticket_key) = decoded_enr.get("ticket_key") { - let decrypted_ticket = { - let aead = - Aes128Gcm::new(GenericArray::from_slice(ticket_key)); - let payload = Payload { - msg: &ticket, - aad: b"", - }; - aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) + if !ticket.is_empty() { + let decoded_enr = self + .local_enr + .write() + .to_base64() + .parse::() + .map_err(|e| { + error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) + }); + if let Ok(decoded_enr) = decoded_enr { + if let Some(ticket_key) = decoded_enr.get("ticket_key") { + let decrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", + }; + aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) .map_err(|e| { error!( "Failed to decrypt ticket in REGTOPIC request. Error: {}", e ) }) - }; - if let Ok(decrypted_ticket) = decrypted_ticket { - Ticket::decode(&decrypted_ticket) + }; + if let Ok(decrypted_ticket) = decrypted_ticket { + Ticket::decode(&decrypted_ticket) .map_err(|e| { error!( "Failed to decode ticket in REGTOPIC request. Error: {}", @@ -1084,19 +1066,44 @@ impl Service { ) }) .map(|ticket| { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket if let Some(ticket) = ticket { - if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); + // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no + // wait time for the ad slot. See discv5 spec. This node will not store tickets received + // with wait time 0. + new_ticket.set_cum_wait(ticket.cum_wait()); + self.send_ticket_response( + node_address, + id.clone(), + new_ticket.clone(), + wait_time, + ); + // If current wait time is 0, the ticket is added to the matching ticket pool. 
+ if wait_time <= Duration::from_secs(0) { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket. For example if a malicious node tries to use + // another ticket issued by us. + if ticket == new_ticket { + self.ticket_pools.insert(enr, id, ticket); + } } } }) .ok(); - } } } - } else { + } + } else { + // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no + // wait time for the ad slot. See discv5 spec. This node will not store tickets received + // with wait time 0. + self.send_ticket_response( + node_address, + id.clone(), + new_ticket.clone(), + wait_time, + ); + // If current wait time is 0, the ticket is added to the matching ticket pool. + if wait_time <= Duration::from_secs(0) { self.ticket_pools.insert(enr, id, new_ticket); } } @@ -1743,7 +1750,7 @@ impl Service { } } } - self.send_nodes_response(closest_peers, node_address, id, &req_type); + self.send_nodes_response(closest_peers, node_address, id, req_type); } /// Sends a NODES response, given a list of found ENR's. 
This function splits the nodes up diff --git a/src/service/test.rs b/src/service/test.rs index d361aa543..acd5fadff 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -225,6 +225,7 @@ async fn encrypt_decrypt_ticket() { TopicHash::from_raw([1u8; 32]), tokio::time::Instant::now(), tokio::time::Duration::from_secs(5), + tokio::time::Duration::from_secs(25), ); let ticket_key = decoded_enr.get("ticket_key").unwrap(); From 2c6708a9ae66cb7e477d5ed8ffa31d3477ed10a4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 20 Jun 2022 14:53:55 +0200 Subject: [PATCH 149/391] Fix tests --- src/advertisement/mod.rs | 2 +- src/rpc.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 1fe72ecf0..38da16825 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -165,7 +165,7 @@ impl Ads { let nodes = self.ads.entry(topic).or_default(); let ad_node = AdNode::new(node_record, now); if nodes.contains(&ad_node) { - error!( + debug!( "This node {} is already advertising this topic", ad_node.node_record().node_id() ); diff --git a/src/rpc.rs b/src/rpc.rs index 4dc79af3c..2faef1909 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -708,6 +708,7 @@ impl rlp::Encodable for Ticket { s.append(&time_stamp.as_secs().to_be_bytes().to_vec()); } s.append(&self.wait_time.as_secs().to_be_bytes().to_vec()); + s.append(&self.wait_time.as_secs().to_be_bytes().to_vec()); } } @@ -718,7 +719,7 @@ impl rlp::Decodable for Ticket { return Err(DecoderError::RlpExpectedToBeList); } - if rlp.item_count() != Ok(5) { + if rlp.item_count() != Ok(6) { error!( "List has wrong item count, should be 5 but is {:?}", rlp.item_count() From 73ff727f3c5a54db6ff3c773bb552cf8ef8d4e72 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 20 Jun 2022 15:01:54 +0200 Subject: [PATCH 150/391] Remove unused imports --- src/advertisement/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/mod.rs 
b/src/advertisement/mod.rs index 38da16825..37bb74195 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -11,7 +11,7 @@ use std::{ }; use tokio::time::Instant; use topic::TopicHash; -use tracing::{debug, error}; +use tracing::debug; mod test; pub mod ticket; From aa3a758e3fcfc85646cd8a4fc3cd3580c55ac793 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 09:47:36 +0200 Subject: [PATCH 151/391] Fix test --- src/service.rs | 4 +++- src/service/test.rs | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index fb5ea20f7..95802cc5e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1482,7 +1482,9 @@ impl Service { .entry(node_id) .or_insert(RegistrationState::Confirmed(now)); - let _ = self.active_topics.insert(enr, topic); + let _ = self.active_topics.insert(enr, topic).map_err(|e| { + error!("Couldn't insert topic into active topics. Error: {}.", e) + }); METRICS .active_ads diff --git a/src/service/test.rs b/src/service/test.rs index acd5fadff..a5af42f08 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -252,3 +252,6 @@ async fn encrypt_decrypt_ticket() { assert_eq!(decoded_ticket, ticket); } + +#[tokio::test] +async fn test_ticketing() {} From d41388e0eb83f9e8de80164a9beaf6e3c435ccf7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 10:11:32 +0200 Subject: [PATCH 152/391] Fix log output --- src/rpc.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 2faef1909..960e07887 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -350,7 +350,7 @@ impl std::fmt::Display for ResponseBody { write!(f, "]") } ResponseBody::Talk { response } => { - write!(f, "Response: Response {}", hex::encode(response)) + write!(f, "TALK: Response {}", hex::encode(response)) } ResponseBody::Ticket { ticket, @@ -366,7 +366,7 @@ impl std::fmt::Display for ResponseBody { ) } ResponseBody::RegisterConfirmation { topic } => { - write!(f, 
"REGTOPIC: Registered: {}", topic) + write!(f, "REGCONFIRMATION: Registered: {}", topic) } } } @@ -394,7 +394,7 @@ impl std::fmt::Display for RequestBody { RequestBody::TopicQuery { topic } => write!(f, "TOPICQUERY: topic: {}", topic), RequestBody::RegisterTopic { topic, enr, ticket } => write!( f, - "RegisterTopic: topic: {}, enr: {}, ticket: {}", + "REGTOPIC: topic: {}, enr: {}, ticket: {}", topic, enr.to_base64(), hex::encode(ticket), From 746c3f6af1a849a2c39a18d1e63972571a9fd51c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 10:29:55 +0200 Subject: [PATCH 153/391] Add debug output --- src/service.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 95802cc5e..5d980cd05 100644 --- a/src/service.rs +++ b/src/service.rs @@ -573,7 +573,7 @@ impl Service { InsertResult::Failed(FailureReason::TableFilter) => error!("Failed table filter"), InsertResult::Failed(FailureReason::InvalidSelfUpdate) => error!("Invalid self update"), InsertResult::Failed(_) => error!("Failed to insert ENR"), - _ => {}, + _ => debug!("Insertion of node {} into KBucket of {} successful.", entry.node.key.preimage(), topic_hash), } }); self.topics_kbuckets.insert(topic_hash, kbuckets); @@ -760,6 +760,8 @@ impl Service { } }) .collect(); + } else { + debug!("No peers found to send regtopics to."); } } } From cc6f5a73d964c9ee52cd79bda7731f24c7c06926 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 10:33:45 +0200 Subject: [PATCH 154/391] Add debug output --- src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service.rs b/src/service.rs index 5d980cd05..25f9b148b 100644 --- a/src/service.rs +++ b/src/service.rs @@ -760,6 +760,7 @@ impl Service { } }) .collect(); + debug!("Found new reg peers. 
Peers: {:?}", new_reg_peers); } else { debug!("No peers found to send regtopics to."); } From 2fe76f9cf8e216724948e01468039d0faa7e40ee Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 10:41:40 +0200 Subject: [PATCH 155/391] Add more debug info --- src/service.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/service.rs b/src/service.rs index 25f9b148b..211221baa 100644 --- a/src/service.rs +++ b/src/service.rs @@ -536,6 +536,7 @@ impl Service { self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); } ServiceRequest::RegisterTopic(topic) => { + debug!("Received REGTOPIC request"); let topic_hash = topic.hash(); if self.topics.insert(topic_hash, HashMap::new()).is_some() { warn!("This topic is already being advertised"); @@ -552,6 +553,7 @@ impl Service { (None, None) }; + debug!("Initiating kbuckets for topic hash {}", topic_hash); let mut kbuckets = KBucketsTable::new( NodeId::new(&topic_hash.as_bytes()).into(), Duration::from_secs(60), @@ -559,6 +561,7 @@ impl Service { table_filter, bucket_filter, ); + debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); self.kbuckets.write().iter().for_each(|entry| { match kbuckets.insert_or_update( entry.node.key, From df74c4b6d30c60561042a884e00cba5955eb1824 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 10:53:31 +0200 Subject: [PATCH 156/391] Run cargo fmt --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 211221baa..39108c283 100644 --- a/src/service.rs +++ b/src/service.rs @@ -763,7 +763,7 @@ impl Service { } }) .collect(); - debug!("Found new reg peers. Peers: {:?}", new_reg_peers); + debug!("Found new reg peers. 
Peers: {:?}", new_reg_peers); } else { debug!("No peers found to send regtopics to."); } From 28188d0aa9c3338fcdd6ab0064488c81c17ba67a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 11:07:04 +0200 Subject: [PATCH 157/391] Move hashing of topic up one layer to app --- src/discv5.rs | 5 +++-- src/service.rs | 5 ++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index d3ba743b2..01b7dabf4 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -571,14 +571,15 @@ impl Discv5 { &self, topic: String, ) -> impl Future> + 'static { - let topic = Topic::new(topic); let channel = self.clone_channel(); async move { let channel = channel .as_ref() .map_err(|_| RequestError::ServiceNotStarted)?; - let event = ServiceRequest::RegisterTopic(topic.clone()); + let topic_hash = Topic::new(&topic).hash(); + let event = ServiceRequest::RegisterTopic(topic_hash); + debug!("Registering topic {} with Sha256 hash {}", topic, topic_hash); // send the request channel .send(event) diff --git a/src/service.rs b/src/service.rs index 39108c283..88ddc6325 100644 --- a/src/service.rs +++ b/src/service.rs @@ -165,7 +165,7 @@ pub enum ServiceRequest { /// Queries given node for nodes advertising a topic hash TopicQuery(TopicHash, oneshot::Sender, RequestError>>), /// RegisterTopic publishes this node as an advertiser for a topic at given node - RegisterTopic(Topic), + RegisterTopic(TopicHash), ActiveTopics(oneshot::Sender>), RemoveTopic(TopicHash, oneshot::Sender>), } @@ -535,9 +535,8 @@ impl Service { } self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); } - ServiceRequest::RegisterTopic(topic) => { + ServiceRequest::RegisterTopic(topic_hash) => { debug!("Received REGTOPIC request"); - let topic_hash = topic.hash(); if self.topics.insert(topic_hash, HashMap::new()).is_some() { warn!("This topic is already being advertised"); } else { From 8c8b1ff315341ee1da359877855788af2ff25ff1 Mon Sep 17 00:00:00 2001 
From: Emilia Hane Date: Tue, 21 Jun 2022 11:15:50 +0200 Subject: [PATCH 158/391] Remove unused deps --- src/discv5.rs | 5 ++++- src/service.rs | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 01b7dabf4..96fb09d2b 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -579,7 +579,10 @@ impl Discv5 { .map_err(|_| RequestError::ServiceNotStarted)?; let topic_hash = Topic::new(&topic).hash(); let event = ServiceRequest::RegisterTopic(topic_hash); - debug!("Registering topic {} with Sha256 hash {}", topic, topic_hash); + debug!( + "Registering topic {} with Sha256 hash {}", + topic, topic_hash + ); // send the request channel .send(event) diff --git a/src/service.rs b/src/service.rs index 88ddc6325..fa48ece34 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ use crate::{ advertisement::{ ticket::{ActiveRegtopicRequests, TicketPools, Tickets}, - topic::{Sha256Topic as Topic, TopicHash}, + topic::TopicHash, Ads, }, error::{RequestError, ResponseError}, From bbe432917c4e7768af48d3cc2bccd9dcd822ae81 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 15:26:45 +0200 Subject: [PATCH 159/391] Fix send_register_topics logic --- src/service.rs | 63 ++++++++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 35 deletions(-) diff --git a/src/service.rs b/src/service.rs index fa48ece34..086ff9a82 100644 --- a/src/service.rs +++ b/src/service.rs @@ -536,7 +536,6 @@ impl Service { self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); } ServiceRequest::RegisterTopic(topic_hash) => { - debug!("Received REGTOPIC request"); if self.topics.insert(topic_hash, HashMap::new()).is_some() { warn!("This topic is already being advertised"); } else { @@ -575,7 +574,7 @@ impl Service { InsertResult::Failed(FailureReason::TableFilter) => error!("Failed table filter"), InsertResult::Failed(FailureReason::InvalidSelfUpdate) => error!("Invalid self 
update"), InsertResult::Failed(_) => error!("Failed to insert ENR"), - _ => debug!("Insertion of node {} into KBucket of {} successful.", entry.node.key.preimage(), topic_hash), + _ => debug!("Insertion of node {} into KBucket of {} was successful", entry.node.key.preimage(), topic_hash), } }); self.topics_kbuckets.insert(topic_hash, kbuckets); @@ -727,44 +726,38 @@ impl Service { } fn send_register_topics(&mut self, topic_hash: TopicHash) { - if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { - let all_buckets_reg_attempts = self.topics.entry(topic_hash).or_default(); + if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { + let reg_attempts = self.topics.entry(topic_hash).or_default(); // Remove expired ads let mut new_reg_peers = Vec::new(); - for reg_attempts in all_buckets_reg_attempts.values_mut() { - reg_attempts.retain(|_, reg_attempt| { - if let RegistrationState::Confirmed(insert_time) = reg_attempt { - insert_time.elapsed() >= Duration::from_secs(15 * 60) - } else { - false - } - }); - let reg_attempts_count = reg_attempts.len(); - if reg_attempts_count < self.config.max_nodes_response { - let mut peers = Vec::new(); - let _ = kbuckets.get().clone().iter().map(|entry| { - let peer = entry.node.value.clone(); - if let Entry::Vacant(_) = reg_attempts.entry(peer.node_id()) { - peers.push(peer); + debug!("Sending REGTOPICs to new peers"); + for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { + if let Entry::Occupied(ref mut entry) = reg_attempts.entry(index as u64) { + let registrations = entry.get_mut(); + registrations.retain(|_, reg_attempt| { + if let RegistrationState::Confirmed(insert_time) = reg_attempt { + insert_time.elapsed() >= Duration::from_secs(15 * 60) + } else { + false } }); - - if !peers.is_empty() { - let max_nodes_response = self.config.max_nodes_response; - - new_reg_peers = peers - .into_iter() - .map_while(|peer| { - if reg_attempts_count < max_nodes_response { - 
Some(peer) - } else { - None + // The count of active registration attempts after expired adds have been removed + if registrations.len() < self.config.max_nodes_response + && registrations.len() != bucket.num_entries() + { + let mut bucket_iter = bucket.iter(); + let mut new_peers = Vec::new(); + while new_peers.len() + registrations.len() < self.config.max_nodes_response + { + if let Some(peer) = bucket_iter.next() { + if let Entry::Vacant(_) = registrations.entry(*peer.key.preimage()) + { + debug!("Found new reg peer. Peer: {:?}", peer.key.preimage()); + new_peers.push(peer.value.clone()) } - }) - .collect(); - debug!("Found new reg peers. Peers: {:?}", new_reg_peers); - } else { - debug!("No peers found to send regtopics to."); + } + } + new_reg_peers.append(&mut new_peers); } } } From 622ca2bfe2b926b44608d2cc48e10e7bfd322410 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 15:29:38 +0200 Subject: [PATCH 160/391] Add warning --- src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service.rs b/src/service.rs index 086ff9a82..7d16829c9 100644 --- a/src/service.rs +++ b/src/service.rs @@ -731,6 +731,7 @@ impl Service { // Remove expired ads let mut new_reg_peers = Vec::new(); debug!("Sending REGTOPICs to new peers"); + // WARNING! This currently only works as long as buckets range is one bit for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { if let Entry::Occupied(ref mut entry) = reg_attempts.entry(index as u64) { let registrations = entry.get_mut(); From 1746aaf286cff4da8bf6efffb8b2c621659dfad7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 15:50:41 +0200 Subject: [PATCH 161/391] Fix regtopics --- src/service.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/service.rs b/src/service.rs index 7d16829c9..5d4701584 100644 --- a/src/service.rs +++ b/src/service.rs @@ -756,6 +756,8 @@ impl Service { debug!("Found new reg peer. 
Peer: {:?}", peer.key.preimage()); new_peers.push(peer.value.clone()) } + } else { + break; } } new_reg_peers.append(&mut new_peers); From 066e2a53a37675317e776be455b3d69fa5fc901d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 15:59:52 +0200 Subject: [PATCH 162/391] Fix regtopics --- src/service.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/service.rs b/src/service.rs index 5d4701584..0c50dc72c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -730,9 +730,9 @@ impl Service { let reg_attempts = self.topics.entry(topic_hash).or_default(); // Remove expired ads let mut new_reg_peers = Vec::new(); - debug!("Sending REGTOPICs to new peers"); // WARNING! This currently only works as long as buckets range is one bit for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { + // Remove expired registrations if let Entry::Occupied(ref mut entry) = reg_attempts.entry(index as u64) { let registrations = entry.get_mut(); registrations.retain(|_, reg_attempt| { @@ -742,28 +742,27 @@ impl Service { false } }); + } + let registrations = reg_attempts.entry(index as u64).or_default(); // The count of active registration attempts after expired adds have been removed if registrations.len() < self.config.max_nodes_response && registrations.len() != bucket.num_entries() { - let mut bucket_iter = bucket.iter(); let mut new_peers = Vec::new(); - while new_peers.len() + registrations.len() < self.config.max_nodes_response - { - if let Some(peer) = bucket_iter.next() { - if let Entry::Vacant(_) = registrations.entry(*peer.key.preimage()) - { - debug!("Found new reg peer. Peer: {:?}", peer.key.preimage()); - new_peers.push(peer.value.clone()) - } - } else { + for peer in bucket.iter() { + if new_peers.len() + registrations.len() + >= self.config.max_nodes_response + { break; } + if let Entry::Vacant(_) = registrations.entry(*peer.key.preimage()) { + debug!("Found new reg peer. 
Peer: {:?}", peer.key.preimage()); + new_peers.push(peer.value.clone()) + } } new_reg_peers.append(&mut new_peers); } } - } for peer in new_reg_peers { let local_enr = self.local_enr.read().clone(); if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) From dd12411cf158c33788d61b373d0dabc7c3551f85 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 16:37:27 +0200 Subject: [PATCH 163/391] Allow NODES repsonse for RETOPIC --- src/rpc.rs | 4 +++- src/service.rs | 30 ++++++++++++++---------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 960e07887..54c00f6f2 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -224,7 +224,9 @@ impl Response { ResponseBody::Nodes { .. } => { matches!( req, - RequestBody::FindNode { .. } | RequestBody::TopicQuery { .. } + RequestBody::FindNode { .. } + | RequestBody::TopicQuery { .. } + | RequestBody::RegisterTopic { .. } ) } ResponseBody::Talk { .. } => matches!(req, RequestBody::Talk { .. }), diff --git a/src/service.rs b/src/service.rs index 0c50dc72c..b6c257cb1 100644 --- a/src/service.rs +++ b/src/service.rs @@ -744,25 +744,23 @@ impl Service { }); } let registrations = reg_attempts.entry(index as u64).or_default(); - // The count of active registration attempts after expired adds have been removed - if registrations.len() < self.config.max_nodes_response - && registrations.len() != bucket.num_entries() - { - let mut new_peers = Vec::new(); - for peer in bucket.iter() { - if new_peers.len() + registrations.len() - >= self.config.max_nodes_response - { - break; - } - if let Entry::Vacant(_) = registrations.entry(*peer.key.preimage()) { - debug!("Found new reg peer. 
Peer: {:?}", peer.key.preimage()); - new_peers.push(peer.value.clone()) - } + // The count of active registration attempts after expired adds have been removed + if registrations.len() < self.config.max_nodes_response + && registrations.len() != bucket.num_entries() + { + let mut new_peers = Vec::new(); + for peer in bucket.iter() { + if new_peers.len() + registrations.len() >= self.config.max_nodes_response { + break; + } + if let Entry::Vacant(_) = registrations.entry(*peer.key.preimage()) { + debug!("Found new reg peer. Peer: {:?}", peer.key.preimage()); + new_peers.push(peer.value.clone()) } - new_reg_peers.append(&mut new_peers); } + new_reg_peers.append(&mut new_peers); } + } for peer in new_reg_peers { let local_enr = self.local_enr.read().clone(); if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) From b914ac4d17b578f8136e71c1d91e09da83e77f72 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 16:57:16 +0200 Subject: [PATCH 164/391] Match NODES response for REGTOPIC req --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index b6c257cb1..a3eabe6b1 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1177,7 +1177,7 @@ impl Service { // These are sanitized and ordered let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, - RequestBody::TopicQuery { .. } => &topic_radius, + RequestBody::TopicQuery { .. } | RequestBody::RegisterTopic { .. 
} => &topic_radius, _ => unreachable!(), }; From ed26c952dbe3b9fdb3bf1a86e62d6e3e471c4293 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 17:41:17 +0200 Subject: [PATCH 165/391] Add hack for topic query double nodes response --- src/handler/mod.rs | 1 + src/service.rs | 58 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 53 insertions(+), 6 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 410e54be9..2428ab769 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -388,6 +388,7 @@ impl Handler { .await } PacketKind::Message { src_id } => { + debug!("Received a message"); let node_address = NodeAddress { socket_addr: inbound_packet.src_address, node_id: src_id, diff --git a/src/service.rs b/src/service.rs index a3eabe6b1..88ead970b 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1291,11 +1291,57 @@ impl Service { match active_request.request_body { RequestBody::TopicQuery { topic } => { - if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { - nodes.into_iter().for_each(|enr| { - query.results.insert(enr.node_id(), enr); - }); + let mut is_ads = false; + for enr in nodes.iter() { + let sender_key: kbucket::Key = node_id.into(); + let peer_key: kbucket::Key = enr.node_id().into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + if let Some(distance_sender_topic) = sender_key.log2_distance(&topic_key) { + if let Some(distance_peer_topic) = peer_key.log2_distance(&topic_key) { + if distance_peer_topic > distance_sender_topic + 1 || distance_peer_topic < distance_sender_topic - 1 { + is_ads = true; + break; + } + } + } } + if is_ads { + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { + nodes.into_iter().for_each(|enr| { + query.results.insert(enr.node_id(), enr); + }); + } + } else { + if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { + for enr in nodes { + let peer_key: kbucket::Key = enr.node_id().into(); + match 
kbuckets.insert_or_update( + &peer_key, + enr.clone(), + NodeStatus { + state: ConnectionState::Disconnected, + direction: ConnectionDirection::Incoming, + }, + ) { + InsertResult::Failed(FailureReason::BucketFull) => { + error!("Table full") + } + InsertResult::Failed(FailureReason::BucketFilter) => { + error!("Failed bucket filter") + } + InsertResult::Failed(FailureReason::TableFilter) => { + error!("Failed table filter") + } + InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { + error!("Invalid self update") + } + InsertResult::Failed(_) => error!("Failed to insert ENR"), + _ => debug!("Insertion of node {} into KBucket of {} was successful", enr.node_id(), topic), + } + } + } + } + } RequestBody::RegisterTopic { topic, @@ -1307,7 +1353,7 @@ impl Service { let peer_key: kbucket::Key = enr.node_id().into(); match kbuckets.insert_or_update( &peer_key, - enr, + enr.clone(), NodeStatus { state: ConnectionState::Disconnected, direction: ConnectionDirection::Incoming, @@ -1326,7 +1372,7 @@ impl Service { error!("Invalid self update") } InsertResult::Failed(_) => error!("Failed to insert ENR"), - _ => {} + _ => debug!("Insertion of node {} into KBucket of {} was successful", enr.node_id(), topic), } } } From ea80eab5f8f0b7cec3687226bb0298189ba5db33 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 17:49:04 +0200 Subject: [PATCH 166/391] Add warning to topic query double nodes response logic --- src/service.rs | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/src/service.rs b/src/service.rs index 88ead970b..87a148083 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1177,7 +1177,9 @@ impl Service { // These are sanitized and ordered let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, - RequestBody::TopicQuery { .. } | RequestBody::RegisterTopic { .. } => &topic_radius, + RequestBody::TopicQuery { .. 
} | RequestBody::RegisterTopic { .. } => { + &topic_radius + } _ => unreachable!(), }; @@ -1295,10 +1297,19 @@ impl Service { for enr in nodes.iter() { let sender_key: kbucket::Key = node_id.into(); let peer_key: kbucket::Key = enr.node_id().into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); - if let Some(distance_sender_topic) = sender_key.log2_distance(&topic_key) { - if let Some(distance_peer_topic) = peer_key.log2_distance(&topic_key) { - if distance_peer_topic > distance_sender_topic + 1 || distance_peer_topic < distance_sender_topic - 1 { + let topic_key: kbucket::Key = + NodeId::new(&topic.as_bytes()).into(); + if let Some(distance_sender_topic) = + sender_key.log2_distance(&topic_key) + { + if let Some(distance_peer_topic) = + peer_key.log2_distance(&topic_key) + { + // WARNING! This hack is based on the probability that ad nodes are not all in the + // same bucket +-1 + if distance_peer_topic > distance_sender_topic + 1 + || distance_peer_topic < distance_sender_topic - 1 + { is_ads = true; break; } @@ -1306,16 +1317,17 @@ impl Service { } } if is_ads { - if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { + if let Some(query) = + self.active_topic_queries.queries.get_mut(&topic) + { nodes.into_iter().for_each(|enr| { query.results.insert(enr.node_id(), enr); }); } - } else { - if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { - for enr in nodes { - let peer_key: kbucket::Key = enr.node_id().into(); - match kbuckets.insert_or_update( + } else if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { + for enr in nodes { + let peer_key: kbucket::Key = enr.node_id().into(); + match kbuckets.insert_or_update( &peer_key, enr.clone(), NodeStatus { @@ -1338,10 +1350,8 @@ impl Service { InsertResult::Failed(_) => error!("Failed to insert ENR"), _ => debug!("Insertion of node {} into KBucket of {} was successful", enr.node_id(), topic), } - } } } - } RequestBody::RegisterTopic { topic, From 
f817aee8241c5901c454070360b6ca95acb974fb Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 18:10:30 +0200 Subject: [PATCH 167/391] Add debug info --- src/handler/mod.rs | 1 - src/service.rs | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 2428ab769..410e54be9 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -388,7 +388,6 @@ impl Handler { .await } PacketKind::Message { src_id } => { - debug!("Received a message"); let node_address = NodeAddress { socket_addr: inbound_packet.src_address, node_id: src_id, diff --git a/src/service.rs b/src/service.rs index 87a148083..b95d9b2cd 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1004,6 +1004,7 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { + debug!("Received a REGTOPIC req"); // Drop if request tries to advertise another node than sender if enr.node_id() == node_address.node_id && enr.udp4_socket().map(SocketAddr::V4) == Some(node_address.socket_addr) From 741c19a8f2c0f7ac2d5e789777c2163e6ff26e15 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 18:21:20 +0200 Subject: [PATCH 168/391] Add debug info --- src/service.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/service.rs b/src/service.rs index b95d9b2cd..be49f5da0 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1106,6 +1106,8 @@ impl Service { self.ticket_pools.insert(enr, id, new_ticket); } } + } else { + debug!("REGTOPIC enr does not match request sender enr"); } } RequestBody::TopicQuery { topic } => { From df6fdb156e34e58544f7395f05a25b393eeda029 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 18:21:32 +0200 Subject: [PATCH 169/391] Add debug info --- src/service.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index be49f5da0..37574f414 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1004,7 +1004,6 @@ impl 
Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - debug!("Received a REGTOPIC req"); // Drop if request tries to advertise another node than sender if enr.node_id() == node_address.node_id && enr.udp4_socket().map(SocketAddr::V4) == Some(node_address.socket_addr) From 1b9e9c2c31794d8d0788d391c7e41576e32cb267 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 20:13:20 +0200 Subject: [PATCH 170/391] Add debug messages --- src/service.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 37574f414..bea316df5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1008,6 +1008,7 @@ impl Service { if enr.node_id() == node_address.node_id && enr.udp4_socket().map(SocketAddr::V4) == Some(node_address.socket_addr) { + debug!("Sending NODES response"); self.send_topic_nodes_response( topic, node_address.clone(), @@ -1091,6 +1092,7 @@ impl Service { } } } else { + debug!("Sending TICKET response"); // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no // wait time for the ad slot. See discv5 spec. This node will not store tickets received // with wait time 0. @@ -1106,7 +1108,7 @@ impl Service { } } } else { - debug!("REGTOPIC enr does not match request sender enr"); + debug!("REGTOPIC enr does not match request sender's enr. 
Nodes can only register themselves."); } } RequestBody::TopicQuery { topic } => { From 003b816ce2806bc0e3abd1dccbac2b633a971196 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 22:13:16 +0200 Subject: [PATCH 171/391] Update regtopic active requests for addtional NODES response --- src/advertisement/ticket.rs | 26 +++++++++-------- src/service.rs | 56 ++++++++++++++++++++++--------------- 2 files changed, 49 insertions(+), 33 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 19524e8bc..e768f237e 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -7,7 +7,7 @@ use delay_map::HashMapDelay; use enr::NodeId; use more_asserts::debug_unreachable; use node_info::NodeContact; -use std::cmp::Eq; +use std::{cmp::Eq, collections::hash_map::Entry}; /// Max tickets that are stored from one node for a topic (in the configured /// time period). @@ -18,9 +18,9 @@ const REGISTRATION_WINDOW_IN_SECS: u64 = 10; const MAX_REGISTRANTS_PER_AD_SLOT: usize = 50; /// The duration for which requests are stored. const REQUEST_TIMEOUT_IN_SECS: u64 = 15; -/// Each REGTOPIC request can get both a TICKET response and REGCONFIRMATION -/// response. -const MAX_RESPONSES_PER_REGTOPIC: u8 = 2; +/// Each REGTOPIC request gets a TICKET response, NODES response and can get +/// a REGCONFIRMATION response. +const MAX_RESPONSES_PER_REGTOPIC: u8 = 3; /// A topic is active when it associated with the node id from a node it is /// published on. 
@@ -347,14 +347,9 @@ impl ActiveRegtopicRequests { pub fn remove(&mut self, req_id: &RequestId) -> Option { if let Some(seen_count) = self.request_history.get_mut(req_id) { *seen_count += 1; - if *seen_count < 1 { + if *seen_count == 0 { self.request_history.remove(req_id); - self.requests.remove(req_id).map(|req| ActiveRequest { - contact: req.contact.clone(), - request_body: req.request_body.clone(), - query_id: req.query_id, - callback: None, - }) + self.requests.remove(req_id) } else { self.requests.get(req_id).map(|req| ActiveRequest { contact: req.contact.clone(), @@ -368,6 +363,15 @@ impl ActiveRegtopicRequests { } } + // If NODES response needs to be divided into multiple NODES responses, the request + // must be reinserted. + pub fn reinsert(&mut self, req_id: RequestId) { + self.remove_expired(); + if let Entry::Occupied(ref mut entry) = self.request_history.entry(req_id) { + *entry.get_mut() += 1; + } + } + pub fn insert(&mut self, req_id: RequestId, req: ActiveRequest) { self.remove_expired(); let now = Instant::now(); diff --git a/src/service.rs b/src/service.rs index bea316df5..2578167b1 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1008,7 +1008,7 @@ impl Service { if enr.node_id() == node_address.node_id && enr.udp4_socket().map(SocketAddr::V4) == Some(node_address.socket_addr) { - debug!("Sending NODES response"); + debug!("Sending NODES response to REGTOPIC"); self.send_topic_nodes_response( topic, node_address.clone(), @@ -1128,13 +1128,12 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); - // A REGTOPIC request can receive both a TICKET and then also possibly a REGCONFIRMATION - // response. If no active request exists in active_requests, the response may still be a - // REGCONFIRMATION response. - let active_request = if let Some(active_request) = self.active_requests.remove(&id) { - Some(active_request) + // A REGTOPIC request receives a TICKET, NODES and then also possibly a REGCONFIRMATION + // response. 
+ let (active_request, req_type) = if let Some(active_request) = self.active_requests.remove(&id) { + (Some(active_request), ActiveRequestType::Other) } else { - self.active_regtopic_requests.remove(&id) + (self.active_regtopic_requests.remove(&id), ActiveRequestType::RegisterTopic) }; if let Some(mut active_request) = active_request { @@ -1272,7 +1271,14 @@ impl Service { current_response.received_nodes.append(&mut nodes); self.active_nodes_responses .insert(node_id, current_response); - self.active_requests.insert(id, active_request); + match req_type { + ActiveRequestType::RegisterTopic => { + self.active_regtopic_requests.reinsert(id); + } + _ => { + self.active_requests.insert(id, active_request); + } + } return; } @@ -1658,23 +1664,12 @@ impl Service { enr, ticket: ticket_bytes, }; - - let active_request = ActiveRequest { - contact: contact.clone(), - request_body: request_body.clone(), - query_id: None, - callback: None, - }; - let req_id = self.send_rpc_request(ActiveRequest { + self.send_rpc_request(ActiveRequest { contact, request_body, query_id: None, callback: None, }); - self.active_regtopic_requests.insert(req_id, active_request); - METRICS - .active_regtopic_req - .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } /// Queries a node for the ads that node currently advertises for a given topic. @@ -1988,9 +1983,10 @@ impl Service { fn send_rpc_request(&mut self, active_request: ActiveRequest) -> RequestId { // Generate a random rpc_id which is matched per node id let id = RequestId::random(); + let request_body = active_request.request_body.clone(); let request: Request = Request { id: id.clone(), - body: active_request.request_body.clone(), + body: request_body.clone(), }; let contact = active_request.contact.clone(); @@ -2000,7 +1996,17 @@ impl Service { .send(HandlerIn::Request(contact, Box::new(request))) .is_ok() { - self.active_requests.insert(id.clone(), active_request); + match request_body { + RequestBody::RegisterTopic { .. 
} => { + self.active_regtopic_requests.insert(id.clone(), active_request); + METRICS + .active_regtopic_req + .store(self.active_regtopic_requests.len(), Ordering::Relaxed); + } + _ => { + self.active_requests.insert(id.clone(), active_request); + } + } } id } @@ -2458,3 +2464,9 @@ enum ConnectionStatus { /// The node has disconnected Disconnected, } + +pub enum ActiveRequestType { + RegisterTopic, + TopicQuery, + Other, +} From 505aff45a8305d35c4d51a6256850990ab79c7ce Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 22:23:53 +0200 Subject: [PATCH 172/391] Amplify distances reqeusted for topics --- src/service.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/service.rs b/src/service.rs index 2578167b1..3ffff2034 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1128,13 +1128,15 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); - // A REGTOPIC request receives a TICKET, NODES and then also possibly a REGCONFIRMATION - // response. 
- let (active_request, req_type) = if let Some(active_request) = self.active_requests.remove(&id) { - (Some(active_request), ActiveRequestType::Other) - } else { - (self.active_regtopic_requests.remove(&id), ActiveRequestType::RegisterTopic) - }; + let (active_request, req_type) = + if let Some(active_request) = self.active_requests.remove(&id) { + (Some(active_request), ActiveRequestType::Other) + } else { + ( + self.active_regtopic_requests.remove(&id), + ActiveRequestType::RegisterTopic, + ) + }; if let Some(mut active_request) = active_request { debug!( @@ -1176,7 +1178,7 @@ impl Service { ); } - let topic_radius = vec![self.config.topic_radius]; + let topic_radius = (1..self.config.topic_radius).collect(); // These are sanitized and ordered let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, @@ -1998,7 +2000,8 @@ impl Service { { match request_body { RequestBody::RegisterTopic { .. } => { - self.active_regtopic_requests.insert(id.clone(), active_request); + self.active_regtopic_requests + .insert(id.clone(), active_request); METRICS .active_regtopic_req .store(self.active_regtopic_requests.len(), Ordering::Relaxed); From fe5a5d04f09368bd26fa94278b61855641a3ebcf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jun 2022 23:07:04 +0200 Subject: [PATCH 173/391] Add trace for responses --- src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service.rs b/src/service.rs index 3ffff2034..0c8b63344 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1128,6 +1128,7 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); + trace!("Received {} response", response.body); let (active_request, req_type) = if let Some(active_request) = self.active_requests.remove(&id) { (Some(active_request), ActiveRequestType::Other) From c3311e65fb80c24c988c981e45ab32624110788d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 10:48:34 +0200 Subject: [PATCH 174/391] 
Add trace messages --- src/handler/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 410e54be9..a42b49c66 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -392,6 +392,7 @@ impl Handler { socket_addr: inbound_packet.src_address, node_id: src_id, }; + trace!("Received a message"); self.handle_message( node_address, message_nonce, @@ -924,6 +925,7 @@ impl Handler { } } Message::Response(response) => { + trace!("Received a response"); // Sessions could be awaiting an ENR response. Check if this response matches // these if let Some(request_id) = session.awaiting_enr.as_ref() { @@ -1010,14 +1012,14 @@ impl Handler { let reinsert = match request_call.request.body { // The request is reinserted for either another nodes response, a ticket or a // register confirmation response that may come, otherwise the request times out. - RequestBody::RegisterTopic { .. } => remaining_responses >= &mut 0, - RequestBody::TopicQuery { .. } => { - // remove from some map of NODES and AD NODES + RequestBody::RegisterTopic { .. } | RequestBody::TopicQuery { .. 
} => { + trace!("Received a topics NODES reponse"); remaining_responses >= &mut 0 - } + }, _ => remaining_responses > &mut 0, }; if reinsert { + trace!("Reinserting active request"); // more responses remaining, add back the request and send the response // add back the request and send the response self.active_requests From 65cc66c7f84d1f2c63ea5f93cd5e4d9bf05ad9ef Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 10:52:51 +0200 Subject: [PATCH 175/391] Add trace message --- src/handler/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index a42b49c66..4f920fab8 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -392,7 +392,6 @@ impl Handler { socket_addr: inbound_packet.src_address, node_id: src_id, }; - trace!("Received a message"); self.handle_message( node_address, message_nonce, @@ -925,7 +924,6 @@ impl Handler { } } Message::Response(response) => { - trace!("Received a response"); // Sessions could be awaiting an ENR response. Check if this response matches // these if let Some(request_id) = session.awaiting_enr.as_ref() { @@ -962,6 +960,7 @@ impl Handler { return; } } + trace!("Handling a {} response", response.body); // Handle standard responses self.handle_response(node_address, response).await; } From 39160177bea02f3b7090a0070bd4ab3980a9abd1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 10:59:15 +0200 Subject: [PATCH 176/391] Fix reinsertion of active req handler --- src/handler/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 4f920fab8..406464570 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1047,6 +1047,10 @@ impl Handler { } return; } + } else if let RequestBody::RegisterTopic { .. } | RequestBody::TopicQuery { .. 
} = request_call.request.body { + trace!("Received a topics NODES reponse"); + self.active_requests + .insert(node_address.clone(), request_call); } } else if let ResponseBody::Ticket { .. } = response.body { // The request is reinserted for either a nodes response or a register From b139738a60b2aa096341919f0221e0e48101fc4c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 11:20:22 +0200 Subject: [PATCH 177/391] Ignore wait time zero tickets --- src/handler/mod.rs | 6 ++++++ src/service.rs | 23 +++++++++++------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 406464570..76cf26b21 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -196,6 +196,12 @@ impl RequestCall { } } +pub enum ResponseType { + Nodes, + Ticket, + Regconfirmation, +} + /// Process to handle handshakes and sessions established from raw RPC communications between nodes. pub struct Handler { /// Configuration for the discv5 service. 
diff --git a/src/service.rs b/src/service.rs index 0c8b63344..8be830199 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1522,18 +1522,17 @@ impl Service { topic, ) .ok(); - } - - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - let registration_attempts = self.topics.entry(topic).or_default(); - registration_attempts - .entry(distance) - .or_default() - .entry(node_id) - .or_insert(RegistrationState::Ticket); - self.send_register_topics(topic); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let registration_attempts = self.topics.entry(topic).or_default(); + registration_attempts + .entry(distance) + .or_default() + .entry(node_id) + .or_insert(RegistrationState::Ticket); + self.send_register_topics(topic); + } } } ResponseBody::RegisterConfirmation { topic } => { From fdea8b23dd39e05217c7747c5fee6690f4a5e5ce Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 11:23:04 +0200 Subject: [PATCH 178/391] Add trace --- src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service.rs b/src/service.rs index 8be830199..675152e13 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1531,6 +1531,7 @@ impl Service { .or_default() .entry(node_id) .or_insert(RegistrationState::Ticket); + trace!("Sending REGTOPIC with ticket"); self.send_register_topics(topic); } } From 888f9fc0f9acf6de7ac8d1efa4f57cb92a2bd5dc Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 11:34:40 +0200 Subject: [PATCH 179/391] Git increase request timeout --- src/config.rs | 2 +- src/service.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/config.rs b/src/config.rs index af485aa1e..bd5661102 100644 --- a/src/config.rs +++ b/src/config.rs @@ -122,7 +122,7 @@ impl Default 
for Discv5Config { Self { enable_packet_filter: false, - request_timeout: Duration::from_secs(1), + request_timeout: Duration::from_secs(3), vote_duration: Duration::from_secs(30), query_peer_timeout: Duration::from_secs(2), query_timeout: Duration::from_secs(60), diff --git a/src/service.rs b/src/service.rs index 675152e13..8be830199 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1531,7 +1531,6 @@ impl Service { .or_default() .entry(node_id) .or_insert(RegistrationState::Ticket); - trace!("Sending REGTOPIC with ticket"); self.send_register_topics(topic); } } From 42879b3a02858fce5db20b8b536fe2fa8d9c1de9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 11:36:25 +0200 Subject: [PATCH 180/391] Increase request timeout to receive REGCONFIRMATION --- src/config.rs | 2 +- src/handler/mod.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/config.rs b/src/config.rs index bd5661102..af485aa1e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -122,7 +122,7 @@ impl Default for Discv5Config { Self { enable_packet_filter: false, - request_timeout: Duration::from_secs(3), + request_timeout: Duration::from_secs(1), vote_duration: Duration::from_secs(30), query_peer_timeout: Duration::from_secs(2), query_timeout: Duration::from_secs(60), diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 76cf26b21..e40374fd3 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -300,7 +300,7 @@ impl Handler { node_id, enr, key, - active_requests: ActiveRequests::new(config.request_timeout), + active_requests: ActiveRequests::new(config.request_timeout+Duration::from_secs(10)), pending_requests: HashMap::new(), filter_expected_responses, sessions: LruTimeCache::new( @@ -346,6 +346,7 @@ impl Handler { self.process_inbound_packet(inbound_packet).await; } Some(Ok((node_address, pending_request))) = self.active_requests.next() => { + trace!("Discarding request {} with timeout", pending_request.request.body); 
self.handle_request_timeout(node_address, pending_request).await; } _ = banned_nodes_check.tick() => self.unban_nodes_check(), // Unban nodes that are past the timeout From 2fe218af52dd45074dd9aeb270ca736a8a2c1a9b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 11:53:51 +0200 Subject: [PATCH 181/391] Fix distance amplification bug --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 8be830199..49bcc2fc2 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1179,7 +1179,7 @@ impl Service { ); } - let topic_radius = (1..self.config.topic_radius).collect(); + let topic_radius = (1..self.config.topic_radius+1).collect(); // These are sanitized and ordered let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, From 1cf0aa5f4b3e532a27b71ae3c68cab7a93e1cac4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 12:35:01 +0200 Subject: [PATCH 182/391] Increase timeout handler --- src/handler/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index e40374fd3..a8453fc06 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -300,7 +300,7 @@ impl Handler { node_id, enr, key, - active_requests: ActiveRequests::new(config.request_timeout+Duration::from_secs(10)), + active_requests: ActiveRequests::new(config.request_timeout + Duration::from_secs(20)), pending_requests: HashMap::new(), filter_expected_responses, sessions: LruTimeCache::new( From dfb51a9ea2d374857842bbab1ceb68b218f0c435 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 13:27:23 +0200 Subject: [PATCH 183/391] Add trace for TOPICQUERY --- src/handler/mod.rs | 2 +- src/service.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index a8453fc06..d20f41e6d 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -300,7 
+300,7 @@ impl Handler { node_id, enr, key, - active_requests: ActiveRequests::new(config.request_timeout + Duration::from_secs(20)), + active_requests: ActiveRequests::new(config.request_timeout + Duration::from_secs(15)), pending_requests: HashMap::new(), filter_expected_responses, sessions: LruTimeCache::new( diff --git a/src/service.rs b/src/service.rs index 49bcc2fc2..0c45b8af4 100644 --- a/src/service.rs +++ b/src/service.rs @@ -495,6 +495,7 @@ impl Service { ServiceRequest::TopicQuery(topic_hash, callback) => { // If we look up the topic hash for the first time we initialise its kbuckets. if let Entry::Vacant(_) = self.topics_kbuckets.entry(topic_hash) { + trace!("Init kbuckets for topic hash {}", topic_hash); // NOTE: Currently we don't expose custom filter support in the configuration. Users can // optionally use the IP filter via the ip_limit configuration parameter. In the future, we // may expose this functionality to the users if there is demand for it. @@ -781,6 +782,7 @@ impl Service { num_query_peers: usize, callback: Option, RequestError>>>, ) { + trace!("Preparing to send TOPICQUERYs"); let query = self .active_topic_queries .queries @@ -812,6 +814,7 @@ impl Service { } let _ = new_query_peers.iter().rev().count(); + trace!("Sending TOPICQUERYs to {} peers", new_query_peers.len()); for enr in new_query_peers { if let Ok(node_contact) = NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) From f905849f5ef544f1b4dc7c8bb802b9969b3753d2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 13:42:20 +0200 Subject: [PATCH 184/391] Increase trace messages in buggy area --- src/service.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/service.rs b/src/service.rs index 0c45b8af4..5e843b193 100644 --- a/src/service.rs +++ b/src/service.rs @@ -796,6 +796,7 @@ impl Service { let queried_peers = query.queried_peers.clone(); if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { let mut peers = 
kbuckets.get().clone(); + trace!("Found {} peers in kbuckets of topic hash {}", peers.iter().count(), topic_hash); // Start querying nodes further away, starting at distance 256 let mut new_query_peers_iter = peers.iter().rev().filter_map(|entry| { (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { @@ -824,6 +825,7 @@ impl Service { } } } else { + error!("Debug unreachable"); debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); } } From e36e2a578062b82c58f8f834ac3285356e5ecdf0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 13:56:55 +0200 Subject: [PATCH 185/391] Fix endless while loop bug --- src/service.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/service.rs b/src/service.rs index 5e843b193..2f9a004e6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -797,7 +797,7 @@ impl Service { if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { let mut peers = kbuckets.get().clone(); trace!("Found {} peers in kbuckets of topic hash {}", peers.iter().count(), topic_hash); - // Start querying nodes further away, starting at distance 256 + // Prefer querying nodes further away, starting at distance 256 by to avoid hotspots let mut new_query_peers_iter = peers.iter().rev().filter_map(|entry| { (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { query @@ -808,13 +808,13 @@ impl Service { }) }); let mut new_query_peers = Vec::new(); - while new_query_peers.len() < num_query_peers { - if let Some(enr) = new_query_peers_iter.next() { - new_query_peers.push(enr); + for enr in new_query_peers_iter.next() { + trace!("Added new TOPICQUERY peer {}", enr.node_id()); + new_query_peers.push(enr); + if new_query_peers.len() < num_query_peers { + break; } } - let _ = new_query_peers.iter().rev().count(); - trace!("Sending TOPICQUERYs to {} peers", new_query_peers.len()); for enr in new_query_peers { if let Ok(node_contact) = @@ -825,7 +825,6 @@ 
impl Service { } } } else { - error!("Debug unreachable"); debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); } } From 995b450aa77af9b7b39694d8c44621b895179129 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 14:03:57 +0200 Subject: [PATCH 186/391] Remove trace messages --- src/service.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/service.rs b/src/service.rs index 2f9a004e6..05ec6cfae 100644 --- a/src/service.rs +++ b/src/service.rs @@ -782,7 +782,6 @@ impl Service { num_query_peers: usize, callback: Option, RequestError>>>, ) { - trace!("Preparing to send TOPICQUERYs"); let query = self .active_topic_queries .queries @@ -809,13 +808,12 @@ impl Service { }); let mut new_query_peers = Vec::new(); for enr in new_query_peers_iter.next() { - trace!("Added new TOPICQUERY peer {}", enr.node_id()); new_query_peers.push(enr); if new_query_peers.len() < num_query_peers { break; } } - trace!("Sending TOPICQUERYs to {} peers", new_query_peers.len()); + trace!("Sending TOPICQUERYs to {} new peers", new_query_peers.len()); for enr in new_query_peers { if let Ok(node_contact) = NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) From f804e9a7e9bb8c07a5a2bc041930716ea5a9fa96 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 14:33:46 +0200 Subject: [PATCH 187/391] Add trace for topic query ad nodes not sending --- src/service.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/service.rs b/src/service.rs index 05ec6cfae..4db4d5795 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1011,7 +1011,7 @@ impl Service { && enr.udp4_socket().map(SocketAddr::V4) == Some(node_address.socket_addr) { debug!("Sending NODES response to REGTOPIC"); - self.send_topic_nodes_response( + self.send_find_topic_nodes_response( topic, node_address.clone(), id.clone(), @@ -1114,13 +1114,15 @@ impl Service { } } RequestBody::TopicQuery { topic } => { - 
self.send_topic_nodes_response( + trace!("Sending TOPICQUERY find nodes response"); + self.send_find_topic_nodes_response( topic, node_address.clone(), id.clone(), "TOPICQUERY", ); - self.send_topic_query_response(node_address, id, topic); + trace!("Sending TOPICQUERY AD nodes response"); + self.send_topic_query_nodes_response(node_address, id, topic); } } } @@ -1130,7 +1132,6 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); - trace!("Received {} response", response.body); let (active_request, req_type) = if let Some(active_request) = self.active_requests.remove(&id) { (Some(active_request), ActiveRequestType::Other) @@ -1759,7 +1760,7 @@ impl Service { /// Answer to a topic query containing the nodes currently advertised for the /// requested topic if any. - fn send_topic_query_response( + fn send_topic_query_nodes_response( &mut self, node_address: NodeAddress, rpc_id: RequestId, @@ -1770,10 +1771,10 @@ impl Service { .get_ad_nodes(topic) .map(|ad| ad.node_record().clone()) .collect(); - self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY"); + self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY ADS"); } - fn send_topic_nodes_response( + fn send_find_topic_nodes_response( &mut self, topic: TopicHash, node_address: NodeAddress, @@ -1930,7 +1931,7 @@ impl Service { for response in responses { trace!( - "Sending {} response to: {}. Response: {} ", + "Sending {} NODES response to: {}. 
Response: {} ", req_type, node_address, response From 32901845930a145f7be9f64793ee7c1a7f8b6a61 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 17:00:03 +0200 Subject: [PATCH 188/391] Add extra ADNODE response body --- src/handler/mod.rs | 92 +++++++++++++++++++++++--- src/rpc.rs | 25 +++++++- src/service.rs | 157 +++++++++++++++++++++++++++------------------ 3 files changed, 200 insertions(+), 74 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index d20f41e6d..47609efde 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -168,6 +168,9 @@ pub(crate) struct RequestCall { /// If we receive a Nodes Response with a total greater than 1. This keeps track of the /// remaining responses expected. remaining_responses: Option, + /// If we receive a AdNodes Response with a total greater than 1. This keeps track of the + /// remaining responses expected. + remaining_adnode_responses: Option, /// Signifies if we are initiating the session with a random packet. This is only used to /// determine the connection direction of the session. initiating_session: bool, @@ -187,6 +190,7 @@ impl RequestCall { handshake_sent: false, retries: 1, remaining_responses: None, + remaining_adnode_responses: None, initiating_session, } } @@ -300,7 +304,9 @@ impl Handler { node_id, enr, key, - active_requests: ActiveRequests::new(config.request_timeout + Duration::from_secs(15)), + active_requests: ActiveRequests::new( + config.request_timeout + Duration::from_secs(15), + ), pending_requests: HashMap::new(), filter_expected_responses, sessions: LruTimeCache::new( @@ -1016,12 +1022,23 @@ impl Handler { if let Some(remaining_responses) = request_call.remaining_responses.as_mut() { *remaining_responses -= 1; let reinsert = match request_call.request.body { - // The request is reinserted for either another nodes response, a ticket or a - // register confirmation response that may come, otherwise the request times out. - RequestBody::RegisterTopic { .. 
} | RequestBody::TopicQuery { .. } => { - trace!("Received a topics NODES reponse"); + RequestBody::RegisterTopic { .. } => { + trace!("Received a REGTOPIC NODES reponse"); + // The request is reinserted for either another NODES response, a TICKET or a + // REGCONFIRMATION response that may come, otherwise the request times out. remaining_responses >= &mut 0 - }, + } + RequestBody::TopicQuery { .. } => { + trace!("Received a TOPICQUERY NODES reponse"); + // TopicQuerys may receive multiple ADNODE responses as well as NODES responses + // so the request call must be reinserted. + let remaining_adnode_responses = + match request_call.remaining_adnode_responses { + Some(remaining) => remaining > 0, + None => false, + }; + remaining_responses > &mut 0 || remaining_adnode_responses + } _ => remaining_responses > &mut 0, }; if reinsert { @@ -1054,16 +1071,73 @@ impl Handler { } return; } - } else if let RequestBody::RegisterTopic { .. } | RequestBody::TopicQuery { .. } = request_call.request.body { + // If there is only one NODES response but it is for a REGTOPIC, reinsert the active request for a + // TICKET or a potential REGCONFIRMATION. + } else if let RequestBody::RegisterTopic { .. } = request_call.request.body { trace!("Received a topics NODES reponse"); self.active_requests + .insert(node_address.clone(), request_call); + } + } else if let ResponseBody::AdNodes { total, .. } = response.body { + if total > 1 { + // This is a multi-response Nodes response + if let Some(ref mut remaining_adnode_responses) = + request_call.remaining_adnode_responses + { + *remaining_adnode_responses -= 1; + let reinsert = { + // TopicQuerys may receive multiple ADNODE responses as well as NODES responses + // so the request call must be reinserted. 
+ let remaining_responses = match request_call.remaining_responses { + Some(remaining) => remaining > 0, + None => false, + }; + remaining_adnode_responses > &mut 0 || remaining_responses + }; + if reinsert { + trace!("Reinserting active TOPICQUERY request"); + // more responses remaining, add back the request and send the response + // add back the request and send the response + self.active_requests .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response(node_address, Box::new(response))) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } + } else { + // This is the first instance + request_call.remaining_responses = Some(total - 1); + // add back the request and send the response + self.active_requests + .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response(node_address, Box::new(response))) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } } } else if let ResponseBody::Ticket { .. } = response.body { - // The request is reinserted for either a nodes response or a register - // confirmation response that may come, otherwise the request times out. + // The request is reinserted for either a NODES response or a potential REGCONFIRMATION + // response that may come. 
self.active_requests .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response(node_address, Box::new(response))) + .await + { + warn!("Failed to inform of response {}", e) + } + return; } // Remove the expected response diff --git a/src/rpc.rs b/src/rpc.rs index 54c00f6f2..de73b33dd 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,5 +1,6 @@ use crate::advertisement::topic::TopicHash; use enr::{CombinedKey, Enr, NodeId}; +use more_asserts::debug_unreachable; use rlp::{DecoderError, Rlp, RlpStream}; use std::{ net::{IpAddr, Ipv6Addr}, @@ -136,6 +137,14 @@ pub enum ResponseBody { /// The topic of a successful REGTOPIC request. topic: TopicHash, }, + /// A NODES response to a TOPICQUERY which also receives a NODES response + /// with peers to add to topic kbuckets. + AdNodes { + /// The total number of responses that make up this response. + total: u64, + /// A list of ENR's returned by the responder. + nodes: Vec>, + }, } impl Request { @@ -214,6 +223,7 @@ impl Response { ResponseBody::Talk { .. } => 6, ResponseBody::Ticket { .. } => 8, ResponseBody::RegisterConfirmation { .. } => 9, + ResponseBody::AdNodes { .. } => 10, } } @@ -234,6 +244,7 @@ impl Response { ResponseBody::RegisterConfirmation { .. } => { matches!(req, RequestBody::RegisterTopic { .. }) } + ResponseBody::AdNodes { .. } => matches!(req, RequestBody::TopicQuery { .. 
}), } } @@ -257,7 +268,7 @@ impl Response { buf.extend_from_slice(&s.out()); buf } - ResponseBody::Nodes { total, nodes } => { + ResponseBody::Nodes { total, nodes } | ResponseBody::AdNodes { total, nodes } => { let mut s = RlpStream::new(); s.begin_list(3); s.append(&id.as_bytes()); @@ -337,8 +348,16 @@ impl std::fmt::Display for ResponseBody { "PONG: Enr-seq: {}, Ip: {:?}, Port: {}", enr_seq, ip, port ), - ResponseBody::Nodes { total, nodes } => { - write!(f, "NODES: total: {}, Nodes: [", total)?; + ResponseBody::Nodes { total, nodes } | ResponseBody::AdNodes { total, nodes } => { + let response_type = match self { + ResponseBody::Nodes { .. } => "NODES", + ResponseBody::AdNodes { .. } => "ADNODES", + _ => { + debug_unreachable!("Only NODES and ADNODES"); + "" + } + }; + write!(f, "{}: total: {}, Nodes: [", response_type, total)?; let mut first = true; for id in nodes { if !first { diff --git a/src/service.rs b/src/service.rs index 4db4d5795..63818a105 100644 --- a/src/service.rs +++ b/src/service.rs @@ -195,8 +195,15 @@ pub struct Service { /// Keeps track of the number of responses received from a NODES response. active_nodes_responses: HashMap, - /// Keeps track of expected REGCONFIRMATION responses that may be received from a REGTOPIC - /// request. + /// Keeps track of the number of responses received from a NODES response. + active_adnodes_responses: HashMap, + + /// Keeps track of the 2 expected responses, NODES and ADNODES that may be received from a + /// TOPICQUERY request. + topic_query_responses: HashMap, + + /// Keeps track of the 3 expected responses, TICKET, NODES and REGCONFIRMATION that may be + /// received from a REGTOPIC request. active_regtopic_requests: ActiveRegtopicRequests, /// A map of votes nodes have made about our external IP address. We accept the majority. 
@@ -424,6 +431,8 @@ impl Service { queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), active_nodes_responses: HashMap::new(), + active_adnodes_responses: HashMap::new(), + topic_query_responses: HashMap::new(), active_regtopic_requests: ActiveRegtopicRequests::default(), ip_votes, handler_send, @@ -795,7 +804,11 @@ impl Service { let queried_peers = query.queried_peers.clone(); if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { let mut peers = kbuckets.get().clone(); - trace!("Found {} peers in kbuckets of topic hash {}", peers.iter().count(), topic_hash); + trace!( + "Found {} peers in kbuckets of topic hash {}", + peers.iter().count(), + topic_hash + ); // Prefer querying nodes further away, starting at distance 256 by to avoid hotspots let mut new_query_peers_iter = peers.iter().rev().filter_map(|entry| { (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { @@ -1132,14 +1145,11 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); - let (active_request, req_type) = + let active_request = if let Some(active_request) = self.active_requests.remove(&id) { - (Some(active_request), ActiveRequestType::Other) + Some(active_request) } else { - ( - self.active_regtopic_requests.remove(&id), - ActiveRequestType::RegisterTopic, - ) + self.active_regtopic_requests.remove(&id) }; if let Some(mut active_request) = active_request { @@ -1182,7 +1192,7 @@ impl Service { ); } - let topic_radius = (1..self.config.topic_radius+1).collect(); + let topic_radius = (1..self.config.topic_radius + 1).collect(); // These are sanitized and ordered let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, @@ -1277,10 +1287,10 @@ impl Service { current_response.received_nodes.append(&mut nodes); self.active_nodes_responses .insert(node_id, current_response); - match req_type { - ActiveRequestType::RegisterTopic => { + match 
active_request.request_body { + RequestBody::RegisterTopic { .. } => { self.active_regtopic_requests.reinsert(id); - } + }, _ => { self.active_requests.insert(id, active_request); } @@ -1306,41 +1316,15 @@ impl Service { // will be ignored. // ensure any mapping is removed in this rare case self.active_nodes_responses.remove(&node_id); + + if let Some() = self.topic_query_responses.get_mut(active_request.request.id) { + + } + match active_request.request_body { RequestBody::TopicQuery { topic } => { - let mut is_ads = false; - for enr in nodes.iter() { - let sender_key: kbucket::Key = node_id.into(); - let peer_key: kbucket::Key = enr.node_id().into(); - let topic_key: kbucket::Key = - NodeId::new(&topic.as_bytes()).into(); - if let Some(distance_sender_topic) = - sender_key.log2_distance(&topic_key) - { - if let Some(distance_peer_topic) = - peer_key.log2_distance(&topic_key) - { - // WARNING! This hack is based on the probability that ad nodes are not all in the - // same bucket +-1 - if distance_peer_topic > distance_sender_topic + 1 - || distance_peer_topic < distance_sender_topic - 1 - { - is_ads = true; - break; - } - } - } - } - if is_ads { - if let Some(query) = - self.active_topic_queries.queries.get_mut(&topic) - { - nodes.into_iter().for_each(|enr| { - query.results.insert(enr.node_id(), enr); - }); - } - } else if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { + if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { for enr in nodes { let peer_key: kbucket::Key = enr.node_id().into(); match kbuckets.insert_or_update( @@ -1411,6 +1395,61 @@ impl Service { ), } } + ResponseBody::AdNodes { total, nodes } => { + // handle the case that there is more than one response + if total > 1 { + let mut current_response = self + .active_adnodes_responses + .remove(&node_id) + .unwrap_or_default(); + + debug!( + "ADNODES Response: {} of {} received", + current_response.count, total + ); + // if there are more responses coming, store the nodes and 
wait for + // another response + // We allow for implementations to send at a minimum 3 nodes per response. + // We allow for the number of nodes to be returned as the maximum we emit. + if current_response.count < self.config.max_nodes_response / 3 + 1 + && (current_response.count as u64) < total + { + current_response.count += 1; + + current_response.received_nodes.append(&mut nodes); + self.active_adnodes_responses + .insert(node_id, current_response); + self.active_topic_query_requests.reinsert(id); + return; + } + + // have received all the Nodes responses we are willing to accept + // ignore duplicates here as they will be handled when adding + // to the DHT + current_response.received_nodes.append(&mut nodes); + nodes = current_response.received_nodes; + } + + debug!( + "Received a ADNODES response of len: {}, total: {}, from: {}", + nodes.len(), + total, + active_request.contact + ); + // note: If a peer sends an initial NODES response with a total > 1 then + // in a later response sends a response with a total of 1, all previous nodes + // will be ignored. + // ensure any mapping is removed in this rare case + self.active_nodes_responses.remove(&node_id); + + if let RequestBody::TopicQuery { topic } = active_request.request_body { + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { + nodes.into_iter().for_each(|enr| { + query.results.insert(enr.node_id(), enr); + }); + } + } + } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); // perform ENR majority-based update if required. 
@@ -1525,17 +1564,17 @@ impl Service { topic, ) .ok(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - let registration_attempts = self.topics.entry(topic).or_default(); - registration_attempts - .entry(distance) - .or_default() - .entry(node_id) - .or_insert(RegistrationState::Ticket); - self.send_register_topics(topic); - } + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let registration_attempts = self.topics.entry(topic).or_default(); + registration_attempts + .entry(distance) + .or_default() + .entry(node_id) + .or_insert(RegistrationState::Ticket); + self.send_register_topics(topic); + } } } ResponseBody::RegisterConfirmation { topic } => { @@ -2470,9 +2509,3 @@ enum ConnectionStatus { /// The node has disconnected Disconnected, } - -pub enum ActiveRequestType { - RegisterTopic, - TopicQuery, - Other, -} From a212173668884f968402ff1f970bede272868925 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 22:14:13 +0200 Subject: [PATCH 189/391] Receive AdNodes response on service level --- src/service.rs | 69 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 50 insertions(+), 19 deletions(-) diff --git a/src/service.rs b/src/service.rs index 63818a105..327f146af 100644 --- a/src/service.rs +++ b/src/service.rs @@ -198,12 +198,12 @@ pub struct Service { /// Keeps track of the number of responses received from a NODES response. active_adnodes_responses: HashMap, - /// Keeps track of the 2 expected responses, NODES and ADNODES that may be received from a + /// Keeps track of the 2 expected responses, NODES and ADNODES that should be received from a /// TOPICQUERY request. 
- topic_query_responses: HashMap, + topic_query_responses: HashMap, - /// Keeps track of the 3 expected responses, TICKET, NODES and REGCONFIRMATION that may be - /// received from a REGTOPIC request. + /// Keeps track of the 3 expected responses, TICKET and NODES that should be received from a + /// REGTOPIC request and REGCONFIRMATION that may be received. active_regtopic_requests: ActiveRegtopicRequests, /// A map of votes nodes have made about our external IP address. We accept the majority. @@ -253,6 +253,13 @@ pub struct Service { active_topic_queries: ActiveTopicQueries, } +pub enum TopicQueryResponseState { + Start, + Nodes, + AdNodes, + Complete, +} + pub enum RegistrationState { Confirmed(Instant), Ticket, @@ -1145,12 +1152,11 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); - let active_request = - if let Some(active_request) = self.active_requests.remove(&id) { - Some(active_request) - } else { - self.active_regtopic_requests.remove(&id) - }; + let active_request = if let Some(active_request) = self.active_requests.remove(&id) { + Some(active_request) + } else { + self.active_regtopic_requests.remove(&id) + }; if let Some(mut active_request) = active_request { debug!( @@ -1290,7 +1296,7 @@ impl Service { match active_request.request_body { RequestBody::RegisterTopic { .. } => { self.active_regtopic_requests.reinsert(id); - }, + } _ => { self.active_requests.insert(id, active_request); } @@ -1316,11 +1322,6 @@ impl Service { // will be ignored. 
// ensure any mapping is removed in this rare case self.active_nodes_responses.remove(&node_id); - - if let Some() = self.topic_query_responses.get_mut(active_request.request.id) { - - } - match active_request.request_body { RequestBody::TopicQuery { topic } => { @@ -1394,8 +1395,23 @@ impl Service { "Only TOPICQUERY and FINDNODE requests expect NODES response" ), } + + if let Some(response_state) = self.topic_query_responses.get_mut(&node_id) { + match response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::Nodes; + self.active_requests.insert(id, active_request); + } + TopicQueryResponseState::AdNodes => { + *response_state = TopicQueryResponseState::Complete; + } + _ => { + debug_unreachable!("No more NODES responses should be received if TOPICQUERY is in Complete or Nodes state.") + } + } + } } - ResponseBody::AdNodes { total, nodes } => { + ResponseBody::AdNodes { total, mut nodes } => { // handle the case that there is more than one response if total > 1 { let mut current_response = self @@ -1419,7 +1435,7 @@ impl Service { current_response.received_nodes.append(&mut nodes); self.active_adnodes_responses .insert(node_id, current_response); - self.active_topic_query_requests.reinsert(id); + self.active_requests.insert(id, active_request); return; } @@ -1440,7 +1456,7 @@ impl Service { // in a later response sends a response with a total of 1, all previous nodes // will be ignored. 
// ensure any mapping is removed in this rare case - self.active_nodes_responses.remove(&node_id); + self.active_adnodes_responses.remove(&node_id); if let RequestBody::TopicQuery { topic } = active_request.request_body { if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { @@ -1449,6 +1465,21 @@ impl Service { }); } } + + if let Some(response_state) = self.topic_query_responses.get_mut(&node_id) { + match response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::AdNodes; + self.active_requests.insert(id, active_request); + } + TopicQueryResponseState::Nodes => { + *response_state = TopicQueryResponseState::Complete; + } + _ => { + debug_unreachable!("No more ADNODES responses should be received if TOPICQUERY is in Complete or AdNodes state.") + } + } + } } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); From f5b5ef84f9e8c4f490ee60ebed5bd5b03aeb4f86 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 23:17:00 +0200 Subject: [PATCH 190/391] Reuse code for adding batches of new nodes to topics kbuckets --- src/service.rs | 152 ++++++++++++++++++++----------------------------- 1 file changed, 63 insertions(+), 89 deletions(-) diff --git a/src/service.rs b/src/service.rs index 327f146af..0a32d0e89 100644 --- a/src/service.rs +++ b/src/service.rs @@ -531,23 +531,10 @@ impl Service { table_filter, bucket_filter, ); - self.kbuckets.write().iter().for_each(|entry| { - match kbuckets.insert_or_update( - entry.node.key, - entry.node.value.clone(), - NodeStatus { - state: ConnectionState::Disconnected, - direction: ConnectionDirection::Incoming, - }, - ) { - InsertResult::Failed(FailureReason::BucketFull) => error!("Table full"), - InsertResult::Failed(FailureReason::BucketFilter) => error!("Failed bucket filter"), - InsertResult::Failed(FailureReason::TableFilter) => error!("Failed table filter"), - InsertResult::Failed(FailureReason::InvalidSelfUpdate) => 
error!("Invalid self update"), - InsertResult::Failed(_) => error!("Failed to insert ENR"), - _ => {}, - } - }); + { + let mut local_routing_table = self.kbuckets.write(); + Service::new_connections_disconnected(&mut kbuckets, topic_hash, local_routing_table.iter().map(|entry| entry.node.value.clone())); + } self.topics_kbuckets.insert(topic_hash, kbuckets); } self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); @@ -577,25 +564,14 @@ impl Service { bucket_filter, ); debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); - self.kbuckets.write().iter().for_each(|entry| { - match kbuckets.insert_or_update( - entry.node.key, - entry.node.value.clone(), - NodeStatus { - state: ConnectionState::Disconnected, - direction: ConnectionDirection::Incoming, - }, - ) { - InsertResult::Failed(FailureReason::BucketFull) => error!("Table full"), - InsertResult::Failed(FailureReason::BucketFilter) => error!("Failed bucket filter"), - InsertResult::Failed(FailureReason::TableFilter) => error!("Failed table filter"), - InsertResult::Failed(FailureReason::InvalidSelfUpdate) => error!("Invalid self update"), - InsertResult::Failed(_) => error!("Failed to insert ENR"), - _ => debug!("Insertion of node {} into KBucket of {} was successful", entry.node.key.preimage(), topic_hash), - } - }); + + { + let mut local_routing_table = self.kbuckets.write(); + Service::new_connections_disconnected(&mut kbuckets, topic_hash, local_routing_table.iter().map(|entry| entry.node.value.clone())); + } self.topics_kbuckets.insert(topic_hash, kbuckets); METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + self.send_register_topics(topic_hash); } } @@ -817,7 +793,7 @@ impl Service { topic_hash ); // Prefer querying nodes further away, starting at distance 256 by to avoid hotspots - let mut new_query_peers_iter = peers.iter().rev().filter_map(|entry| { + let new_query_peers_iter = 
peers.iter().rev().filter_map(|entry| { (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { query .queried_peers @@ -827,7 +803,7 @@ impl Service { }) }); let mut new_query_peers = Vec::new(); - for enr in new_query_peers_iter.next() { + for enr in new_query_peers_iter { new_query_peers.push(enr); if new_query_peers.len() < num_query_peers { break; @@ -1326,32 +1302,11 @@ impl Service { match active_request.request_body { RequestBody::TopicQuery { topic } => { if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { - for enr in nodes { - let peer_key: kbucket::Key = enr.node_id().into(); - match kbuckets.insert_or_update( - &peer_key, - enr.clone(), - NodeStatus { - state: ConnectionState::Disconnected, - direction: ConnectionDirection::Incoming, - }, - ) { - InsertResult::Failed(FailureReason::BucketFull) => { - error!("Table full") - } - InsertResult::Failed(FailureReason::BucketFilter) => { - error!("Failed bucket filter") - } - InsertResult::Failed(FailureReason::TableFilter) => { - error!("Failed table filter") - } - InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { - error!("Invalid self update") - } - InsertResult::Failed(_) => error!("Failed to insert ENR"), - _ => debug!("Insertion of node {} into KBucket of {} was successful", enr.node_id(), topic), - } - } + Service::new_connections_disconnected( + kbuckets, + topic, + nodes.into_iter(), + ); } } RequestBody::RegisterTopic { @@ -1360,32 +1315,11 @@ impl Service { ticket: _, } => { if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { - for enr in nodes { - let peer_key: kbucket::Key = enr.node_id().into(); - match kbuckets.insert_or_update( - &peer_key, - enr.clone(), - NodeStatus { - state: ConnectionState::Disconnected, - direction: ConnectionDirection::Incoming, - }, - ) { - InsertResult::Failed(FailureReason::BucketFull) => { - error!("Table full") - } - InsertResult::Failed(FailureReason::BucketFilter) => { - error!("Failed bucket filter") - } - 
InsertResult::Failed(FailureReason::TableFilter) => { - error!("Failed table filter") - } - InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { - error!("Invalid self update") - } - InsertResult::Failed(_) => error!("Failed to insert ENR"), - _ => debug!("Insertion of node {} into KBucket of {} was successful", enr.node_id(), topic), - } - } + Service::new_connections_disconnected( + kbuckets, + topic, + nodes.into_iter(), + ); } } RequestBody::FindNode { .. } => { @@ -2430,12 +2364,52 @@ impl Service { topic, enr: _, ticket: _, - } => self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)), + } + | RequestBody::TopicQuery { topic } => { + self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)) + } _ => self.connection_updated(node_id, ConnectionStatus::Disconnected, None), } } } + fn new_connections_disconnected( + kbuckets: &mut KBucketsTable, + topic: TopicHash, + nodes: impl Iterator, + ) { + for enr in nodes { + let peer_key: kbucket::Key = enr.node_id().into(); + match kbuckets.insert_or_update( + &peer_key, + enr.clone(), + NodeStatus { + state: ConnectionState::Disconnected, + direction: ConnectionDirection::Incoming, + }, + ) { + InsertResult::Failed(FailureReason::BucketFull) => { + error!("Table full") + } + InsertResult::Failed(FailureReason::BucketFilter) => { + error!("Failed bucket filter") + } + InsertResult::Failed(FailureReason::TableFilter) => { + error!("Failed table filter") + } + InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { + error!("Invalid self update") + } + InsertResult::Failed(_) => error!("Failed to insert ENR"), + _ => debug!( + "Insertion of node {} into KBucket of {} was successful", + enr.node_id(), + topic + ), + } + } + } + /// A future that maintains the routing table and inserts nodes when required. This returns the /// `Discv5Event::NodeInserted` variant if a new node has been inserted into the routing table. 
async fn bucket_maintenance_poll( From 97c42e7735b880d35f3ea2cf2fb4598ce19d6f42 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jun 2022 23:22:08 +0200 Subject: [PATCH 191/391] Fix carg clippy --- src/service/test.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/service/test.rs b/src/service/test.rs index a5af42f08..ce50ce505 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -86,6 +86,8 @@ async fn build_service( queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), active_nodes_responses: HashMap::new(), + active_adnodes_responses: HashMap::new(), + topic_query_responses: HashMap::new(), active_regtopic_requests: ActiveRegtopicRequests::default(), ip_votes: None, handler_send, From 024f548cb5f1e5bd8f0e28524e9b61422a458ce1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 23 Jun 2022 09:42:26 +0200 Subject: [PATCH 192/391] Add debug for request service level --- src/service.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/service.rs b/src/service.rs index 0a32d0e89..efea387eb 100644 --- a/src/service.rs +++ b/src/service.rs @@ -932,6 +932,10 @@ impl Service { /// Processes an RPC request from a peer. Requests respond to the received socket address, /// rather than the IP of the known ENR. 
fn handle_rpc_request(&mut self, node_address: NodeAddress, req: Request) { + debug!( + "Received RPC request: {} from: {}", + req.body, node_address + ); let id = req.id; match req.body { RequestBody::FindNode { distances } => { From b10885c69b0968583654996427dfa01606b4cb95 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 23 Jun 2022 10:27:43 +0200 Subject: [PATCH 193/391] Fix reinsertion of active request bug --- src/service.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/service.rs b/src/service.rs index efea387eb..59014d79e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -932,10 +932,7 @@ impl Service { /// Processes an RPC request from a peer. Requests respond to the received socket address, /// rather than the IP of the known ENR. fn handle_rpc_request(&mut self, node_address: NodeAddress, req: Request) { - debug!( - "Received RPC request: {} from: {}", - req.body, node_address - ); + debug!("Received RPC request: {} from: {}", req.body, node_address); let id = req.id; match req.body { RequestBody::FindNode { distances } => { @@ -1330,22 +1327,25 @@ impl Service { self.discovered(&node_id, nodes, active_request.query_id) } _ => debug_unreachable!( - "Only TOPICQUERY and FINDNODE requests expect NODES response" + "Only TOPICQUERY, REGTOPIC and FINDNODE requests expect NODES response" ), } - if let Some(response_state) = self.topic_query_responses.get_mut(&node_id) { - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::Nodes; - self.active_requests.insert(id, active_request); - } - TopicQueryResponseState::AdNodes => { - *response_state = TopicQueryResponseState::Complete; - } - _ => { - debug_unreachable!("No more NODES responses should be received if TOPICQUERY is in Complete or Nodes state.") - } + let response_state = self + .topic_query_responses + .entry(node_id) + .or_insert(TopicQueryResponseState::Start); + + match 
response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::Nodes; + self.active_requests.insert(id, active_request); + } + TopicQueryResponseState::AdNodes => { + *response_state = TopicQueryResponseState::Complete; + } + _ => { + debug_unreachable!("No more NODES responses should be received if TOPICQUERY is in Complete or Nodes state.") } } } From 55815d390a36242a842f418bceefa11522b31728 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 23 Jun 2022 10:42:35 +0200 Subject: [PATCH 194/391] Fix topic query response state bug --- src/service.rs | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/service.rs b/src/service.rs index 59014d79e..1233c8198 100644 --- a/src/service.rs +++ b/src/service.rs @@ -257,7 +257,6 @@ pub enum TopicQueryResponseState { Start, Nodes, AdNodes, - Complete, } pub enum RegistrationState { @@ -1342,10 +1341,10 @@ impl Service { self.active_requests.insert(id, active_request); } TopicQueryResponseState::AdNodes => { - *response_state = TopicQueryResponseState::Complete; + self.topic_query_responses.remove(&node_id); } - _ => { - debug_unreachable!("No more NODES responses should be received if TOPICQUERY is in Complete or Nodes state.") + TopicQueryResponseState::Nodes => { + debug_unreachable!("No more NODES responses should be received if TOPICQUERY response is in Nodes state.") } } } @@ -1404,20 +1403,23 @@ impl Service { } } - if let Some(response_state) = self.topic_query_responses.get_mut(&node_id) { - match response_state { + let response_state = self + .topic_query_responses + .entry(node_id) + .or_insert(TopicQueryResponseState::Start); + + match response_state { TopicQueryResponseState::Start => { *response_state = TopicQueryResponseState::AdNodes; self.active_requests.insert(id, active_request); } TopicQueryResponseState::Nodes => { - *response_state = TopicQueryResponseState::Complete; + self.topic_query_responses.remove(&node_id); } - _ => { 
- debug_unreachable!("No more ADNODES responses should be received if TOPICQUERY is in Complete or AdNodes state.") + TopicQueryResponseState::AdNodes => { + debug_unreachable!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state.") } } - } } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); From 53c0dab6660780d43e2a8a653427a77de8371e9c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 23 Jun 2022 10:50:45 +0200 Subject: [PATCH 195/391] Fix topic query response state bug --- src/service.rs | 48 +++++++++++++++++++++++------------------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/src/service.rs b/src/service.rs index 1233c8198..a8ef51a93 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1307,6 +1307,23 @@ impl Service { topic, nodes.into_iter(), ); + let response_state = self + .topic_query_responses + .entry(node_id) + .or_insert(TopicQueryResponseState::Start); + + match response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::Nodes; + self.active_requests.insert(id, active_request); + } + TopicQueryResponseState::AdNodes => { + self.topic_query_responses.remove(&node_id); + } + TopicQueryResponseState::Nodes => { + debug_unreachable!("No more NODES responses should be received if TOPICQUERY response is in Nodes state.") + } + } } } RequestBody::RegisterTopic { @@ -1329,24 +1346,6 @@ impl Service { "Only TOPICQUERY, REGTOPIC and FINDNODE requests expect NODES response" ), } - - let response_state = self - .topic_query_responses - .entry(node_id) - .or_insert(TopicQueryResponseState::Start); - - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::Nodes; - self.active_requests.insert(id, active_request); - } - TopicQueryResponseState::AdNodes => { - self.topic_query_responses.remove(&node_id); - } - TopicQueryResponseState::Nodes => { - debug_unreachable!("No more 
NODES responses should be received if TOPICQUERY response is in Nodes state.") - } - } } ResponseBody::AdNodes { total, mut nodes } => { // handle the case that there is more than one response @@ -1401,14 +1400,12 @@ impl Service { query.results.insert(enr.node_id(), enr); }); } - } - - let response_state = self - .topic_query_responses - .entry(node_id) - .or_insert(TopicQueryResponseState::Start); + let response_state = self + .topic_query_responses + .entry(node_id) + .or_insert(TopicQueryResponseState::Start); - match response_state { + match response_state { TopicQueryResponseState::Start => { *response_state = TopicQueryResponseState::AdNodes; self.active_requests.insert(id, active_request); @@ -1420,6 +1417,7 @@ impl Service { debug_unreachable!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state.") } } + } } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); From e688e7165dd534a75674373c7c40916c890398ca Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 23 Jun 2022 19:32:14 +0200 Subject: [PATCH 196/391] Introduce state for topic query responses in handler --- src/error.rs | 3 +++ src/handler/mod.rs | 32 ++++++++++++++++++++++++++++---- src/service.rs | 12 ++++++------ 3 files changed, 37 insertions(+), 10 deletions(-) diff --git a/src/error.rs b/src/error.rs index 9079df7c4..88fe53121 100644 --- a/src/error.rs +++ b/src/error.rs @@ -113,6 +113,9 @@ pub enum RequestError { EntropyFailure(&'static str), /// Finding nodes closest to a topic hash failed. TopicDistance(String), + /// A request that is responded with multiple respones + /// gets the wrong combination of responses. 
+ InvalidResponseCombo(String), } #[derive(Debug, Clone, PartialEq)] diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 47609efde..c4df51936 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -200,10 +200,10 @@ impl RequestCall { } } -pub enum ResponseType { +pub enum TopicQueryResponseState { + Start, Nodes, - Ticket, - Regconfirmation, + AdNodes, } /// Process to handle handshakes and sessions established from raw RPC communications between nodes. @@ -221,6 +221,9 @@ pub struct Handler { active_requests: ActiveRequests, /// The expected responses by SocketAddr which allows packets to pass the underlying filter. filter_expected_responses: Arc>>, + /// Keeps track of the 2 expected responses, NODES and ADNODES that should be received from a + /// TOPICQUERY request. + topic_query_responses: HashMap, /// Requests awaiting a handshake completion. pending_requests: HashMap>, /// Currently in-progress handshakes with peers. @@ -309,6 +312,7 @@ impl Handler { ), pending_requests: HashMap::new(), filter_expected_responses, + topic_query_responses: HashMap::new(), sessions: LruTimeCache::new( config.session_timeout, Some(config.session_cache_capacity), @@ -1001,6 +1005,7 @@ impl Handler { /// Nodes response. async fn handle_response(&mut self, node_address: NodeAddress, response: Response) { // Find a matching request, if any + trace!("Received {} response", response.body); if let Some(mut request_call) = self.active_requests.remove(&node_address) { if request_call.id() != &response.id { trace!( @@ -1076,7 +1081,26 @@ impl Handler { } else if let RequestBody::RegisterTopic { .. 
} = request_call.request.body { trace!("Received a topics NODES reponse"); self.active_requests - .insert(node_address.clone(), request_call); + .insert(node_address.clone(), request_call.clone()); + } + let response_state = self + .topic_query_responses + .entry(node_address.clone()) + .or_insert(TopicQueryResponseState::Start); + + match response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::Nodes; + self.active_requests + .insert(node_address.clone(), request_call); + } + TopicQueryResponseState::AdNodes => { + self.topic_query_responses.remove(&node_address); + } + TopicQueryResponseState::Nodes => { + warn!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state."); + self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; + } } } else if let ResponseBody::AdNodes { total, .. } = response.body { if total > 1 { diff --git a/src/service.rs b/src/service.rs index a8ef51a93..2d22707e4 100644 --- a/src/service.rs +++ b/src/service.rs @@ -253,6 +253,12 @@ pub struct Service { active_topic_queries: ActiveTopicQueries, } +pub enum TopicQueryState { + Finished(TopicHash), + TimedOut(TopicHash), + Unsatisfied(TopicHash, usize), +} + pub enum TopicQueryResponseState { Start, Nodes, @@ -289,12 +295,6 @@ impl ActiveTopicQueries { } } -pub enum TopicQueryState { - Finished(TopicHash), - TimedOut(TopicHash), - Unsatisfied(TopicHash, usize), -} - impl Stream for ActiveTopicQueries { type Item = TopicQueryState; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { From e9133449fe485f0877501e63ede14f11151c1cd7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 23 Jun 2022 22:25:19 +0200 Subject: [PATCH 197/391] Filter nodes in responses before insertion in kbuckets --- examples/find_nodes.rs | 1 + src/discv5.rs | 6 ++ src/service.rs | 189 
++++++++++++++++++++--------------------- 3 files changed, 99 insertions(+), 97 deletions(-) diff --git a/examples/find_nodes.rs b/examples/find_nodes.rs index 9a6cd2b35..ada29f513 100644 --- a/examples/find_nodes.rs +++ b/examples/find_nodes.rs @@ -191,6 +191,7 @@ async fn main() { } match discv5_ev { Discv5Event::Discovered(enr) => info!("Enr discovered {}", enr), + Discv5Event::DiscoveredTopic(enr, topic_hash) => info!("Enr discovered {} for topic {}", enr, topic_hash), Discv5Event::EnrAdded { enr, replaced: _ } => info!("Enr added {}", enr), Discv5Event::NodeInserted { node_id, replaced: _ } => info!("Node inserted {}", node_id), Discv5Event::NodeInsertedTopic { node_id, replaced: _, topic_hash } => info!("Node inserted {} in topic hash {} kbucket", node_id, topic_hash), diff --git a/src/discv5.rs b/src/discv5.rs index 96fb09d2b..eea56984a 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -58,6 +58,12 @@ pub enum Discv5Event { /// This happen spontaneously through queries as nodes return ENR's. These ENR's are not /// guaranteed to be live or contactable. Discovered(Enr), + /// A node has been discovered from either a REGTOPIC or a TOPICQUERY request. + /// + /// The ENR of the node is returned. Various properties can be derived from the ENR. + /// This happen spontaneously through requests as nodes return ENR's. These ENR's are not + /// guaranteed to be live or contactable. + DiscoveredTopic(Enr, TopicHash), /// A new ENR was added to the routing table. EnrAdded { enr: Enr, replaced: Option }, /// A new node has been added to the routing table. 
diff --git a/src/service.rs b/src/service.rs index 2d22707e4..a542af1fa 100644 --- a/src/service.rs +++ b/src/service.rs @@ -523,7 +523,7 @@ impl Service { (None, None) }; - let mut kbuckets = KBucketsTable::new( + let kbuckets = KBucketsTable::new( NodeId::new(&topic_hash.as_bytes()).into(), Duration::from_secs(60), self.config.incoming_bucket_limit, @@ -531,8 +531,14 @@ impl Service { bucket_filter, ); { - let mut local_routing_table = self.kbuckets.write(); - Service::new_connections_disconnected(&mut kbuckets, topic_hash, local_routing_table.iter().map(|entry| entry.node.value.clone())); + let mut local_routing_table = self.kbuckets.write().clone(); + for enr in local_routing_table.iter().map(|entry| entry.node.value.clone()) { + self.connection_updated( + enr.node_id(), + ConnectionStatus::Connected(enr, ConnectionDirection::Incoming), + Some(topic_hash), + ); + } } self.topics_kbuckets.insert(topic_hash, kbuckets); } @@ -555,7 +561,7 @@ impl Service { }; debug!("Initiating kbuckets for topic hash {}", topic_hash); - let mut kbuckets = KBucketsTable::new( + let kbuckets = KBucketsTable::new( NodeId::new(&topic_hash.as_bytes()).into(), Duration::from_secs(60), self.config.incoming_bucket_limit, @@ -564,9 +570,13 @@ impl Service { ); debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); - { - let mut local_routing_table = self.kbuckets.write(); - Service::new_connections_disconnected(&mut kbuckets, topic_hash, local_routing_table.iter().map(|entry| entry.node.value.clone())); + let mut local_routing_table = self.kbuckets.write().clone(); + for enr in local_routing_table.iter().map(|entry| entry.node.value.clone()) { + self.connection_updated( + enr.node_id(), + ConnectionStatus::Connected(enr, ConnectionDirection::Incoming), + Some(topic_hash), + ); } self.topics_kbuckets.insert(topic_hash, kbuckets); METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); @@ -1128,11 +1138,11 @@ impl 
Service { // verify we know of the rpc_id let id = response.id.clone(); - let active_request = if let Some(active_request) = self.active_requests.remove(&id) { - Some(active_request) - } else { - self.active_regtopic_requests.remove(&id) - }; + let active_request = self + .active_requests + .remove(&id) + .and_then(|active_request| Some(active_request)) + .or_else(|| self.active_regtopic_requests.remove(&id)); if let Some(mut active_request) = active_request { debug!( @@ -1301,28 +1311,23 @@ impl Service { match active_request.request_body { RequestBody::TopicQuery { topic } => { - if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic) { - Service::new_connections_disconnected( - kbuckets, - topic, - nodes.into_iter(), - ); - let response_state = self - .topic_query_responses - .entry(node_id) - .or_insert(TopicQueryResponseState::Start); - - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::Nodes; - self.active_requests.insert(id, active_request); - } - TopicQueryResponseState::AdNodes => { - self.topic_query_responses.remove(&node_id); - } - TopicQueryResponseState::Nodes => { - debug_unreachable!("No more NODES responses should be received if TOPICQUERY response is in Nodes state.") - } + self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); + + let response_state = self + .topic_query_responses + .entry(node_id) + .or_insert(TopicQueryResponseState::Start); + + match response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::Nodes; + self.active_requests.insert(id, active_request); + } + TopicQueryResponseState::AdNodes => { + self.topic_query_responses.remove(&node_id); + } + TopicQueryResponseState::Nodes => { + debug_unreachable!("No more NODES responses should be received if TOPICQUERY response is in Nodes state.") } } } @@ -1330,17 +1335,9 @@ impl Service { topic, enr: _, ticket: _, - } => { - if let Some(kbuckets) = 
self.topics_kbuckets.get_mut(&topic) { - Service::new_connections_disconnected( - kbuckets, - topic, - nodes.into_iter(), - ); - } - } + } => self.discovered(&node_id, nodes, active_request.query_id, Some(topic)), RequestBody::FindNode { .. } => { - self.discovered(&node_id, nodes, active_request.query_id) + self.discovered(&node_id, nodes, active_request.query_id, None) } _ => debug_unreachable!( "Only TOPICQUERY, REGTOPIC and FINDNODE requests expect NODES response" @@ -2035,7 +2032,13 @@ impl Service { } /// Processes discovered peers from a query. - fn discovered(&mut self, source: &NodeId, mut enrs: Vec, query_id: Option) { + fn discovered( + &mut self, + source: &NodeId, + mut enrs: Vec, + query_id: Option, + topic_hash: Option, + ) { let local_id = self.local_enr.read().node_id(); enrs.retain(|enr| { if enr.node_id() == local_id { @@ -2045,20 +2048,43 @@ impl Service { // If any of the discovered nodes are in the routing table, and there contains an older ENR, update it. // If there is an event stream send the Discovered event if self.config.report_discovered_peers { - self.send_event(Discv5Event::Discovered(enr.clone())); + if let Some(topic_hash) = topic_hash { + self.send_event(Discv5Event::DiscoveredTopic(enr.clone(), topic_hash)); + } else { + self.send_event(Discv5Event::Discovered(enr.clone())); + } } // ignore peers that don't pass the table filter if (self.config.table_filter)(enr) { + let kbuckets_topic = topic_hash.and_then(|topic_hash| { + self.topics_kbuckets + .get_mut(&topic_hash) + .and_then(|kbuckets| { + Some(kbuckets).or_else(|| { + debug_unreachable!("A kbuckets table should exist for topic hash"); + None + }) + }) + }); + let key = kbucket::Key::from(enr.node_id()); // If the ENR exists in the routing table and the discovered ENR has a greater // sequence number, perform some filter checks before updating the enr. 
- let must_update_enr = match self.kbuckets.write().entry(&key) { - kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), - _ => false, + let must_update_enr = if let Some(kbuckets_topic) = kbuckets_topic { + match kbuckets_topic.entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + _ => false, + } + } else { + match self.kbuckets.write().entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + _ => false, + } }; if must_update_enr { @@ -2085,6 +2111,10 @@ impl Service { source != &enr.node_id() }); + if topic_hash.is_some() { + return; + } + // if this is part of a query, update the query if let Some(query_id) = query_id { if let Some(query) = self.queries.get_mut(query_id) { @@ -2121,16 +2151,16 @@ impl Service { let mut ping_peer = None; let mut event_to_send = None; - let kbuckets_topic = if let Some(topic_hash) = topic_hash { - if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { - Some(kbuckets) - } else { - debug_unreachable!("A kbuckets table should exist for topic hash"); - None - } - } else { - None - }; + let kbuckets_topic = topic_hash.and_then(|topic_hash| { + self.topics_kbuckets + .get_mut(&topic_hash) + .and_then(|kbuckets| { + Some(kbuckets).or_else(|| { + debug_unreachable!("A kbuckets table should exist for topic hash"); + None + }) + }) + }); let key = kbucket::Key::from(node_id); match new_status { @@ -2145,6 +2175,7 @@ impl Service { } else { self.kbuckets.write().insert_or_update(&key, enr, status) }; + match insert_result { InsertResult::Inserted => { // We added this peer to the table @@ -2316,6 +2347,7 @@ impl Service { &node_id, nodes_response.received_nodes, active_request.query_id, + None, ); } } else { @@ -2377,43 
+2409,6 @@ impl Service { } } - fn new_connections_disconnected( - kbuckets: &mut KBucketsTable, - topic: TopicHash, - nodes: impl Iterator, - ) { - for enr in nodes { - let peer_key: kbucket::Key = enr.node_id().into(); - match kbuckets.insert_or_update( - &peer_key, - enr.clone(), - NodeStatus { - state: ConnectionState::Disconnected, - direction: ConnectionDirection::Incoming, - }, - ) { - InsertResult::Failed(FailureReason::BucketFull) => { - error!("Table full") - } - InsertResult::Failed(FailureReason::BucketFilter) => { - error!("Failed bucket filter") - } - InsertResult::Failed(FailureReason::TableFilter) => { - error!("Failed table filter") - } - InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { - error!("Invalid self update") - } - InsertResult::Failed(_) => error!("Failed to insert ENR"), - _ => debug!( - "Insertion of node {} into KBucket of {} was successful", - enr.node_id(), - topic - ), - } - } - } - /// A future that maintains the routing table and inserts nodes when required. This returns the /// `Discv5Event::NodeInserted` variant if a new node has been inserted into the routing table. async fn bucket_maintenance_poll( From f46e694185962272f6e615c0021eb28be4c86756 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 00:09:26 +0200 Subject: [PATCH 198/391] Constrain response combo topics in handler --- src/handler/mod.rs | 220 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 189 insertions(+), 31 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index c4df51936..4e3c7b5c9 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -206,6 +206,12 @@ pub enum TopicQueryResponseState { AdNodes, } +pub enum RegTopicResponseState { + Start, + Nodes, + Ticket, +} + /// Process to handle handshakes and sessions established from raw RPC communications between nodes. pub struct Handler { /// Configuration for the discv5 service. 
@@ -224,6 +230,9 @@ pub struct Handler { /// Keeps track of the 2 expected responses, NODES and ADNODES that should be received from a /// TOPICQUERY request. topic_query_responses: HashMap, + /// Keeps track of the 3 expected responses, NODES and TICKET that should be received from a + /// REGTOPIC request, and REGCONFIRMATION that may be recieved. + reg_topic_responses: HashMap, /// Requests awaiting a handshake completion. pending_requests: HashMap>, /// Currently in-progress handshakes with peers. @@ -313,6 +322,7 @@ impl Handler { pending_requests: HashMap::new(), filter_expected_responses, topic_query_responses: HashMap::new(), + reg_topic_responses: HashMap::new(), sessions: LruTimeCache::new( config.session_timeout, Some(config.session_cache_capacity), @@ -1076,31 +1086,99 @@ impl Handler { } return; } - // If there is only one NODES response but it is for a REGTOPIC, reinsert the active request for a - // TICKET or a potential REGCONFIRMATION. - } else if let RequestBody::RegisterTopic { .. } = request_call.request.body { - trace!("Received a topics NODES reponse"); - self.active_requests - .insert(node_address.clone(), request_call.clone()); } - let response_state = self - .topic_query_responses - .entry(node_address.clone()) - .or_insert(TopicQueryResponseState::Start); - - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::Nodes; - self.active_requests - .insert(node_address.clone(), request_call); - } - TopicQueryResponseState::AdNodes => { - self.topic_query_responses.remove(&node_address); + // If the total number of NODES responses arrived and it is for a REGTOPIC or a + // TOPICQUERY the active request might be waiting for more types of responses. + match request_call.request.body { + RequestBody::RegisterTopic { .. 
} => { + trace!("Received a NODES reponse for a REGTOPIC request"); + let response_state = self + .reg_topic_responses + .entry(node_address.clone()) + .or_insert(RegTopicResponseState::Start); + + match response_state { + RegTopicResponseState::Start => { + *response_state = RegTopicResponseState::Nodes; + self.active_requests + .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response( + node_address.clone(), + Box::new(response), + )) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } + RegTopicResponseState::Ticket => { + self.reg_topic_responses.remove(&node_address); + // Still a REGCONFIRMATION may come hence request call is reinserted. + self.active_requests + .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response( + node_address.clone(), + Box::new(response), + )) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } + RegTopicResponseState::Nodes => { + warn!("No more NODES responses should be received if REGTOPIC response is in Nodes state."); + self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a REGTOPIC request".into()), true).await; + // Remove the expected response + self.remove_expected_response(node_address.socket_addr); + self.send_next_request(node_address).await; + return; + } + } } - TopicQueryResponseState::Nodes => { - warn!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state."); - self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; + RequestBody::TopicQuery { .. 
} => { + trace!("Received a NODES reponse for a TOPICQUERY request"); + let response_state = self + .topic_query_responses + .entry(node_address.clone()) + .or_insert(TopicQueryResponseState::Start); + + match response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::Nodes; + self.active_requests + .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response( + node_address.clone(), + Box::new(response), + )) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } + TopicQueryResponseState::AdNodes => { + self.topic_query_responses.remove(&node_address); + } + TopicQueryResponseState::Nodes => { + warn!("No more NODES responses should be received if TOPICQUERY response is in Nodes state."); + self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; + // Remove the expected response + self.remove_expected_response(node_address.socket_addr); + self.send_next_request(node_address).await; + return; + } + } } + _ => {} } } else if let ResponseBody::AdNodes { total, .. 
} = response.body { if total > 1 { @@ -1149,19 +1227,99 @@ impl Handler { return; } } + let response_state = self + .topic_query_responses + .entry(node_address.clone()) + .or_insert(TopicQueryResponseState::Start); + + match response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::AdNodes; + self.active_requests + .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response( + node_address.clone(), + Box::new(response), + )) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } + TopicQueryResponseState::Nodes => { + self.topic_query_responses.remove(&node_address); + } + TopicQueryResponseState::AdNodes => { + warn!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state."); + self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; + // Remove the expected response + self.remove_expected_response(node_address.socket_addr); + self.send_next_request(node_address).await; + return; + } + } } else if let ResponseBody::Ticket { .. } = response.body { // The request is reinserted for either a NODES response or a potential REGCONFIRMATION // response that may come. 
- self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response(node_address, Box::new(response))) - .await - { - warn!("Failed to inform of response {}", e) + let response_state = self + .reg_topic_responses + .entry(node_address.clone()) + .or_insert(RegTopicResponseState::Start); + + match response_state { + RegTopicResponseState::Start => { + *response_state = RegTopicResponseState::Ticket; + self.active_requests + .insert(node_address.clone(), request_call.clone()); + if let Err(e) = self + .service_send + .send(HandlerOut::Response( + node_address.clone(), + Box::new(response), + )) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } + RegTopicResponseState::Nodes => { + self.reg_topic_responses.remove(&node_address); + // Still a REGCONFIRMATION may come hence request call is reinserted. + self.active_requests + .insert(node_address.clone(), request_call.clone()); + if let Err(e) = self + .service_send + .send(HandlerOut::Response( + node_address.clone(), + Box::new(response), + )) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } + RegTopicResponseState::Ticket => { + warn!("No more TICKET responses should be received if REGTOPIC response is in Ticket state."); + self.fail_request( + request_call, + RequestError::InvalidResponseCombo( + "Received more than one TICKET response for a REGTOPIC request" + .into(), + ), + true, + ) + .await; + // Remove the expected response + self.remove_expected_response(node_address.socket_addr); + self.send_next_request(node_address).await; + return; + } } - return; } // Remove the expected response From ff5bb1de71b2ae84510973d239d3f98ff13f23ba Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 00:22:39 +0200 Subject: [PATCH 199/391] Fix clippy warnings --- src/service.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index a542af1fa..87c98311b 100644 
--- a/src/service.rs +++ b/src/service.rs @@ -1141,7 +1141,6 @@ impl Service { let active_request = self .active_requests .remove(&id) - .and_then(|active_request| Some(active_request)) .or_else(|| self.active_regtopic_requests.remove(&id)); if let Some(mut active_request) = active_request { From 6b2f3c91eb5556d0f8594f095ca596e8fc0c44e6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 09:43:56 +0200 Subject: [PATCH 200/391] Add trace messages --- src/service.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/service.rs b/src/service.rs index 87c98311b..bf9af1426 100644 --- a/src/service.rs +++ b/src/service.rs @@ -728,6 +728,7 @@ impl Service { } fn send_register_topics(&mut self, topic_hash: TopicHash) { + trace!("Sending REGTOPICS"); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { let reg_attempts = self.topics.entry(topic_hash).or_default(); // Remove expired ads @@ -2175,6 +2176,10 @@ impl Service { self.kbuckets.write().insert_or_update(&key, enr, status) }; + if let Some(topic_hash) = topic_hash { + trace!("Inserting node into kbucket of topic gave result: {:?}", insert_result); + } + match insert_result { InsertResult::Inserted => { // We added this peer to the table From ef768e48036cfd8435139042c0d056b4f7e60baf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 09:55:42 +0200 Subject: [PATCH 201/391] Add trace message --- src/service.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index bf9af1426..957878f29 100644 --- a/src/service.rs +++ b/src/service.rs @@ -730,6 +730,7 @@ impl Service { fn send_register_topics(&mut self, topic_hash: TopicHash) { trace!("Sending REGTOPICS"); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { + trace!("Found {} new entries in kbuckets of topic hash {}", kbuckets.get_mut().iter().count(), topic_hash); let reg_attempts = 
self.topics.entry(topic_hash).or_default(); // Remove expired ads let mut new_reg_peers = Vec::new(); @@ -1674,6 +1675,7 @@ impl Service { enr, ticket: ticket_bytes, }; + trace!("Sending reg topic to node {}", contact.socket_addr()); self.send_rpc_request(ActiveRequest { contact, request_body, @@ -2176,7 +2178,7 @@ impl Service { self.kbuckets.write().insert_or_update(&key, enr, status) }; - if let Some(topic_hash) = topic_hash { + if let Some(_) = topic_hash { trace!("Inserting node into kbucket of topic gave result: {:?}", insert_result); } From 88977c28ae37fe53e6a65b4e319df6cd1bd57ccb Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 10:15:13 +0200 Subject: [PATCH 202/391] Rearrange imperative order to fix empty topic kbuckets bug --- src/service.rs | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/service.rs b/src/service.rs index 957878f29..342fd6e7b 100644 --- a/src/service.rs +++ b/src/service.rs @@ -530,7 +530,10 @@ impl Service { table_filter, bucket_filter, ); - { + + self.topics_kbuckets.insert(topic_hash, kbuckets); + + let mut local_routing_table = self.kbuckets.write().clone(); for enr in local_routing_table.iter().map(|entry| entry.node.value.clone()) { self.connection_updated( @@ -539,8 +542,6 @@ impl Service { Some(topic_hash), ); } - } - self.topics_kbuckets.insert(topic_hash, kbuckets); } self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); } @@ -570,6 +571,8 @@ impl Service { ); debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); + self.topics_kbuckets.insert(topic_hash, kbuckets); + let mut local_routing_table = self.kbuckets.write().clone(); for enr in local_routing_table.iter().map(|entry| entry.node.value.clone()) { self.connection_updated( @@ -578,7 +581,6 @@ impl Service { Some(topic_hash), ); } - self.topics_kbuckets.insert(topic_hash, kbuckets); 
METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); self.send_register_topics(topic_hash); @@ -730,7 +732,11 @@ impl Service { fn send_register_topics(&mut self, topic_hash: TopicHash) { trace!("Sending REGTOPICS"); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { - trace!("Found {} new entries in kbuckets of topic hash {}", kbuckets.get_mut().iter().count(), topic_hash); + trace!( + "Found {} new entries in kbuckets of topic hash {}", + kbuckets.get_mut().iter().count(), + topic_hash + ); let reg_attempts = self.topics.entry(topic_hash).or_default(); // Remove expired ads let mut new_reg_peers = Vec::new(); @@ -2179,7 +2185,10 @@ impl Service { }; if let Some(_) = topic_hash { - trace!("Inserting node into kbucket of topic gave result: {:?}", insert_result); + trace!( + "Inserting node into kbucket of topic gave result: {:?}", + insert_result + ); } match insert_result { From 89aa7240652ffcb70e684b1aff47939b1664fd62 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 14:24:36 +0200 Subject: [PATCH 203/391] Send ADNODES responses --- src/service.rs | 74 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 17 deletions(-) diff --git a/src/service.rs b/src/service.rs index 342fd6e7b..ba412a107 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1128,14 +1128,17 @@ impl Service { } } RequestBody::TopicQuery { topic } => { - trace!("Sending TOPICQUERY find nodes response"); + trace!( + "Sending NODES response to TOPICQUERY request {}", + id + ); self.send_find_topic_nodes_response( topic, node_address.clone(), id.clone(), "TOPICQUERY", ); - trace!("Sending TOPICQUERY AD nodes response"); + trace!("Sending ADNODES response"); self.send_topic_query_nodes_response(node_address, id, topic); } } @@ -1784,7 +1787,16 @@ impl Service { .get_ad_nodes(topic) .map(|ad| ad.node_record().clone()) .collect(); - self.send_nodes_response(nodes_to_send, node_address, rpc_id, "TOPICQUERY 
ADS"); + self.send_nodes_response( + nodes_to_send, + node_address, + rpc_id, + "TOPICQUERY", + ResponseBody::AdNodes { + total: 1u64, + nodes: Vec::new(), + }, + ); } fn send_find_topic_nodes_response( @@ -1823,7 +1835,16 @@ impl Service { } } } - self.send_nodes_response(closest_peers, node_address, id, req_type); + self.send_nodes_response( + closest_peers, + node_address, + id, + req_type, + ResponseBody::Nodes { + total: 1u64, + nodes: Vec::new(), + }, + ); } /// Sends a NODES response, given a list of found ENR's. This function splits the nodes up @@ -1864,7 +1885,16 @@ impl Service { nodes_to_send.push(node); } } - self.send_nodes_response(nodes_to_send, node_address, rpc_id, "FINDNODE"); + self.send_nodes_response( + nodes_to_send, + node_address, + rpc_id, + "FINDNODE", + ResponseBody::Nodes { + total: 1u64, + nodes: Vec::new(), + }, + ); } fn send_nodes_response( @@ -1873,15 +1903,13 @@ impl Service { node_address: NodeAddress, rpc_id: RequestId, req_type: &str, + resp_body: ResponseBody, ) { // if there are no nodes, send an empty response if nodes_to_send.is_empty() { let response = Response { id: rpc_id, - body: ResponseBody::Nodes { - total: 1u64, - nodes: Vec::new(), - }, + body: resp_body.clone(), }; trace!( "Sending empty {} response to: {}", @@ -1892,7 +1920,10 @@ impl Service { .handler_send .send(HandlerIn::Response(node_address, Box::new(response))) { - warn!("Failed to send empty {} response {}", req_type, e) + warn!( + "Failed to send empty response {} to request {} response. Error: {}", + resp_body, req_type, e + ) } } else { // build the NODES response @@ -1933,12 +1964,21 @@ impl Service { let responses: Vec = to_send_nodes .into_iter() - .map(|nodes| Response { - id: rpc_id.clone(), - body: ResponseBody::Nodes { - total: (rpc_index + 1) as u64, - nodes, - }, + .map(|nodes| { + let body = match resp_body { + ResponseBody::AdNodes { .. 
} => ResponseBody::AdNodes { + total: (rpc_index + 1) as u64, + nodes, + }, + _ => ResponseBody::Nodes { + total: (rpc_index + 1) as u64, + nodes, + }, + }; + Response { + id: rpc_id.clone(), + body, + } }) .collect(); @@ -2184,7 +2224,7 @@ impl Service { self.kbuckets.write().insert_or_update(&key, enr, status) }; - if let Some(_) = topic_hash { + if topic_hash.is_some() { trace!( "Inserting node into kbucket of topic gave result: {:?}", insert_result From 24b3d1eba6411afbf127de68ef8423d64548a116 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 15:25:48 +0200 Subject: [PATCH 204/391] Remove TOPICQUERY query if no more peers --- src/service.rs | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/src/service.rs b/src/service.rs index ba412a107..ad5a67f04 100644 --- a/src/service.rs +++ b/src/service.rs @@ -826,6 +826,25 @@ impl Service { break; } } + // If no new nodes can be found to query, return TOPICQUERY request early. + if new_query_peers.len() < 1 { + debug!("Found no new peers to send TOPICQUERY to, returning unsatisfied request"); + if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { + if let Some(callback) = query.callback { + if callback + .send(Ok(query.results.into_values().collect::>())) + .is_err() + { + warn!( + "Callback dropped for topic query {}. 
Results dropped", + topic_hash + ); + } + } + } + return; + } + trace!("Sending TOPICQUERYs to {} new peers", new_query_peers.len()); for enr in new_query_peers { if let Ok(node_contact) = @@ -1128,10 +1147,7 @@ impl Service { } } RequestBody::TopicQuery { topic } => { - trace!( - "Sending NODES response to TOPICQUERY request {}", - id - ); + trace!("Sending NODES response to TOPICQUERY request {}", id); self.send_find_topic_nodes_response( topic, node_address.clone(), From 36dd5a1c6070ad7eef900152ee0d2274e211bd5a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 16:39:41 +0200 Subject: [PATCH 205/391] Fix missing message decode AdNodes response --- src/rpc.rs | 31 +++++++++++++++++++++++++++++-- src/service.rs | 2 +- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index de73b33dd..0a9a4eb79 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -86,7 +86,7 @@ pub enum RequestBody { }, /// A REGTOPIC request. RegisterTopic { - /// The topic we want to advertise at the node receiving this request. + /// The hashed topic we want to advertise at the node receiving this request. topic: TopicHash, // Current node record of sender. enr: crate::Enr, @@ -223,7 +223,7 @@ impl Response { ResponseBody::Talk { .. } => 6, ResponseBody::Ticket { .. } => 8, ResponseBody::RegisterConfirmation { .. } => 9, - ResponseBody::AdNodes { .. } => 10, + ResponseBody::AdNodes { .. } => 11, } } @@ -694,6 +694,33 @@ impl Message { body: RequestBody::TopicQuery { topic }, }) } + 11 => { + // AdNodesResponse + if list_len != 3 { + debug!( + "AdNodes Response has an invalid RLP list length. Expected 3, found {}", + list_len + ); + return Err(DecoderError::RlpIncorrectListLen); + } + + let nodes = { + let enr_list_rlp = rlp.at(2)?; + if enr_list_rlp.is_empty() { + // no records + vec![] + } else { + enr_list_rlp.as_list::>()? 
+ } + }; + Message::Response(Response { + id, + body: ResponseBody::AdNodes { + total: rlp.val_at::(1)?, + nodes, + }, + }) + } _ => { return Err(DecoderError::Custom("Unknown RPC message type")); } diff --git a/src/service.rs b/src/service.rs index ad5a67f04..ae0ccc4b5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -827,7 +827,7 @@ impl Service { } } // If no new nodes can be found to query, return TOPICQUERY request early. - if new_query_peers.len() < 1 { + if new_query_peers.is_empty() { debug!("Found no new peers to send TOPICQUERY to, returning unsatisfied request"); if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { if let Some(callback) = query.callback { From cb7c3366ea50af62960d8d27261101579ccabb8b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 16:43:25 +0200 Subject: [PATCH 206/391] Increase timeout for testing regconfirmation from 2 nodes --- src/handler/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 4e3c7b5c9..c8ae59c74 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -317,7 +317,7 @@ impl Handler { enr, key, active_requests: ActiveRequests::new( - config.request_timeout + Duration::from_secs(15), + config.request_timeout + Duration::from_secs(25), ), pending_requests: HashMap::new(), filter_expected_responses, From dd21d02e64b98501771be0f1c7c506d004996227 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jun 2022 17:05:53 +0200 Subject: [PATCH 207/391] Add trace info for TOPICQUERY dry state --- src/discv5.rs | 16 ++++++++-------- src/service.rs | 32 +++++++++++++++++--------------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index eea56984a..070a965e7 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -522,8 +522,6 @@ impl Discv5 { let channel = self.clone_channel(); async move { - let mut all_found_ad_nodes = Vec::new(); - // the service will verify if this node is 
contactable, we just send it and // await a response. let (callback_send, callback_recv) = oneshot::channel(); @@ -539,13 +537,15 @@ impl Discv5 { .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; // await the response - callback_recv + let ad_nodes = callback_recv .await - .map_err(|e| RequestError::ChannelFailed(e.to_string()))? - .map(|ad_nodes| all_found_ad_nodes.push(ad_nodes)) - .ok(); - let all_found_ad_nodes = all_found_ad_nodes.into_iter().flatten().collect(); - Ok(all_found_ad_nodes) + .map_err(|e| RequestError::ChannelFailed(e.to_string()))?; + if let Ok(ad_nodes) = ad_nodes { + debug!("Received {} ad nodes", ad_nodes.len()); + Ok(ad_nodes) + } else { + Ok(Vec::new()) + } } } diff --git a/src/service.rs b/src/service.rs index ae0ccc4b5..949d2d3c8 100644 --- a/src/service.rs +++ b/src/service.rs @@ -256,7 +256,10 @@ pub struct Service { pub enum TopicQueryState { Finished(TopicHash), TimedOut(TopicHash), + // Not enough ads have been returned, more peers should be queried. Unsatisfied(TopicHash, usize), + // No new peers can be found to send TOPICQUERYs to. 
+ Dry(TopicHash), } pub enum TopicQueryResponseState { @@ -277,6 +280,7 @@ pub struct ActiveTopicQuery { results: HashMap, callback: Option, RequestError>>>, start: Instant, + dry: bool, } pub struct ActiveTopicQueries { @@ -299,7 +303,9 @@ impl Stream for ActiveTopicQueries { type Item = TopicQueryState; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { for (topic_hash, query) in self.queries.iter() { - if query.results.len() >= self.num_results { + if query.dry { + return Poll::Ready(Some(TopicQueryState::Dry(*topic_hash))); + } else if query.results.len() >= self.num_results { return Poll::Ready(Some(TopicQueryState::Finished(*topic_hash))); } else if query.start.elapsed() >= self.time_out { warn!( @@ -711,7 +717,7 @@ impl Service { } Some(topic_query_progress) = self.active_topic_queries.next() => { match topic_query_progress { - TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) => { + TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) | TopicQueryState::Dry(topic_hash) => { if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { if let Some(callback) = query.callback { if callback.send(Ok(query.results.into_values().collect::>())).is_err() { @@ -800,6 +806,7 @@ impl Service { results: HashMap::new(), callback, start: Instant::now(), + dry: false, }); let queried_peers = query.queried_peers.clone(); if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { @@ -828,19 +835,9 @@ impl Service { } // If no new nodes can be found to query, return TOPICQUERY request early. if new_query_peers.is_empty() { - debug!("Found no new peers to send TOPICQUERY to, returning unsatisfied request"); - if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { - if let Some(callback) = query.callback { - if callback - .send(Ok(query.results.into_values().collect::>())) - .is_err() - { - warn!( - "Callback dropped for topic query {}. 
Results dropped", - topic_hash - ); - } - } + debug!("Found no new peers to send TOPICQUERY to, setting query status to dry"); + if let Some(mut query) = self.active_topic_queries.queries.remove(&topic_hash) { + query.dry = true; } return; } @@ -1420,6 +1417,11 @@ impl Service { if let RequestBody::TopicQuery { topic } = active_request.request_body { if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { nodes.into_iter().for_each(|enr| { + trace!( + "Inserting node {} into query for topic hash {}", + enr.node_id(), + topic + ); query.results.insert(enr.node_id(), enr); }); } From fdf3d5a8273ff77d6a0e651b9dba670179f18f48 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 25 Jun 2022 15:09:04 +0200 Subject: [PATCH 208/391] Add trace messages in buggy zone --- src/service.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/service.rs b/src/service.rs index 949d2d3c8..f718139d2 100644 --- a/src/service.rs +++ b/src/service.rs @@ -253,6 +253,7 @@ pub struct Service { active_topic_queries: ActiveTopicQueries, } +#[derive(Debug)] pub enum TopicQueryState { Finished(TopicHash), TimedOut(TopicHash), @@ -716,10 +717,12 @@ impl Service { } } Some(topic_query_progress) = self.active_topic_queries.next() => { + trace!("Query is in state {:?}", topic_query_progress); match topic_query_progress { TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) | TopicQueryState::Dry(topic_hash) => { if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { if let Some(callback) = query.callback { + trace!("Sending result of query for topic hash {} to discv5 layer", topic_hash); if callback.send(Ok(query.results.into_values().collect::>())).is_err() { warn!("Callback dropped for topic query {}. 
Results dropped", topic_hash); } @@ -1436,6 +1439,7 @@ impl Service { self.active_requests.insert(id, active_request); } TopicQueryResponseState::Nodes => { + trace!("TOPICQUERY has received expected responses"); self.topic_query_responses.remove(&node_id); } TopicQueryResponseState::AdNodes => { From d5cabd9098777aa2127371e8ac5eadafa8fda239 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 25 Jun 2022 15:32:27 +0200 Subject: [PATCH 209/391] Add trace in buggy zone --- src/service.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/service.rs b/src/service.rs index f718139d2..afc78cbbf 100644 --- a/src/service.rs +++ b/src/service.rs @@ -301,19 +301,20 @@ impl ActiveTopicQueries { } impl Stream for ActiveTopicQueries { - type Item = TopicQueryState; + type Item = Result; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + trace!("Polling active topic queries"); for (topic_hash, query) in self.queries.iter() { if query.dry { - return Poll::Ready(Some(TopicQueryState::Dry(*topic_hash))); + return Poll::Ready(Some(Ok(TopicQueryState::Dry(*topic_hash)))); } else if query.results.len() >= self.num_results { - return Poll::Ready(Some(TopicQueryState::Finished(*topic_hash))); + return Poll::Ready(Some(Ok(TopicQueryState::Finished(*topic_hash)))); } else if query.start.elapsed() >= self.time_out { warn!( "TOPICQUERY timed out. Only {} ads found for topic hash.", query.results.len() ); - return Poll::Ready(Some(TopicQueryState::TimedOut(*topic_hash))); + return Poll::Ready(Some(Ok(TopicQueryState::TimedOut(*topic_hash)))); } else { let exhausted_peers = query .queried_peers @@ -323,10 +324,10 @@ impl Stream for ActiveTopicQueries { // If all peers have responded or failed the request and we still did not // obtain enough results, the query is in TopicQueryState::Unsatisfied. 
if exhausted_peers >= query.queried_peers.len() { - return Poll::Ready(Some(TopicQueryState::Unsatisfied( + return Poll::Ready(Some(Ok(TopicQueryState::Unsatisfied( *topic_hash, query.results.len(), - ))); + )))); } } } @@ -716,7 +717,7 @@ impl Service { METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); } } - Some(topic_query_progress) = self.active_topic_queries.next() => { + Some(Ok(topic_query_progress)) = self.active_topic_queries.next() => { trace!("Query is in state {:?}", topic_query_progress); match topic_query_progress { TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) | TopicQueryState::Dry(topic_hash) => { @@ -1210,7 +1211,7 @@ impl Service { ); } - let topic_radius = (1..self.config.topic_radius + 1).collect(); + let topic_radius = (1..=self.config.topic_radius).collect(); // These are sanitized and ordered let distances_requested = match &active_request.request_body { RequestBody::FindNode { distances } => distances, From 5607e9644c0e17a298ac86c43f799441d262f50e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 25 Jun 2022 15:46:11 +0200 Subject: [PATCH 210/391] Set queried peer upon return of result --- src/service.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/service.rs b/src/service.rs index afc78cbbf..c7abb316d 100644 --- a/src/service.rs +++ b/src/service.rs @@ -301,20 +301,19 @@ impl ActiveTopicQueries { } impl Stream for ActiveTopicQueries { - type Item = Result; + type Item = TopicQueryState; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - trace!("Polling active topic queries"); for (topic_hash, query) in self.queries.iter() { if query.dry { - return Poll::Ready(Some(Ok(TopicQueryState::Dry(*topic_hash)))); + return Poll::Ready(Some(TopicQueryState::Dry(*topic_hash))); } else if query.results.len() >= self.num_results { - return Poll::Ready(Some(Ok(TopicQueryState::Finished(*topic_hash)))); + return 
Poll::Ready(Some(TopicQueryState::Finished(*topic_hash))); } else if query.start.elapsed() >= self.time_out { warn!( "TOPICQUERY timed out. Only {} ads found for topic hash.", query.results.len() ); - return Poll::Ready(Some(Ok(TopicQueryState::TimedOut(*topic_hash)))); + return Poll::Ready(Some(TopicQueryState::TimedOut(*topic_hash))); } else { let exhausted_peers = query .queried_peers @@ -324,10 +323,10 @@ impl Stream for ActiveTopicQueries { // If all peers have responded or failed the request and we still did not // obtain enough results, the query is in TopicQueryState::Unsatisfied. if exhausted_peers >= query.queried_peers.len() { - return Poll::Ready(Some(Ok(TopicQueryState::Unsatisfied( + return Poll::Ready(Some(TopicQueryState::Unsatisfied( *topic_hash, query.results.len(), - )))); + ))); } } } @@ -717,7 +716,7 @@ impl Service { METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); } } - Some(Ok(topic_query_progress)) = self.active_topic_queries.next() => { + Some(topic_query_progress) = self.active_topic_queries.next() => { trace!("Query is in state {:?}", topic_query_progress); match topic_query_progress { TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) | TopicQueryState::Dry(topic_hash) => { @@ -1428,6 +1427,7 @@ impl Service { ); query.results.insert(enr.node_id(), enr); }); + *query.queried_peers.entry(node_id).or_default() = true; } let response_state = self .topic_query_responses From 31a82f83f63aa517cbf3ca2df0813604999e43ea Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 25 Jun 2022 16:02:12 +0200 Subject: [PATCH 211/391] Pass query through dry state before remove --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index c7abb316d..2272c3868 100644 --- a/src/service.rs +++ b/src/service.rs @@ -839,7 +839,7 @@ impl Service { // If no new nodes can be found to query, return TOPICQUERY request early. 
if new_query_peers.is_empty() { debug!("Found no new peers to send TOPICQUERY to, setting query status to dry"); - if let Some(mut query) = self.active_topic_queries.queries.remove(&topic_hash) { + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { query.dry = true; } return; From 4aa52f2ae7ca4c8836aeaa4bc7c1a2f414532e24 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 27 Jun 2022 21:23:55 +0200 Subject: [PATCH 212/391] Set regconfirmation time out spearately in handler --- src/handler/active_requests.rs | 10 +++++++++- src/handler/mod.rs | 8 +++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/handler/active_requests.rs b/src/handler/active_requests.rs index a55324ce8..5b4f4e125 100644 --- a/src/handler/active_requests.rs +++ b/src/handler/active_requests.rs @@ -23,7 +23,15 @@ impl ActiveRequests { pub(crate) fn insert(&mut self, node_address: NodeAddress, request_call: RequestCall) { let nonce = *request_call.packet.message_nonce(); self.active_requests_mapping - .insert(node_address.clone(), request_call); + .insert(node_address.clone(), request_call); + self.active_requests_nonce_mapping + .insert(nonce, node_address); + } + + pub(crate) fn insert_at(&mut self, node_address: NodeAddress, request_call: RequestCall, timeout: Duration) { + let nonce = *request_call.packet.message_nonce(); + self.active_requests_mapping + .insert_at(node_address.clone(), request_call, timeout); self.active_requests_nonce_mapping .insert(nonce, node_address); } diff --git a/src/handler/mod.rs b/src/handler/mod.rs index c8ae59c74..d7e1f50db 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -212,6 +212,8 @@ pub enum RegTopicResponseState { Ticket, } +const TIMEOUT_REGCONFIRMATION: Duration = Duration::from_secs(20); + /// Process to handle handshakes and sessions established from raw RPC communications between nodes. pub struct Handler { /// Configuration for the discv5 service. 
@@ -317,7 +319,7 @@ impl Handler { enr, key, active_requests: ActiveRequests::new( - config.request_timeout + Duration::from_secs(25), + config.request_timeout, ), pending_requests: HashMap::new(), filter_expected_responses, @@ -1118,7 +1120,7 @@ impl Handler { self.reg_topic_responses.remove(&node_address); // Still a REGCONFIRMATION may come hence request call is reinserted. self.active_requests - .insert(node_address.clone(), request_call); + .insert_at(node_address.clone(), request_call, TIMEOUT_REGCONFIRMATION); if let Err(e) = self .service_send .send(HandlerOut::Response( @@ -1290,7 +1292,7 @@ impl Handler { self.reg_topic_responses.remove(&node_address); // Still a REGCONFIRMATION may come hence request call is reinserted. self.active_requests - .insert(node_address.clone(), request_call.clone()); + .insert_at(node_address.clone(), request_call.clone(), TIMEOUT_REGCONFIRMATION); if let Err(e) = self .service_send .send(HandlerOut::Response( From da33ffc199671fb031df6d543de40f9eab74004f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 28 Jun 2022 00:07:40 +0200 Subject: [PATCH 213/391] Handle timeout for topic requests --- src/handler/active_requests.rs | 9 +++- src/handler/mod.rs | 94 +++++++++++++++++++++++++++++----- src/service.rs | 22 ++++---- 3 files changed, 101 insertions(+), 24 deletions(-) diff --git a/src/handler/active_requests.rs b/src/handler/active_requests.rs index 5b4f4e125..fc406a8ca 100644 --- a/src/handler/active_requests.rs +++ b/src/handler/active_requests.rs @@ -23,12 +23,17 @@ impl ActiveRequests { pub(crate) fn insert(&mut self, node_address: NodeAddress, request_call: RequestCall) { let nonce = *request_call.packet.message_nonce(); self.active_requests_mapping - .insert(node_address.clone(), request_call); + .insert(node_address.clone(), request_call); self.active_requests_nonce_mapping .insert(nonce, node_address); } - pub(crate) fn insert_at(&mut self, node_address: NodeAddress, request_call: RequestCall, timeout: Duration) { 
+ pub(crate) fn insert_at( + &mut self, + node_address: NodeAddress, + request_call: RequestCall, + timeout: Duration, + ) { let nonce = *request_call.packet.message_nonce(); self.active_requests_mapping .insert_at(node_address.clone(), request_call, timeout); diff --git a/src/handler/mod.rs b/src/handler/mod.rs index d7e1f50db..588136c0d 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -41,7 +41,7 @@ use enr::{CombinedKey, NodeId}; use futures::prelude::*; use parking_lot::RwLock; use std::{ - collections::HashMap, + collections::{hash_map::Entry, HashMap}, convert::TryFrom, default::Default, net::SocketAddr, @@ -218,6 +218,8 @@ const TIMEOUT_REGCONFIRMATION: Duration = Duration::from_secs(20); pub struct Handler { /// Configuration for the discv5 service. request_retries: u8, + /// Configuration for the discv5 service of duration for which nodes are banned. + ban_duration: Option, /// The local node id to save unnecessary read locks on the ENR. The NodeID should not change /// during the operation of the server. node_id: NodeId, @@ -315,12 +317,11 @@ impl Handler { let mut handler = Handler { request_retries: config.request_retries, + ban_duration: config.ban_duration, node_id, enr, key, - active_requests: ActiveRequests::new( - config.request_timeout, - ), + active_requests: ActiveRequests::new(config.request_timeout), pending_requests: HashMap::new(), filter_expected_responses, topic_query_responses: HashMap::new(), @@ -458,6 +459,38 @@ impl Handler { node_address: NodeAddress, mut request_call: RequestCall, ) { + if let RequestBody::RegisterTopic { .. 
} = request_call.request.body { + if let Entry::Occupied(entry) = self.reg_topic_responses.entry(node_address.clone()) { + let response_state = entry.get(); + if let RegTopicResponseState::Ticket | RegTopicResponseState::Nodes = response_state + { + self.reg_topic_responses.remove(&node_address); + trace!("Request timed out with {}", node_address); + // Remove the request from the awaiting packet_filter + self.remove_expected_response(node_address.socket_addr); + // The request has timed out. We keep any established session for future use. + self.fail_request(request_call, RequestError::Timeout, false) + .await; + return; + } + } + } else if let RequestBody::TopicQuery { .. } = request_call.request.body { + if let Entry::Occupied(entry) = self.topic_query_responses.entry(node_address.clone()) { + let response_state = entry.get(); + if let TopicQueryResponseState::AdNodes | TopicQueryResponseState::Nodes = + response_state + { + self.topic_query_responses.remove(&node_address); + trace!("Request timed out with {}", node_address); + // Remove the request from the awaiting packet_filter + self.remove_expected_response(node_address.socket_addr); + // The request has timed out. We keep any established session for future use. + self.fail_request(request_call, RequestError::Timeout, false) + .await; + } + } + return; + } if request_call.retries >= self.request_retries { trace!("Request timed out with {}", node_address); // Remove the request from the awaiting packet_filter @@ -1117,10 +1150,12 @@ impl Handler { return; } RegTopicResponseState::Ticket => { - self.reg_topic_responses.remove(&node_address); // Still a REGCONFIRMATION may come hence request call is reinserted. 
- self.active_requests - .insert_at(node_address.clone(), request_call, TIMEOUT_REGCONFIRMATION); + self.active_requests.insert_at( + node_address.clone(), + request_call, + TIMEOUT_REGCONFIRMATION, + ); if let Err(e) = self .service_send .send(HandlerOut::Response( @@ -1138,7 +1173,14 @@ impl Handler { self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a REGTOPIC request".into()), true).await; // Remove the expected response self.remove_expected_response(node_address.socket_addr); - self.send_next_request(node_address).await; + warn!( + "Peer returned more than one set of NODES responses for REGTOPIC request. Blacklisting {}", + node_address + ); + let ban_timeout = self.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST + .write() + .ban(node_address.clone(), ban_timeout); return; } } @@ -1175,7 +1217,14 @@ impl Handler { self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; // Remove the expected response self.remove_expected_response(node_address.socket_addr); - self.send_next_request(node_address).await; + warn!( + "Peer returned more than one set of NODES responses for TOPICQUERY request. Blacklisting {}", + node_address + ); + let ban_timeout = self.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST + .write() + .ban(node_address.clone(), ban_timeout); return; } } @@ -1259,6 +1308,14 @@ impl Handler { self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; // Remove the expected response self.remove_expected_response(node_address.socket_addr); + warn!( + "Peer returned more than one set of ADNODES responses for TOPICQUERY request. 
Blacklisting {}", + node_address + ); + let ban_timeout = self.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST + .write() + .ban(node_address.clone(), ban_timeout); self.send_next_request(node_address).await; return; } @@ -1289,10 +1346,12 @@ impl Handler { return; } RegTopicResponseState::Nodes => { - self.reg_topic_responses.remove(&node_address); // Still a REGCONFIRMATION may come hence request call is reinserted. - self.active_requests - .insert_at(node_address.clone(), request_call.clone(), TIMEOUT_REGCONFIRMATION); + self.active_requests.insert_at( + node_address.clone(), + request_call.clone(), + TIMEOUT_REGCONFIRMATION, + ); if let Err(e) = self .service_send .send(HandlerOut::Response( @@ -1318,10 +1377,19 @@ impl Handler { .await; // Remove the expected response self.remove_expected_response(node_address.socket_addr); - self.send_next_request(node_address).await; + warn!( + "Peer returned more than one TICKET responses for REGTOPIC request. Blacklisting {}", + node_address + ); + let ban_timeout = self.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST + .write() + .ban(node_address.clone(), ban_timeout); return; } } + } else if let ResponseBody::RegisterConfirmation { .. 
} = response.body { + self.reg_topic_responses.remove(&node_address); } // Remove the expected response diff --git a/src/service.rs b/src/service.rs index 2272c3868..daf88220a 100644 --- a/src/service.rs +++ b/src/service.rs @@ -541,14 +541,14 @@ impl Service { self.topics_kbuckets.insert(topic_hash, kbuckets); - let mut local_routing_table = self.kbuckets.write().clone(); - for enr in local_routing_table.iter().map(|entry| entry.node.value.clone()) { - self.connection_updated( - enr.node_id(), - ConnectionStatus::Connected(enr, ConnectionDirection::Incoming), - Some(topic_hash), - ); - } + let mut local_routing_table = self.kbuckets.write().clone(); + for enr in local_routing_table.iter().map(|entry| entry.node.value.clone()) { + self.connection_updated( + enr.node_id(), + ConnectionStatus::Connected(enr, ConnectionDirection::Incoming), + Some(topic_hash), + ); + } } self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); } @@ -2386,7 +2386,11 @@ impl Service { /// specified). fn rpc_failure(&mut self, id: RequestId, error: RequestError) { trace!("RPC Error removing request. Reason: {:?}, id {}", error, id); - if let Some(active_request) = self.active_requests.remove(&id) { + if let Some(active_request) = self + .active_requests + .remove(&id) + .or_else(|| self.active_regtopic_requests.remove(&id)) + { // If this is initiated by the user, return an error on the callback. All callbacks // support a request error. 
match active_request.callback { From b959a0ea4cf5d2bc354dfed264eec04380436459 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 28 Jun 2022 00:34:53 +0200 Subject: [PATCH 214/391] Proceed as normal in handler if register confirmation does not come --- src/handler/mod.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 588136c0d..f1bf0c72c 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -210,6 +210,7 @@ pub enum RegTopicResponseState { Start, Nodes, Ticket, + RegisterConfirmation, } const TIMEOUT_REGCONFIRMATION: Duration = Duration::from_secs(20); @@ -462,7 +463,13 @@ impl Handler { if let RequestBody::RegisterTopic { .. } = request_call.request.body { if let Entry::Occupied(entry) = self.reg_topic_responses.entry(node_address.clone()) { let response_state = entry.get(); - if let RegTopicResponseState::Ticket | RegTopicResponseState::Nodes = response_state + if let RegTopicResponseState::RegisterConfirmation = response_state { + self.reg_topic_responses.remove(&node_address); + self.remove_expected_response(node_address.socket_addr); + self.send_next_request(node_address).await; + return; + } else if let RegTopicResponseState::Ticket | RegTopicResponseState::Nodes = + response_state { self.reg_topic_responses.remove(&node_address); trace!("Request timed out with {}", node_address); @@ -1150,6 +1157,7 @@ impl Handler { return; } RegTopicResponseState::Ticket => { + *response_state = RegTopicResponseState::RegisterConfirmation; // Still a REGCONFIRMATION may come hence request call is reinserted. 
self.active_requests.insert_at( node_address.clone(), @@ -1168,8 +1176,9 @@ impl Handler { } return; } - RegTopicResponseState::Nodes => { - warn!("No more NODES responses should be received if REGTOPIC response is in Nodes state."); + RegTopicResponseState::Nodes + | RegTopicResponseState::RegisterConfirmation => { + warn!("No more NODES responses should be received if REGTOPIC response is in Nodes or RegisterConfirmation state."); self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a REGTOPIC request".into()), true).await; // Remove the expected response self.remove_expected_response(node_address.socket_addr); @@ -1346,6 +1355,7 @@ impl Handler { return; } RegTopicResponseState::Nodes => { + *response_state = RegTopicResponseState::RegisterConfirmation; // Still a REGCONFIRMATION may come hence request call is reinserted. self.active_requests.insert_at( node_address.clone(), @@ -1364,8 +1374,8 @@ impl Handler { } return; } - RegTopicResponseState::Ticket => { - warn!("No more TICKET responses should be received if REGTOPIC response is in Ticket state."); + RegTopicResponseState::Ticket | RegTopicResponseState::RegisterConfirmation => { + warn!("No more TICKET responses should be received if REGTOPIC response is in Ticket or RegisterConfirmation state."); self.fail_request( request_call, RequestError::InvalidResponseCombo( From c21f8df7182c1ca72d368ee65df304a6e4fd71d3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 28 Jun 2022 21:19:31 +0200 Subject: [PATCH 215/391] Run cargo fmt --- src/handler/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index a0fc4f700..aea1c62bb 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -773,7 +773,9 @@ impl Handler { | RequestBody::TopicQuery { topic } => { HandlerOut::EstablishedTopic(enr, connection_direction, topic) } - _ => HandlerOut::Established(enr, node_address.socket_addr, 
connection_direction), + _ => { + HandlerOut::Established(enr, node_address.socket_addr, connection_direction) + } }; self.service_send .send(kbucket_addition) From 605406918a05289b4fbed3db65ab06e28dc95954 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 29 Jun 2022 00:18:54 +0200 Subject: [PATCH 216/391] Assign wait time based on similarity score --- src/advertisement/mod.rs | 45 ++++++++++++++----- src/advertisement/test.rs | 51 ++++++++++++--------- src/service.rs | 93 ++++++++++++++++++++++----------------- 3 files changed, 115 insertions(+), 74 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 37bb74195..62265e4dd 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -1,11 +1,12 @@ use super::*; -use crate::Enr; +use crate::{enr::NodeId, Enr}; use core::time::Duration; use futures::prelude::*; use more_asserts::debug_unreachable; use std::{ collections::{HashMap, VecDeque}, fmt, + net::IpAddr, pin::Pin, task::{Context, Poll}, }; @@ -112,22 +113,44 @@ impl Ads { self.ads.get(&topic).into_iter().flatten() } - pub fn ticket_wait_time(&mut self, topic: TopicHash) -> Option { + pub fn ticket_wait_time( + &mut self, + topic: TopicHash, + node_id: NodeId, + ip: IpAddr, + ) -> Option { self.remove_expired(); let now = Instant::now(); + // The occupancy score encompasses checking if the table is full. if self.expirations.len() < self.max_ads { - self.ads - .get(&topic) - .filter(|nodes| nodes.len() >= self.max_ads_per_topic) - .map(|nodes| { - nodes.get(0).map(|ad| { + if let Some(nodes) = self.ads.get(&topic) { + for ad in nodes.iter() { + // The similarity score encompasses checking if ads with same node id and ip exist. 
+ let same_ip = match ip { + IpAddr::V4(ip) => ad.node_record.ip4() == Some(ip), + IpAddr::V6(ip) => ad.node_record.ip6() == Some(ip), + }; + if ad.node_record.node_id() == node_id || same_ip { + let elapsed_time = now.saturating_duration_since(ad.insert_time); + let wait_time = self.ad_lifetime.saturating_sub(elapsed_time); + return Some(wait_time); + } + } + // The occupancy score also encompasses checking if the ad slots for a + // certain topic are full. + if nodes.len() >= self.max_ads_per_topic { + return nodes.front().map(|ad| { let elapsed_time = now.saturating_duration_since(ad.insert_time); self.ad_lifetime.saturating_sub(elapsed_time) - }) - }) - .unwrap_or_default() + }); + } else { + None + } + } else { + None + } } else { - self.expirations.get(0).map(|ad| { + self.expirations.front().map(|ad| { let elapsed_time = now.saturating_duration_since(ad.insert_time); self.ad_lifetime.saturating_sub(elapsed_time) }) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 1e4eb10a6..62903693f 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -79,9 +79,14 @@ async fn insert_ad_and_get_nodes() { #[tokio::test] async fn ticket_wait_time_no_wait_time() { + // Create the test values needed + let port = 6666; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let mut ads = Ads::new(Duration::from_secs(1), 10, 50).unwrap(); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); - assert_eq!(ads.ticket_wait_time(topic), None) + assert_eq!(ads.ticket_wait_time(topic, enr.node_id(), ip), None) } #[tokio::test] @@ -97,10 +102,10 @@ async fn ticket_wait_time_duration() { let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); // Add an add for topic - ads.insert(enr, topic).unwrap(); + ads.insert(enr.clone(), topic).unwrap(); - assert_gt!(ads.ticket_wait_time(topic), 
Some(Duration::from_secs(2))); - assert_lt!(ads.ticket_wait_time(topic), Some(Duration::from_secs(3))); + assert_gt!(ads.ticket_wait_time(topic, enr.node_id(), ip), Some(Duration::from_secs(2))); + assert_lt!(ads.ticket_wait_time(topic, enr.node_id(), ip), Some(Duration::from_secs(3))); } #[tokio::test] @@ -111,10 +116,10 @@ async fn ticket_wait_time_full_table() { let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let port = 5000; - let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); - let enr_2 = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); + let port_2 = 5000; + let ip_2: IpAddr = "192.168.0.1".parse().unwrap(); + let key_2 = CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4").ip(ip_2).udp4(port_2).build(&key_2).unwrap(); let mut ads = Ads::new(Duration::from_secs(3), 2, 3).unwrap(); @@ -131,14 +136,14 @@ async fn ticket_wait_time_full_table() { ads.insert(enr.clone(), topic_2).unwrap(); // Now max_ads in table is reached so the second ad for topic_2 has to wait - assert_ne!(ads.ticket_wait_time(topic_2), None); + assert_ne!(ads.ticket_wait_time(topic_2, enr.node_id(), ip), None); tokio::time::sleep(Duration::from_secs(3)).await; // Now the first ads have expired and the table is not full so no neither topic // or topic_2 ads have to wait - assert_eq!(ads.ticket_wait_time(topic), None); - assert_eq!(ads.ticket_wait_time(topic_2), None); + assert_eq!(ads.ticket_wait_time(topic, enr.node_id(), ip), None); + assert_eq!(ads.ticket_wait_time(topic_2, enr_2.node_id(), ip_2), None); } #[tokio::test] @@ -149,10 +154,15 @@ async fn ticket_wait_time_full_topic() { let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let port = 5000; - let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); - let enr_2 = 
EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); + let port_2 = 5000; + let ip_2: IpAddr = "192.168.0.1".parse().unwrap(); + let key_2 = CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4").ip(ip_2).udp4(port_2).build(&key_2).unwrap(); + + let port_3 = 5000; + let ip_3: IpAddr = "8.8.8.8".parse().unwrap(); + let key_3 = CombinedKey::generate_secp256k1(); + let enr_3 = EnrBuilder::new("v4").ip(ip_3).udp4(port_3).build(&key_3).unwrap(); let mut ads = Ads::new(Duration::from_secs(3), 2, 4).unwrap(); @@ -164,17 +174,14 @@ async fn ticket_wait_time_full_topic() { ads.insert(enr_2.clone(), topic).unwrap(); // Now max_ads_per_topic is reached for topic - assert_ne!(ads.ticket_wait_time(topic), None); + assert_ne!(ads.ticket_wait_time(topic, enr_3.node_id(), ip_3), None); // Add a topic_2 ad - ads.insert(enr, topic_2).unwrap(); + ads.insert(enr.clone(), topic_2).unwrap(); // The table isn't full so topic_2 ads don't have to wait - assert_eq!(ads.ticket_wait_time(topic_2), None); - - // But for topic they do until the first ads have expired - assert_ne!(ads.ticket_wait_time(topic), None); + assert_eq!(ads.ticket_wait_time(topic_2, enr_2.node_id(), ip_2), None); tokio::time::sleep(Duration::from_secs(3)).await; - assert_eq!(ads.ticket_wait_time(topic), None); + assert_eq!(ads.ticket_wait_time(topic, enr_3.node_id(), ip), None); } diff --git a/src/service.rs b/src/service.rs index 9bdfba722..9af779403 100644 --- a/src/service.rs +++ b/src/service.rs @@ -34,7 +34,7 @@ use crate::{ query_pool::{ FindNodeQueryConfig, PredicateQueryConfig, QueryId, QueryPool, QueryPoolState, TargetKey, }, - rpc, Discv5Config, Discv5Event, Enr, + rpc, Discv5Config, Discv5Event, Enr, IpMode, }; use aes_gcm::{ aead::{generic_array::GenericArray, Aead, NewAead, Payload}, @@ -1041,59 +1041,71 @@ impl Service { } RequestBody::RegisterTopic { topic, enr, ticket } => { // Drop if request tries to advertise another node than sender - if enr.node_id() == node_address.node_id 
- && enr.udp4_socket().map(SocketAddr::V4) == Some(node_address.socket_addr) - { - debug!("Sending NODES response to REGTOPIC"); - self.send_find_topic_nodes_response( - topic, - node_address.clone(), - id.clone(), - "REGTOPIC", - ); + if enr.node_id() != node_address.node_id { + return; + } + match self.config.ip_mode { + IpMode::Ip4 => { + if enr.udp4_socket().map(SocketAddr::V4) != Some(node_address.socket_addr) { + return; + } + } + IpMode::Ip6 { .. } => { + if enr.udp6_socket().map(SocketAddr::V6) != Some(node_address.socket_addr) { + return; + } + } + } + debug!("Sending NODES response to REGTOPIC"); + self.send_find_topic_nodes_response( + topic, + node_address.clone(), + id.clone(), + "REGTOPIC", + ); - // The current wait time for a given topic. - let wait_time = self - .ads - .ticket_wait_time(topic) - .unwrap_or(Duration::from_secs(0)); - - let mut new_ticket = Ticket::new( - node_address.node_id, - node_address.socket_addr.ip(), - topic, - tokio::time::Instant::now(), - wait_time, - Duration::from_secs(0), - ); + // The current wait time for a given topic. + let wait_time = self + .ads + .ticket_wait_time(topic, node_address.node_id, node_address.socket_addr.ip()) + .unwrap_or(Duration::from_secs(0)); + + let mut new_ticket = Ticket::new( + node_address.node_id, + node_address.socket_addr.ip(), + topic, + tokio::time::Instant::now(), + wait_time, + Duration::from_secs(0), + ); - if !ticket.is_empty() { - let decoded_enr = self - .local_enr + if !ticket.is_empty() { + let decoded_enr = + self.local_enr .write() .to_base64() .parse::() .map_err(|e| { error!("Failed to decrypt ticket in REGTOPIC request. 
Error: {}", e) }); - if let Ok(decoded_enr) = decoded_enr { - if let Some(ticket_key) = decoded_enr.get("ticket_key") { - let decrypted_ticket = { - let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); - let payload = Payload { - msg: &ticket, - aad: b"", - }; - aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) + if let Ok(decoded_enr) = decoded_enr { + if let Some(ticket_key) = decoded_enr.get("ticket_key") { + let decrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", + }; + aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) .map_err(|e| { error!( "Failed to decrypt ticket in REGTOPIC request. Error: {}", e ) }) - }; - if let Ok(decrypted_ticket) = decrypted_ticket { - Ticket::decode(&decrypted_ticket) + }; + if let Ok(decrypted_ticket) = decrypted_ticket { + Ticket::decode(&decrypted_ticket) .map_err(|e| { error!( "Failed to decode ticket in REGTOPIC request. Error: {}", @@ -1124,7 +1136,6 @@ impl Service { } }) .ok(); - } } } } else { From b673eb0424fc1f3287ba6e5ee6406b409d736612 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 29 Jun 2022 12:23:53 +0200 Subject: [PATCH 217/391] Add discovered topic enrs to topic kbuckets --- src/service.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/service.rs b/src/service.rs index 9af779403..7f9593e4d 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2195,6 +2195,13 @@ impl Service { }); if topic_hash.is_some() { + for enr in enrs.into_iter() { + self.connection_updated( + enr.node_id(), + ConnectionStatus::Connected(enr, ConnectionDirection::Incoming), + topic_hash, + ); + } return; } From 5e879c9e140bc50181cab3a03ffc38995cff42a1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 29 Jun 2022 12:58:06 +0200 Subject: [PATCH 218/391] Add discovered topic nodes as trusted enrs after filter --- src/advertisement/test.rs | 28 +++++++++++++++++++----- src/service.rs | 46 
+++++++++++++++++++++++++++++---------- 2 files changed, 58 insertions(+), 16 deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 62903693f..2db48e736 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -104,8 +104,14 @@ async fn ticket_wait_time_duration() { // Add an add for topic ads.insert(enr.clone(), topic).unwrap(); - assert_gt!(ads.ticket_wait_time(topic, enr.node_id(), ip), Some(Duration::from_secs(2))); - assert_lt!(ads.ticket_wait_time(topic, enr.node_id(), ip), Some(Duration::from_secs(3))); + assert_gt!( + ads.ticket_wait_time(topic, enr.node_id(), ip), + Some(Duration::from_secs(2)) + ); + assert_lt!( + ads.ticket_wait_time(topic, enr.node_id(), ip), + Some(Duration::from_secs(3)) + ); } #[tokio::test] @@ -119,7 +125,11 @@ async fn ticket_wait_time_full_table() { let port_2 = 5000; let ip_2: IpAddr = "192.168.0.1".parse().unwrap(); let key_2 = CombinedKey::generate_secp256k1(); - let enr_2 = EnrBuilder::new("v4").ip(ip_2).udp4(port_2).build(&key_2).unwrap(); + let enr_2 = EnrBuilder::new("v4") + .ip(ip_2) + .udp4(port_2) + .build(&key_2) + .unwrap(); let mut ads = Ads::new(Duration::from_secs(3), 2, 3).unwrap(); @@ -157,12 +167,20 @@ async fn ticket_wait_time_full_topic() { let port_2 = 5000; let ip_2: IpAddr = "192.168.0.1".parse().unwrap(); let key_2 = CombinedKey::generate_secp256k1(); - let enr_2 = EnrBuilder::new("v4").ip(ip_2).udp4(port_2).build(&key_2).unwrap(); + let enr_2 = EnrBuilder::new("v4") + .ip(ip_2) + .udp4(port_2) + .build(&key_2) + .unwrap(); let port_3 = 5000; let ip_3: IpAddr = "8.8.8.8".parse().unwrap(); let key_3 = CombinedKey::generate_secp256k1(); - let enr_3 = EnrBuilder::new("v4").ip(ip_3).udp4(port_3).build(&key_3).unwrap(); + let enr_3 = EnrBuilder::new("v4") + .ip(ip_3) + .udp4(port_3) + .build(&key_3) + .unwrap(); let mut ads = Ads::new(Duration::from_secs(3), 2, 4).unwrap(); diff --git a/src/service.rs b/src/service.rs index 7f9593e4d..07c344989 100644 --- 
a/src/service.rs +++ b/src/service.rs @@ -2160,6 +2160,41 @@ impl Service { match kbuckets_topic.entry(&key) { kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Absent(_) => { + match kbuckets_topic.insert_or_update( + &key, + enr.clone(), + NodeStatus { + state: ConnectionState::Disconnected, + direction: ConnectionDirection::Incoming, + }, + ) { + InsertResult::Inserted + | InsertResult::Pending { .. } + | InsertResult::StatusUpdated { .. } + | InsertResult::ValueUpdated + | InsertResult::Updated { .. } + | InsertResult::UpdatedPending => trace!( + "Added node id {} to kbucket of topic hash {:?}", + enr.node_id(), + topic_hash + ), + InsertResult::Failed(FailureReason::BucketFull) => { + error!("Table full") + } + InsertResult::Failed(FailureReason::BucketFilter) => { + error!("Failed bucket filter") + } + InsertResult::Failed(FailureReason::TableFilter) => { + error!("Failed table filter") + } + InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { + error!("Invalid self update") + } + InsertResult::Failed(_) => error!("Failed to insert ENR"), + } + false + } _ => false, } } else { @@ -2194,17 +2229,6 @@ impl Service { source != &enr.node_id() }); - if topic_hash.is_some() { - for enr in enrs.into_iter() { - self.connection_updated( - enr.node_id(), - ConnectionStatus::Connected(enr, ConnectionDirection::Incoming), - topic_hash, - ); - } - return; - } - // if this is part of a query, update the query if let Some(query_id) = query_id { if let Some(query) = self.queries.get_mut(query_id) { From 6f7ee38ce0a5343638a90e74d0d38e3b6c1d435d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 29 Jun 2022 15:31:23 +0200 Subject: [PATCH 219/391] Add new enrs from pong to topic kbuckets also --- src/service.rs | 53 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/src/service.rs 
b/src/service.rs index 07c344989..607d2b0fe 100644 --- a/src/service.rs +++ b/src/service.rs @@ -946,12 +946,17 @@ impl Service { } /// Returns an ENR if one is known for the given NodeId. - pub fn find_enr(&self, node_id: &NodeId) -> Option { + pub fn find_enr(&mut self, node_id: &NodeId) -> Option { // check if we know this node id in our routing table let key = kbucket::Key::from(*node_id); if let kbucket::Entry::Present(entry, _) = self.kbuckets.write().entry(&key) { return Some(entry.value().clone()); } + for kbuckets in self.topics_kbuckets.values_mut() { + if let kbucket::Entry::Present(entry, _) = kbuckets.entry(&key) { + return Some(entry.value().clone()); + } + } // check the untrusted addresses for ongoing queries for query in self.queries.iter() { if let Some(enr) = query @@ -2331,21 +2336,39 @@ impl Service { } } ConnectionStatus::PongReceived(enr) => { - match self - .kbuckets - .write() - .update_node(&key, enr, Some(ConnectionState::Connected)) - { - UpdateResult::Failed(reason) => { - self.peers_to_ping.remove(&node_id); - debug!( - "Could not update ENR from pong. Node: {}, reason: {:?}", - node_id, reason - ); + if let kbucket::Entry::Present(_, _) = self.kbuckets.write().entry(&key) { + match self.kbuckets.write().update_node( + &key, + enr.clone(), + Some(ConnectionState::Connected), + ) { + UpdateResult::Failed(reason) => { + self.peers_to_ping.remove(&node_id); + debug!( + "Could not update ENR from pong. Node: {}, reason: {:?}", + node_id, reason + ); + } + update => { + debug!("Updated {:?}", update) + } // Updated ENR successfully. + } + } + for kbuckets in self.topics_kbuckets.values_mut() { + if let kbucket::Entry::Present(_, _) = kbuckets.entry(&key) { + match kbuckets.update_node(&key, enr.clone(), Some(ConnectionState::Connected)) { + UpdateResult::Failed(reason) => { + self.peers_to_ping.remove(&node_id); + debug!( + "Could not update ENR from pong. 
Node: {}, reason: {:?}", + node_id, reason + ); + } + update => { + debug!("Updated {:?}", update) + } // Updated ENR successfully. + } } - update => { - debug!("Updated {:?}", update) - } // Updated ENR successfully. } } ConnectionStatus::Disconnected => { From 1655dadfc5205cf0671664875b57331cc009fbb2 Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 29 Jun 2022 11:12:17 -0500 Subject: [PATCH 220/391] fmt --- src/service.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 607d2b0fe..b2acb7dc0 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2356,7 +2356,11 @@ impl Service { } for kbuckets in self.topics_kbuckets.values_mut() { if let kbucket::Entry::Present(_, _) = kbuckets.entry(&key) { - match kbuckets.update_node(&key, enr.clone(), Some(ConnectionState::Connected)) { + match kbuckets.update_node( + &key, + enr.clone(), + Some(ConnectionState::Connected), + ) { UpdateResult::Failed(reason) => { self.peers_to_ping.remove(&node_id); debug!( From 9173669bab50f6bd0b6f143203c3512902260f76 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 29 Jun 2022 19:47:31 +0200 Subject: [PATCH 221/391] Fix reception of pong response service --- src/service.rs | 65 ++++++++++++++++++++++++++++---------------------- 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/src/service.rs b/src/service.rs index b2acb7dc0..9de417fcf 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1470,7 +1470,7 @@ impl Service { let socket = SocketAddr::new(ip, port); // perform ENR majority-based update if required. - // Only count votes that from peers we have contacted. + // Only count votes that are from peers we have contacted. 
let key: kbucket::Key = node_id.into(); let should_count = match self.kbuckets.write().entry(&key) { kbucket::Entry::Present(_, status) @@ -1478,7 +1478,21 @@ impl Service { { true } - _ => false, + _ => { + let mut should_count = false; + for kbuckets in self.topics_kbuckets.values_mut() { + match kbuckets.entry(&key) { + kbucket::Entry::Present(_, status) + if status.is_connected() && !status.is_incoming() => + { + should_count = true; + break; + } + _ => {} + } + } + should_count + } }; if should_count { @@ -2336,12 +2350,27 @@ impl Service { } } ConnectionStatus::PongReceived(enr) => { - if let kbucket::Entry::Present(_, _) = self.kbuckets.write().entry(&key) { - match self.kbuckets.write().update_node( - &key, - enr.clone(), - Some(ConnectionState::Connected), - ) { + match self.kbuckets.write().update_node( + &key, + enr.clone(), + Some(ConnectionState::Connected), + ) { + UpdateResult::Failed(FailureReason::KeyNonExistant) => {} + UpdateResult::Failed(reason) => { + self.peers_to_ping.remove(&node_id); + debug!( + "Could not update ENR from pong. Node: {}, reason: {:?}", + node_id, reason + ); + } + update => { + debug!("Updated {:?}", update) + } // Updated ENR successfully. + } + for kbuckets in self.topics_kbuckets.values_mut() { + match kbuckets.update_node(&key, enr.clone(), Some(ConnectionState::Connected)) + { + UpdateResult::Failed(FailureReason::KeyNonExistant) => {} UpdateResult::Failed(reason) => { self.peers_to_ping.remove(&node_id); debug!( @@ -2354,26 +2383,6 @@ impl Service { } // Updated ENR successfully. } } - for kbuckets in self.topics_kbuckets.values_mut() { - if let kbucket::Entry::Present(_, _) = kbuckets.entry(&key) { - match kbuckets.update_node( - &key, - enr.clone(), - Some(ConnectionState::Connected), - ) { - UpdateResult::Failed(reason) => { - self.peers_to_ping.remove(&node_id); - debug!( - "Could not update ENR from pong. 
Node: {}, reason: {:?}", - node_id, reason - ); - } - update => { - debug!("Updated {:?}", update) - } // Updated ENR successfully. - } - } - } } ConnectionStatus::Disconnected => { let update_result = if let Some(kbuckets) = kbuckets_topic { From 37396b256d3a05c35e24674f04e9dfa2601ad2b3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 29 Jun 2022 22:38:42 +0200 Subject: [PATCH 222/391] Track ads by subnet --- src/advertisement/mod.rs | 141 +++++++++++++++++++++++++++++++------- src/advertisement/test.rs | 12 ++-- src/service.rs | 4 +- src/service/test.rs | 4 +- 4 files changed, 125 insertions(+), 36 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 62265e4dd..11a0740f5 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -80,6 +80,12 @@ pub struct Ads { /// The max_ads limit is up to the user although recommnedations are /// given in the specs. max_ads: usize, + /// Max ads per subnet for the whole table, + max_ads_subnet: usize, + /// Max ads per subnet per topic, + max_ads_subnet_topic: usize, + /// Expiration times of ads by subnet + subnet_expirations: HashMap, VecDeque>, } impl Ads { @@ -87,6 +93,8 @@ impl Ads { ad_lifetime: Duration, max_ads_per_topic: usize, max_ads: usize, + max_ads_subnet: usize, + max_ads_subnet_topic: usize, ) -> Result { if max_ads_per_topic > max_ads { return Err("Ads per topic cannot be > max_ads"); @@ -98,6 +106,9 @@ impl Ads { ad_lifetime, max_ads_per_topic, max_ads, + max_ads_subnet, + max_ads_subnet_topic, + subnet_expirations: HashMap::new(), }) } @@ -121,34 +132,79 @@ impl Ads { ) -> Option { self.remove_expired(); let now = Instant::now(); - // The occupancy score encompasses checking if the table is full. - if self.expirations.len() < self.max_ads { - if let Some(nodes) = self.ads.get(&topic) { - for ad in nodes.iter() { - // The similarity score encompasses checking if ads with same node id and ip exist. 
- let same_ip = match ip { - IpAddr::V4(ip) => ad.node_record.ip4() == Some(ip), - IpAddr::V6(ip) => ad.node_record.ip6() == Some(ip), - }; - if ad.node_record.node_id() == node_id || same_ip { - let elapsed_time = now.saturating_duration_since(ad.insert_time); + // Occupancy check to see if the table is full. + // Similarity check to see if the ad slots for an ip subnet are full. + let subnet = match ip { + IpAddr::V4(ip) => ip.octets()[0..=2].to_vec(), + IpAddr::V6(ip) => ip.octets()[0..=5].to_vec(), + }; + + if let Some(nodes) = self.ads.get(&topic) { + let mut subnet_first_insert_time = None; + let mut subnet_ads_count = 0; + for ad in nodes.iter() { + // Similarity check to see if ads with same node id and ip exist for the given topic. + let same_ip = match ip { + IpAddr::V4(ip) => ad.node_record.ip4() == Some(ip), + IpAddr::V6(ip) => ad.node_record.ip6() == Some(ip), + }; + if ad.node_record.node_id() == node_id || same_ip { + let elapsed_time = now.saturating_duration_since(ad.insert_time); + let wait_time = self.ad_lifetime.saturating_sub(elapsed_time); + return Some(wait_time); + } + let subnet_match = match ip { + IpAddr::V4(_) => ad + .node_record + .ip4() + .map(|ip| ip.octets()[0..=2].to_vec() == subnet) + .unwrap_or(false), + IpAddr::V6(_) => ad + .node_record + .ip4() + .map(|ip| ip.octets()[0..=5].to_vec() == subnet) + .unwrap_or(false), + }; + + if subnet_match { + if !subnet_first_insert_time.is_some() { + subnet_first_insert_time = Some(ad.insert_time); + } + subnet_ads_count += 1; + } + } + // Similarity check to see if the limit of ads per subnet per topic or otherwise table is reached. + // If the ad slots per subnet per topic are not full and neither are the ad slots per subnet for + // the whole table then waiting time is not decided by subnet. 
+ if subnet_ads_count >= self.max_ads_subnet_topic { + if let Some(insert_time) = subnet_first_insert_time { + let elapsed_time = now.saturating_duration_since(insert_time); + let wait_time = self.ad_lifetime.saturating_sub(elapsed_time); + return Some(wait_time); + } + } + if let Some(expirations) = self.subnet_expirations.get_mut(&subnet) { + if expirations.len() >= self.max_ads_subnet { + if let Some(insert_time) = expirations.pop_front() { + let elapsed_time = now.saturating_duration_since(insert_time); let wait_time = self.ad_lifetime.saturating_sub(elapsed_time); return Some(wait_time); } } - // The occupancy score also encompasses checking if the ad slots for a - // certain topic are full. - if nodes.len() >= self.max_ads_per_topic { - return nodes.front().map(|ad| { - let elapsed_time = now.saturating_duration_since(ad.insert_time); - self.ad_lifetime.saturating_sub(elapsed_time) - }); - } else { - None - } - } else { - None } + + // Occupancy check to see if the ad slots for a certain topic are full. + if nodes.len() >= self.max_ads_per_topic { + return nodes.front().map(|ad| { + let elapsed_time = now.saturating_duration_since(ad.insert_time); + self.ad_lifetime.saturating_sub(elapsed_time) + }); + } + } + // If the ad slots per topic are not full and neither is the table then waiting time is None, + // otherwise waiting time is that of the next ad in the table to expire. 
+ if self.expirations.len() < self.max_ads { + None } else { self.expirations.front().map(|ad| { let elapsed_time = now.saturating_duration_since(ad.insert_time); @@ -169,15 +225,32 @@ impl Ads { to_remove_ads.into_iter().for_each(|(topic, index)| { if let Some(topic_ads) = self.ads.get_mut(&topic) { - for _ in 0..index { - topic_ads.pop_front(); + for i in 0..index { + let ad = topic_ads.pop_front(); + if let Some(ad) = ad { + let subnet = if let Some(ip) = ad.node_record.ip4() { + Some(ip.octets()[0..=2].to_vec()) + } else if let Some(ip6) = ad.node_record.ip6() { + Some(ip6.octets()[0..=5].to_vec()) + } else { None }; + if let Some(subnet) = subnet { + if let Some(subnet_expiries) = self.subnet_expirations.get_mut(&subnet) { + subnet_expiries.pop_front(); + } else { + debug_unreachable!("Mismatched mapping between ads and their expirations by subnet. At least {} ads should exist for subnet {:?}", i+1, subnet); + } + } + } else { + debug_unreachable!("Mismatched mapping between ads and their expirations. At least {} ads should exist for topic hash {}", i+1, topic) + } self.expirations.pop_front(); } if topic_ads.is_empty() { self.ads.remove(&topic); } + } else { - debug_unreachable!("Mismatched mapping between ads and their expirations"); + debug_unreachable!("Mismatched mapping between ads and their expirations. 
An entry should exist for topic hash {}", topic); } }); } @@ -185,6 +258,22 @@ impl Ads { pub fn insert(&mut self, node_record: Enr, topic: TopicHash) -> Result<(), &str> { self.remove_expired(); let now = Instant::now(); + + let subnet = if let Some(ip) = node_record.ip4() { + Some(ip.octets()[0..=2].to_vec()) + } else if let Some(ip6) = node_record.ip6() { + Some(ip6.octets()[0..=5].to_vec()) + } else { + None + }; + if let Some(subnet) = subnet { + let subnet_expirires = self + .subnet_expirations + .entry(subnet) + .or_insert(VecDeque::new()); + subnet_expirires.push_back(now); + } + let nodes = self.ads.entry(topic).or_default(); let ad_node = AdNode::new(node_record, now); if nodes.contains(&ad_node) { diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 2db48e736..61534848f 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -14,7 +14,7 @@ async fn insert_same_node() { let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); + let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100).unwrap(); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); @@ -43,7 +43,7 @@ async fn insert_ad_and_get_nodes() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 10, 50).unwrap(); + let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100).unwrap(); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); @@ -84,7 +84,7 @@ async fn ticket_wait_time_no_wait_time() { let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let mut ads = 
Ads::new(Duration::from_secs(1), 10, 50).unwrap(); + let mut ads = Ads::new(Duration::from_secs(1), 10, 50, 100, 100).unwrap(); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); assert_eq!(ads.ticket_wait_time(topic, enr.node_id(), ip), None) } @@ -97,7 +97,7 @@ async fn ticket_wait_time_duration() { let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(3), 1, 3).unwrap(); + let mut ads = Ads::new(Duration::from_secs(3), 1, 3, 100, 100).unwrap(); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); @@ -131,7 +131,7 @@ async fn ticket_wait_time_full_table() { .build(&key_2) .unwrap(); - let mut ads = Ads::new(Duration::from_secs(3), 2, 3).unwrap(); + let mut ads = Ads::new(Duration::from_secs(3), 2, 3, 100, 100).unwrap(); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); @@ -182,7 +182,7 @@ async fn ticket_wait_time_full_topic() { .build(&key_3) .unwrap(); - let mut ads = Ads::new(Duration::from_secs(3), 2, 4).unwrap(); + let mut ads = Ads::new(Duration::from_secs(3), 2, 4, 100, 100).unwrap(); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); diff --git a/src/service.rs b/src/service.rs index 9de417fcf..380c21463 100644 --- a/src/service.rs +++ b/src/service.rs @@ -408,13 +408,13 @@ impl Service { let (discv5_send, discv5_recv) = mpsc::channel(30); let (exit_send, exit) = oneshot::channel(); - let ads = match Ads::new(Duration::from_secs(60 * 15), 100, 50000) { + let ads = match Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3) { Ok(ads) => ads, Err(e) => { return Err(Error::new(ErrorKind::InvalidInput, e)); } }; - let active_topics = match Ads::new(Duration::from_secs(60 * 15), 100, 50000) { + let active_topics = 
match Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3) { Ok(ads) => ads, Err(e) => { return Err(Error::new(ErrorKind::InvalidInput, e)); diff --git a/src/service/test.rs b/src/service/test.rs index ce50ce505..56e1d816e 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -96,11 +96,11 @@ async fn build_service( peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), + ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3).unwrap(), tickets: Tickets::new(Duration::from_secs(60 * 15)), topics: HashMap::new(), topics_kbuckets: HashMap::new(), - active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000).unwrap(), + active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3).unwrap(), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, From 139fcd2451a01f5521832cf3f948d5feef266922 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 29 Jun 2022 22:52:33 +0200 Subject: [PATCH 223/391] Fix clippy warnings --- src/advertisement/mod.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 11a0740f5..d02057b1a 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -161,13 +161,13 @@ impl Ads { .unwrap_or(false), IpAddr::V6(_) => ad .node_record - .ip4() + .ip6() .map(|ip| ip.octets()[0..=5].to_vec() == subnet) .unwrap_or(false), }; if subnet_match { - if !subnet_first_insert_time.is_some() { + if subnet_first_insert_time.is_none() { subnet_first_insert_time = Some(ad.insert_time); } subnet_ads_count += 1; @@ -230,9 +230,9 @@ impl Ads { if let Some(ad) = ad { let subnet = if let Some(ip) = ad.node_record.ip4() { Some(ip.octets()[0..=2].to_vec()) - } else if let Some(ip6) = ad.node_record.ip6() { - Some(ip6.octets()[0..=5].to_vec()) - } else { None }; + } else { + 
ad.node_record.ip6().map(|ip6| ip6.octets()[0..=5].to_vec()) + }; if let Some(subnet) = subnet { if let Some(subnet_expiries) = self.subnet_expirations.get_mut(&subnet) { subnet_expiries.pop_front(); @@ -261,16 +261,14 @@ impl Ads { let subnet = if let Some(ip) = node_record.ip4() { Some(ip.octets()[0..=2].to_vec()) - } else if let Some(ip6) = node_record.ip6() { - Some(ip6.octets()[0..=5].to_vec()) } else { - None + node_record.ip6().map(|ip6| ip6.octets()[0..=5].to_vec()) }; if let Some(subnet) = subnet { let subnet_expirires = self .subnet_expirations .entry(subnet) - .or_insert(VecDeque::new()); + .or_insert_with(VecDeque::new); subnet_expirires.push_back(now); } From 95f2a07e87dbdc5ff04e4dad5b89bd2ec9a7b24c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 30 Jun 2022 18:53:27 +0200 Subject: [PATCH 224/391] Add tests for subnet ticket wait time --- src/advertisement/mod.rs | 39 ++++++++++++++++++---------- src/advertisement/test.rs | 53 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 13 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index d02057b1a..b941275fd 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -63,7 +63,8 @@ impl AdTopic { } } -/// The Ads struct contains the locally adveritsed AdNodes. +/// The Ads struct contains adveritsed AdNodes. Table is used to refer to all +/// the ads, and the table stores ads by topic. 
#[derive(Clone, Debug)] pub struct Ads { /// The expirations makes sure that AdNodes are advertised only for the @@ -96,8 +97,8 @@ impl Ads { max_ads_subnet: usize, max_ads_subnet_topic: usize, ) -> Result { - if max_ads_per_topic > max_ads { - return Err("Ads per topic cannot be > max_ads"); + if max_ads_per_topic > max_ads || max_ads_subnet_topic > max_ads_subnet { + return Err("Ads per topic [per subnet] cannot be > max_ads [per subnet]"); } Ok(Ads { @@ -139,6 +140,20 @@ impl Ads { IpAddr::V6(ip) => ip.octets()[0..=5].to_vec(), }; + let wait_time_max_ads_subnet = + if let Some(expirations) = self.subnet_expirations.get_mut(&subnet) { + if expirations.len() >= self.max_ads_subnet { + expirations.pop_front().map(|insert_time| { + let elapsed_time = now.saturating_duration_since(insert_time); + self.ad_lifetime.saturating_sub(elapsed_time) + }) + } else { + None + } + } else { + None + }; + if let Some(nodes) = self.ads.get(&topic) { let mut subnet_first_insert_time = None; let mut subnet_ads_count = 0; @@ -165,7 +180,6 @@ impl Ads { .map(|ip| ip.octets()[0..=5].to_vec() == subnet) .unwrap_or(false), }; - if subnet_match { if subnet_first_insert_time.is_none() { subnet_first_insert_time = Some(ad.insert_time); @@ -175,7 +189,8 @@ impl Ads { } // Similarity check to see if the limit of ads per subnet per topic or otherwise table is reached. // If the ad slots per subnet per topic are not full and neither are the ad slots per subnet for - // the whole table then waiting time is not decided by subnet. + // the whole table then waiting time is not decided by subnet but by the number of free ad slots + // for the topic. 
if subnet_ads_count >= self.max_ads_subnet_topic { if let Some(insert_time) = subnet_first_insert_time { let elapsed_time = now.saturating_duration_since(insert_time); @@ -183,14 +198,8 @@ impl Ads { return Some(wait_time); } } - if let Some(expirations) = self.subnet_expirations.get_mut(&subnet) { - if expirations.len() >= self.max_ads_subnet { - if let Some(insert_time) = expirations.pop_front() { - let elapsed_time = now.saturating_duration_since(insert_time); - let wait_time = self.ad_lifetime.saturating_sub(elapsed_time); - return Some(wait_time); - } - } + if wait_time_max_ads_subnet.is_some() { + return wait_time_max_ads_subnet; } // Occupancy check to see if the ad slots for a certain topic are full. @@ -201,6 +210,10 @@ impl Ads { }); } } + // Similarity check to see if the limit of ads per subnet per table is reached. + if wait_time_max_ads_subnet.is_some() { + return wait_time_max_ads_subnet; + } // If the ad slots per topic are not full and neither is the table then waiting time is None, // otherwise waiting time is that of the next ad in the table to expire. 
if self.expirations.len() < self.max_ads { diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 61534848f..075453b0f 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -203,3 +203,56 @@ async fn ticket_wait_time_full_topic() { tokio::time::sleep(Duration::from_secs(3)).await; assert_eq!(ads.ticket_wait_time(topic, enr_3.node_id(), ip), None); } + +#[tokio::test] +async fn ticket_wait_time_full_subnet() { + let port = 1510; + let ip: IpAddr = "192.168.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); + + let port_2 = 1995; + let ip_2: IpAddr = "192.168.0.2".parse().unwrap(); + let key_2 = CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4") + .ip(ip_2) + .udp4(port_2) + .build(&key_2) + .unwrap(); + + let mut ads = Ads::new(Duration::from_secs(2), 2, 4, 2, 1).unwrap(); + let topic_1 = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); + let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); + let topic_3 = Topic::new(std::str::from_utf8(&[3u8; 32]).unwrap()).hash(); + + ads.insert(enr.clone(), topic_1).unwrap(); + ads.insert(enr_2, topic_2).unwrap(); + + assert_ne!(ads.ticket_wait_time(topic_3, enr.node_id(), ip), None); +} + +#[tokio::test] +async fn ticket_wait_time_full_subnet_topic() { + let port = 1510; + let ip: IpAddr = "192.168.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); + + let port_2 = 1995; + let ip_2: IpAddr = "192.168.0.2".parse().unwrap(); + let key_2 = CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4") + .ip(ip_2) + .udp4(port_2) + .build(&key_2) + .unwrap(); + + let mut ads = Ads::new(Duration::from_secs(2), 2, 4, 2, 1).unwrap(); + let topic_1 = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); + let topic_2 = 
Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); + + ads.insert(enr.clone(), topic_1).unwrap(); + + assert_ne!(ads.ticket_wait_time(topic_1, enr_2.node_id(), ip), None); + assert_eq!(ads.ticket_wait_time(topic_2, enr.node_id(), ip), None); +} From a482724659657582fe184434a1d05389c664c973 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 30 Jun 2022 20:11:27 +0200 Subject: [PATCH 225/391] Filter tickets before selection --- src/advertisement/ticket.rs | 15 +++++++++++---- src/service.rs | 21 ++++++++++++++------- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index e768f237e..68828156c 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -214,14 +214,16 @@ pub struct PoolTicket { enr: Enr, req_id: RequestId, ticket: Ticket, + ip: IpAddr, } impl PoolTicket { - pub fn new(enr: Enr, req_id: RequestId, ticket: Ticket) -> Self { + pub fn new(enr: Enr, req_id: RequestId, ticket: Ticket, ip: IpAddr) -> Self { PoolTicket { enr, req_id, ticket, + ip, } } @@ -236,13 +238,18 @@ impl PoolTicket { pub fn ticket(&self) -> &Ticket { &self.ticket } + + pub fn ip(&self) -> &IpAddr { + &self.ip + } } /// The TicketPools collects all the registration attempts for a free ad slot. #[derive(Default)] pub struct TicketPools { /// The ticket_pools keeps track of all the registrants and their Tickets. One - /// ticket_pool per TopicHash can be open at a time. + /// ticket_pool per TopicHash can be open at a time. A ticket pool collects the + /// valid tickets received within the registration window for a topic. ticket_pools: HashMap>, /// The expirations keeps track of when to close a ticket pool so the next one /// can be opened. 
@@ -250,7 +257,7 @@ pub struct TicketPools { } impl TicketPools { - pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket) { + pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket, ip: IpAddr) { if let Some(open_time) = ticket.req_time().checked_add(ticket.wait_time()) { if open_time.elapsed() <= Duration::from_secs(REGISTRATION_WINDOW_IN_SECS) { let pool = self.ticket_pools.entry(ticket.topic()).or_default(); @@ -265,7 +272,7 @@ impl TicketPools { } pool.insert( node_record.node_id(), - PoolTicket::new(node_record, req_id, ticket), + PoolTicket::new(node_record, req_id, ticket, ip), ); } } diff --git a/src/service.rs b/src/service.rs index 380c21463..50e23ff44 100644 --- a/src/service.rs +++ b/src/service.rs @@ -707,7 +707,9 @@ impl Service { // to the ticket issuer. self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } - Some(Ok((topic, ticket_pool))) = self.ticket_pools.next() => { + Some(Ok((topic, mut ticket_pool))) = self.ticket_pools.next() => { + // Remove any tickets which don't have a current wait time of None. + ticket_pool.retain(|node_id, pool_ticket| self.ads.ticket_wait_time(topic, *node_id, *pool_ticket.ip()) == None); // Select ticket with longest cummulative wait time. if let Some(pool_ticket) = ticket_pool.values().max_by_key(|pool_ticket| pool_ticket.ticket().cum_wait()) { self.ads.insert(pool_ticket.node_record().clone(), topic).ok(); @@ -1081,7 +1083,7 @@ impl Service { topic, tokio::time::Instant::now(), wait_time, - Duration::from_secs(0), + wait_time, ); if !ticket.is_empty() { @@ -1124,7 +1126,7 @@ impl Service { // with wait time 0. new_ticket.set_cum_wait(ticket.cum_wait()); self.send_ticket_response( - node_address, + node_address.clone(), id.clone(), new_ticket.clone(), wait_time, @@ -1135,7 +1137,7 @@ impl Service { // don't match those in ticket. For example if a malicious node tries to use // another ticket issued by us. 
if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket); + self.ticket_pools.insert(enr, id, ticket, node_address.socket_addr.ip()); } } } @@ -1149,14 +1151,19 @@ impl Service { // wait time for the ad slot. See discv5 spec. This node will not store tickets received // with wait time 0. self.send_ticket_response( - node_address, + node_address.clone(), id.clone(), new_ticket.clone(), wait_time, ); // If current wait time is 0, the ticket is added to the matching ticket pool. - if wait_time <= Duration::from_secs(0) { - self.ticket_pools.insert(enr, id, new_ticket); + if wait_time == Duration::from_secs(0) { + self.ticket_pools.insert( + enr, + id, + new_ticket, + node_address.socket_addr.ip(), + ); } } } else { From 054520b61c83086af1b7fe945ab6fa46dfbd4a6c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 30 Jun 2022 20:24:31 +0200 Subject: [PATCH 226/391] Filter returned ad nodes before inserting in query --- src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service.rs b/src/service.rs index 50e23ff44..948b8a814 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1442,6 +1442,7 @@ impl Service { self.active_adnodes_responses.remove(&node_id); if let RequestBody::TopicQuery { topic } = active_request.request_body { + nodes.retain(|enr| (self.config.table_filter)(enr)); if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { nodes.into_iter().for_each(|enr| { trace!( From d68d0a703a43153e095383dc5b390e999fcbf6b3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 1 Jul 2022 11:13:27 +0200 Subject: [PATCH 227/391] Insert nodes with correct connection direction --- src/service.rs | 80 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 59 insertions(+), 21 deletions(-) diff --git a/src/service.rs b/src/service.rs index 948b8a814..bf8d11806 100644 --- a/src/service.rs +++ b/src/service.rs @@ -517,7 +517,6 @@ impl Service { ServiceRequest::TopicQuery(topic_hash, callback) => { // If we look up the 
topic hash for the first time we initialise its kbuckets. if let Entry::Vacant(_) = self.topics_kbuckets.entry(topic_hash) { - trace!("Init kbuckets for topic hash {}", topic_hash); // NOTE: Currently we don't expose custom filter support in the configuration. Users can // optionally use the IP filter via the ip_limit configuration parameter. In the future, we // may expose this functionality to the users if there is demand for it. @@ -530,6 +529,7 @@ impl Service { (None, None) }; + trace!("Initiating kbuckets for topic hash {}", topic_hash); let kbuckets = KBucketsTable::new( NodeId::new(&topic_hash.as_bytes()).into(), Duration::from_secs(60), @@ -538,17 +538,36 @@ impl Service { bucket_filter, ); - self.topics_kbuckets.insert(topic_hash, kbuckets); - + debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); - let mut local_routing_table = self.kbuckets.write().clone(); - for enr in local_routing_table.iter().map(|entry| entry.node.value.clone()) { - self.connection_updated( - enr.node_id(), - ConnectionStatus::Connected(enr, ConnectionDirection::Incoming), - Some(topic_hash), - ); + for entry in self.kbuckets.write().iter() { + match kbuckets.insert_or_update(entry.node.key, entry.node.value.clone(), entry.status) { + InsertResult::Inserted + | InsertResult::Pending { .. } + | InsertResult::StatusUpdated { .. } + | InsertResult::ValueUpdated + | InsertResult::Updated { .. 
} + | InsertResult::UpdatedPending => trace!( + "Added node id {} to kbucket of topic hash {}", + entry.node.value.node_id(), + topic_hash + ), + InsertResult::Failed(FailureReason::BucketFull) => { + error!("Table full for topic hash {}", topic_hash) + } + InsertResult::Failed(FailureReason::BucketFilter) => { + error!("Failed bucket filter for topic hash {}", topic_hash) + } + InsertResult::Failed(FailureReason::TableFilter) => { + error!("Failed table filter for topic hash {}", topic_hash) + } + InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { + error!("Invalid self update for topic hash {}", topic_hash) + } + InsertResult::Failed(_) => error!("Failed to insert ENR for topic hash {}", topic_hash), + } } + self.topics_kbuckets.insert(topic_hash, kbuckets); } self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); } @@ -568,26 +587,45 @@ impl Service { (None, None) }; - debug!("Initiating kbuckets for topic hash {}", topic_hash); - let kbuckets = KBucketsTable::new( + trace!("Initiating kbuckets for topic hash {}", topic_hash); + let mut kbuckets = KBucketsTable::new( NodeId::new(&topic_hash.as_bytes()).into(), Duration::from_secs(60), self.config.incoming_bucket_limit, table_filter, bucket_filter, ); - debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); - self.topics_kbuckets.insert(topic_hash, kbuckets); + debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); - let mut local_routing_table = self.kbuckets.write().clone(); - for enr in local_routing_table.iter().map(|entry| entry.node.value.clone()) { - self.connection_updated( - enr.node_id(), - ConnectionStatus::Connected(enr, ConnectionDirection::Incoming), - Some(topic_hash), - ); + for entry in self.kbuckets.write().iter() { + match kbuckets.insert_or_update(entry.node.key, entry.node.value.clone(), entry.status) { + InsertResult::Inserted + | 
InsertResult::Pending { .. } + | InsertResult::StatusUpdated { .. } + | InsertResult::ValueUpdated + | InsertResult::Updated { .. } + | InsertResult::UpdatedPending => trace!( + "Added node id {} to kbucket of topic hash {}", + entry.node.value.node_id(), + topic_hash + ), + InsertResult::Failed(FailureReason::BucketFull) => { + error!("Table full for topic hash {}", topic_hash) + } + InsertResult::Failed(FailureReason::BucketFilter) => { + error!("Failed bucket filter for topic hash {}", topic_hash) + } + InsertResult::Failed(FailureReason::TableFilter) => { + error!("Failed table filter for topic hash {}", topic_hash) + } + InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { + error!("Invalid self update for topic hash {}", topic_hash) + } + InsertResult::Failed(_) => error!("Failed to insert ENR for topic hash {}", topic_hash), + } } + self.topics_kbuckets.insert(topic_hash, kbuckets); METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); self.send_register_topics(topic_hash); From 3d13daca6093f589e895c079fb4c516ed7bf5415 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 1 Jul 2022 11:24:21 +0200 Subject: [PATCH 228/391] Fix bug --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index bf8d11806..c4569740e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -530,7 +530,7 @@ impl Service { }; trace!("Initiating kbuckets for topic hash {}", topic_hash); - let kbuckets = KBucketsTable::new( + let mut kbuckets = KBucketsTable::new( NodeId::new(&topic_hash.as_bytes()).into(), Duration::from_secs(60), self.config.incoming_bucket_limit, From 0b6ddff6cdfcac8ea1f2eca578eed84970ae8a1a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 10:53:58 +0200 Subject: [PATCH 229/391] Remove old logic --- src/handler/mod.rs | 33 ++------------------------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 
aea1c62bb..5d6061864 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1102,27 +1102,7 @@ impl Handler { // This is a multi-response Nodes response if let Some(remaining_responses) = request_call.remaining_responses.as_mut() { *remaining_responses -= 1; - let reinsert = match request_call.request.body { - RequestBody::RegisterTopic { .. } => { - trace!("Received a REGTOPIC NODES reponse"); - // The request is reinserted for either another NODES response, a TICKET or a - // REGCONFIRMATION response that may come, otherwise the request times out. - remaining_responses >= &mut 0 - } - RequestBody::TopicQuery { .. } => { - trace!("Received a TOPICQUERY NODES reponse"); - // TopicQuerys may receive multiple ADNODE responses as well as NODES responses - // so the request call must be reinserted. - let remaining_adnode_responses = - match request_call.remaining_adnode_responses { - Some(remaining) => remaining > 0, - None => false, - }; - remaining_responses > &mut 0 || remaining_adnode_responses - } - _ => remaining_responses > &mut 0, - }; - if reinsert { + if remaining_responses != &0 { trace!("Reinserting active request"); // more responses remaining, add back the request and send the response // add back the request and send the response @@ -1271,16 +1251,7 @@ impl Handler { request_call.remaining_adnode_responses { *remaining_adnode_responses -= 1; - let reinsert = { - // TopicQuerys may receive multiple ADNODE responses as well as NODES responses - // so the request call must be reinserted. 
- let remaining_responses = match request_call.remaining_responses { - Some(remaining) => remaining > 0, - None => false, - }; - remaining_adnode_responses > &mut 0 || remaining_responses - }; - if reinsert { + if remaining_adnode_responses != &0 { trace!("Reinserting active TOPICQUERY request"); // more responses remaining, add back the request and send the response // add back the request and send the response From 7ece2b02519f540f99802827c684b6f50dac7ee5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 11:08:49 +0200 Subject: [PATCH 230/391] Remove block upon awaiting regconfirmation --- src/handler/mod.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 5d6061864..ea99831eb 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -215,7 +215,9 @@ pub enum RegTopicResponseState { RegisterConfirmation, } -const TIMEOUT_REGCONFIRMATION: Duration = Duration::from_secs(20); +/// The time out for awaiting REGCONFIRMATION responses is the registration window (10 seconds) +/// plus some seconds for processing. +const TIMEOUT_REGCONFIRMATION: Duration = Duration::from_secs(15); /// Process to handle handshakes and sessions established from raw RPC communications between nodes. pub struct Handler { @@ -232,6 +234,8 @@ pub struct Handler { key: Arc>, /// Pending raw requests. active_requests: ActiveRequests, + /// Pending raw REGTOPIC requests awaiting a REGCONFIRMATION response that may come. + requests_awaiting_regconf: ActiveRequests, /// The expected responses by SocketAddr which allows packets to pass the underlying filter. 
filter_expected_responses: Arc>>, /// Keeps track of the 2 expected responses, NODES and ADNODES that should be received from a @@ -325,6 +329,9 @@ impl Handler { enr, key, active_requests: ActiveRequests::new(config.request_timeout), + requests_awaiting_regconf: ActiveRequests::new( + TIMEOUT_REGCONFIRMATION + config.request_timeout, + ), pending_requests: HashMap::new(), filter_expected_responses, topic_query_responses: HashMap::new(), @@ -1162,8 +1169,10 @@ impl Handler { } RegTopicResponseState::Ticket => { *response_state = RegTopicResponseState::RegisterConfirmation; - // Still a REGCONFIRMATION may come hence request call is reinserted. - self.active_requests.insert_at( + // Still a REGCONFIRMATION may come hence request call is reinserted, in a separate + // struct to avoid blocking further requests to the node address during + // TIMEOUT_REGCONFIRMATION time. + self.requests_awaiting_regconf.insert_at( node_address.clone(), request_call, TIMEOUT_REGCONFIRMATION, @@ -1351,8 +1360,10 @@ impl Handler { } RegTopicResponseState::Nodes => { *response_state = RegTopicResponseState::RegisterConfirmation; - // Still a REGCONFIRMATION may come hence request call is reinserted. - self.active_requests.insert_at( + // Still a REGCONFIRMATION may come hence request call is reinserted, in a separate + // struct to avoid blocking further requests to the node address during + // TIMEOUT_REGCONFIRMATION time. + self.requests_awaiting_regconf.insert_at( node_address.clone(), request_call.clone(), TIMEOUT_REGCONFIRMATION, From d2f2744590a3526a62ebd50cece100922cede193 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 12:00:52 +0200 Subject: [PATCH 231/391] fixup! 
Remove block upon awaiting regconfirmation --- src/advertisement/test.rs | 4 ++-- src/handler/active_requests.rs | 13 ------------- src/handler/mod.rs | 33 ++++++++++++++++----------------- 3 files changed, 18 insertions(+), 32 deletions(-) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index 075453b0f..e4470d9a6 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -22,7 +22,7 @@ async fn insert_same_node() { // Since 2 seconds haven't passed assert_eq!( - ads.insert(enr.clone(), topic).map_err(|e| e), + ads.insert(enr.clone(), topic), Err("Node already advertising this topic") ); @@ -53,7 +53,7 @@ async fn insert_ad_and_get_nodes() { // The ad hasn't expired and duplicates are not allowed assert_eq!( - ads.insert(enr.clone(), topic).map_err(|e| e), + ads.insert(enr.clone(), topic), Err("Node already advertising this topic") ); diff --git a/src/handler/active_requests.rs b/src/handler/active_requests.rs index fc406a8ca..a55324ce8 100644 --- a/src/handler/active_requests.rs +++ b/src/handler/active_requests.rs @@ -28,19 +28,6 @@ impl ActiveRequests { .insert(nonce, node_address); } - pub(crate) fn insert_at( - &mut self, - node_address: NodeAddress, - request_call: RequestCall, - timeout: Duration, - ) { - let nonce = *request_call.packet.message_nonce(); - self.active_requests_mapping - .insert_at(node_address.clone(), request_call, timeout); - self.active_requests_nonce_mapping - .insert(nonce, node_address); - } - pub(crate) fn get(&self, node_address: &NodeAddress) -> Option<&RequestCall> { self.active_requests_mapping.get(node_address) } diff --git a/src/handler/mod.rs b/src/handler/mod.rs index ea99831eb..bba89d685 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -235,7 +235,7 @@ pub struct Handler { /// Pending raw requests. active_requests: ActiveRequests, /// Pending raw REGTOPIC requests awaiting a REGCONFIRMATION response that may come. 
- requests_awaiting_regconf: ActiveRequests, + active_requests_regconf: ActiveRequests, /// The expected responses by SocketAddr which allows packets to pass the underlying filter. filter_expected_responses: Arc>>, /// Keeps track of the 2 expected responses, NODES and ADNODES that should be received from a @@ -329,7 +329,7 @@ impl Handler { enr, key, active_requests: ActiveRequests::new(config.request_timeout), - requests_awaiting_regconf: ActiveRequests::new( + active_requests_regconf: ActiveRequests::new( TIMEOUT_REGCONFIRMATION + config.request_timeout, ), pending_requests: HashMap::new(), @@ -1089,7 +1089,14 @@ impl Handler { async fn handle_response(&mut self, node_address: NodeAddress, response: Response) { // Find a matching request, if any trace!("Received {} response", response.body); - if let Some(mut request_call) = self.active_requests.remove(&node_address) { + + let request_call = if let Some(request_call) = self.active_requests.remove(&node_address) { + Some(request_call) + } else { + self.active_requests_regconf.remove(&node_address) + }; + + if let Some(mut request_call) = request_call { if request_call.id() != &response.id { trace!( "Received an RPC Response to an unknown request. Likely late response. {}", @@ -1170,13 +1177,9 @@ impl Handler { RegTopicResponseState::Ticket => { *response_state = RegTopicResponseState::RegisterConfirmation; // Still a REGCONFIRMATION may come hence request call is reinserted, in a separate - // struct to avoid blocking further requests to the node address during - // TIMEOUT_REGCONFIRMATION time. - self.requests_awaiting_regconf.insert_at( - node_address.clone(), - request_call, - TIMEOUT_REGCONFIRMATION, - ); + // struct to avoid blocking further requests to the node address during the request timeout. 
+ self.active_requests_regconf + .insert(node_address.clone(), request_call); if let Err(e) = self .service_send .send(HandlerOut::Response( @@ -1361,13 +1364,9 @@ impl Handler { RegTopicResponseState::Nodes => { *response_state = RegTopicResponseState::RegisterConfirmation; // Still a REGCONFIRMATION may come hence request call is reinserted, in a separate - // struct to avoid blocking further requests to the node address during - // TIMEOUT_REGCONFIRMATION time. - self.requests_awaiting_regconf.insert_at( - node_address.clone(), - request_call.clone(), - TIMEOUT_REGCONFIRMATION, - ); + // struct to avoid blocking further requests to the node address during the request timeout. + self.active_requests_regconf + .insert(node_address.clone(), request_call.clone()); if let Err(e) = self .service_send .send(HandlerOut::Response( From 983819bc4df35b4f9b97f4ac06b4d6f0ea1b82fd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 12:43:09 +0200 Subject: [PATCH 232/391] Ban nodes that send invalid ticket and fix bug --- src/error.rs | 3 +++ src/service.rs | 54 ++++++++++++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 24 deletions(-) diff --git a/src/error.rs b/src/error.rs index 88fe53121..f0319f97d 100644 --- a/src/error.rs +++ b/src/error.rs @@ -116,6 +116,9 @@ pub enum RequestError { /// A request that is responded with multiple respones /// gets the wrong combination of responses. InvalidResponseCombo(String), + /// A REGTOPIC requerst has sent a ticket that was not + /// issued by us. + InvalidTicket, } #[derive(Debug, Clone, PartialEq)] diff --git a/src/service.rs b/src/service.rs index c4569740e..6ad0598b7 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1087,16 +1087,19 @@ impl Service { RequestBody::RegisterTopic { topic, enr, ticket } => { // Drop if request tries to advertise another node than sender if enr.node_id() != node_address.node_id { + debug!("The enr node id in REGTOPIC request body does not match sender's. 
Nodes can only register themselves."); return; } match self.config.ip_mode { IpMode::Ip4 => { if enr.udp4_socket().map(SocketAddr::V4) != Some(node_address.socket_addr) { + debug!("The enr ip in REGTOPIC request body does not match sender's. Nodes can only register themselves."); return; } } IpMode::Ip6 { .. } => { if enr.udp6_socket().map(SocketAddr::V6) != Some(node_address.socket_addr) { + debug!("The enr ip in REGTOPIC request body does not match sender's. Nodes can only register themselves."); return; } } @@ -1125,7 +1128,7 @@ impl Service { ); if !ticket.is_empty() { - let decoded_enr = + let decoded_local_enr = self.local_enr .write() .to_base64() @@ -1133,8 +1136,8 @@ impl Service { .map_err(|e| { error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) }); - if let Ok(decoded_enr) = decoded_enr { - if let Some(ticket_key) = decoded_enr.get("ticket_key") { + if let Ok(decoded_local_enr) = decoded_local_enr { + if let Some(ticket_key) = decoded_local_enr.get("ticket_key") { let decrypted_ticket = { let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); let payload = Payload { @@ -1181,31 +1184,34 @@ impl Service { } }) .ok(); + } else { + warn!("Node sent a ticket that couldn't be decrypted with local ticket key. Blacklisting: {}", node_address.node_id); + let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::InvalidTicket); } } - } else { - debug!("Sending TICKET response"); - // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no - // wait time for the ad slot. See discv5 spec. This node will not store tickets received - // with wait time 0. - self.send_ticket_response( - node_address.clone(), - id.clone(), - new_ticket.clone(), - wait_time, - ); - // If current wait time is 0, the ticket is added to the matching ticket pool. 
- if wait_time == Duration::from_secs(0) { - self.ticket_pools.insert( - enr, - id, - new_ticket, - node_address.socket_addr.ip(), - ); - } } } else { - debug!("REGTOPIC enr does not match request sender's enr. Nodes can only register themselves."); + debug!("Sending TICKET response"); + // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no + // wait time for the ad slot. See discv5 spec. This node will not store tickets received + // with wait time 0. + self.send_ticket_response( + node_address.clone(), + id.clone(), + new_ticket.clone(), + wait_time, + ); + // If current wait time is 0, the ticket is added to the matching ticket pool. + if wait_time == Duration::from_secs(0) { + self.ticket_pools.insert( + enr, + id, + new_ticket, + node_address.socket_addr.ip(), + ); + } } } RequestBody::TopicQuery { topic } => { From 42ee4e4bfeda9d3762f1ac7ca6804eaf09e90310 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 12:56:32 +0200 Subject: [PATCH 233/391] Run cargo fmt --- src/service.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/service.rs b/src/service.rs index 6ad0598b7..eba896530 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1128,14 +1128,14 @@ impl Service { ); if !ticket.is_empty() { - let decoded_local_enr = - self.local_enr - .write() - .to_base64() - .parse::() - .map_err(|e| { - error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) - }); + let decoded_local_enr = self + .local_enr + .write() + .to_base64() + .parse::() + .map_err(|e| { + error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) + }); if let Ok(decoded_local_enr) = decoded_local_enr { if let Some(ticket_key) = decoded_local_enr.get("ticket_key") { let decrypted_ticket = { @@ -1186,7 +1186,8 @@ impl Service { .ok(); } else { warn!("Node sent a ticket that couldn't be decrypted with local ticket key. 
Blacklisting: {}", node_address.node_id); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); + let ban_timeout = + self.config.ban_duration.map(|v| Instant::now() + v); PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); self.rpc_failure(id, RequestError::InvalidTicket); } From c107493056122df9a3544c2d5f9b68f7e2f41fe2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 17:35:28 +0200 Subject: [PATCH 234/391] Check distances of returned nodes for topic reqs --- src/config.rs | 13 ++------- src/service.rs | 76 +++++++++++++++++++++++++++----------------------- 2 files changed, 43 insertions(+), 46 deletions(-) diff --git a/src/config.rs b/src/config.rs index 27b252fce..1eb3bf7e8 100644 --- a/src/config.rs +++ b/src/config.rs @@ -96,11 +96,8 @@ pub struct Discv5Config { /// will last indefinitely. Default is 1 hour. pub ban_duration: Option, - /// The max length in bits that the suffix of the topic hash is allowed to vary from the node ids that - /// REGTOPIC and TOPICQUERY requests are sent to. Setting it to 256 means that the requests are sent to - /// all of the nodes in the kbuckets. - pub topic_radius: u64, - + /// A topic look up should time out after a set duration, after which no more TOPICQUERY requests should + /// be sent to peers regardless of the number of results found. This is in order to avoid starvation. pub topic_query_timeout: Duration, /// A custom executor which can spawn the discv5 tasks. 
This must be a tokio runtime, with @@ -143,7 +140,6 @@ impl Default for Discv5Config { filter_max_bans_per_ip: Some(5), permit_ban_list: PermitBanList::default(), ban_duration: Some(Duration::from_secs(3600)), // 1 hour - topic_radius: 256, topic_query_timeout: Duration::from_secs(60), ip_mode: IpMode::default(), executor: None, @@ -309,11 +305,6 @@ impl Discv5ConfigBuilder { self } - pub fn topic_radius(&mut self, topic_radius: u64) -> &mut Self { - self.config.topic_radius = topic_radius; - self - } - /// A custom executor which can spawn the discv5 tasks. This must be a tokio runtime, with /// timing support. pub fn executor(&mut self, executor: Box) -> &mut Self { diff --git a/src/service.rs b/src/service.rs index eba896530..04403582e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1279,12 +1279,25 @@ impl Service { ); } - let topic_radius = (1..=self.config.topic_radius).collect(); // These are sanitized and ordered - let distances_requested = match &active_request.request_body { - RequestBody::FindNode { distances } => distances, - RequestBody::TopicQuery { .. } | RequestBody::RegisterTopic { .. } => { - &topic_radius + let distances_requested: Vec = match &active_request.request_body { + RequestBody::FindNode { distances } => distances.clone(), + RequestBody::TopicQuery { topic } + | RequestBody::RegisterTopic { topic, .. } => { + let peer_key: kbucket::Key = node_address.node_id.into(); + let topic_key: kbucket::Key = + NodeId::new(&topic.as_bytes()).into(); + let distance_to_topic = peer_key.log2_distance(&topic_key); + if let Some(distance) = distance_to_topic { + [distance - 1, distance, distance + 1].into() + } else { + warn!("The node id of this peer is the requested topic hash. 
Blacklisting peer with node id {}", node_id); + let ban_timeout = + self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::InvalidTicket); + return; + } } _ => unreachable!(), }; @@ -1404,39 +1417,32 @@ impl Service { // ensure any mapping is removed in this rare case self.active_nodes_responses.remove(&node_id); - match active_request.request_body { - RequestBody::TopicQuery { topic } => { - self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); + if let RequestBody::TopicQuery { topic } = active_request.request_body { + self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); - let response_state = self - .topic_query_responses - .entry(node_id) - .or_insert(TopicQueryResponseState::Start); + let response_state = self + .topic_query_responses + .entry(node_id) + .or_insert(TopicQueryResponseState::Start); - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::Nodes; - self.active_requests.insert(id, active_request); - } - TopicQueryResponseState::AdNodes => { - self.topic_query_responses.remove(&node_id); - } - TopicQueryResponseState::Nodes => { - debug_unreachable!("No more NODES responses should be received if TOPICQUERY response is in Nodes state.") - } + match response_state { + TopicQueryResponseState::Start => { + *response_state = TopicQueryResponseState::Nodes; + self.active_requests.insert(id, active_request); + } + TopicQueryResponseState::AdNodes => { + self.topic_query_responses.remove(&node_id); + } + TopicQueryResponseState::Nodes => { + debug_unreachable!("No more NODES responses should be received if TOPICQUERY response is in Nodes state.") } } - RequestBody::RegisterTopic { - topic, - enr: _, - ticket: _, - } => self.discovered(&node_id, nodes, active_request.query_id, Some(topic)), - RequestBody::FindNode { .. 
} => { - self.discovered(&node_id, nodes, active_request.query_id, None) - } - _ => debug_unreachable!( - "Only TOPICQUERY, REGTOPIC and FINDNODE requests expect NODES response" - ), + } else if let RequestBody::RegisterTopic { topic, .. } = + active_request.request_body + { + self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); + } else if let RequestBody::FindNode { .. } = active_request.request_body { + self.discovered(&node_id, nodes, active_request.query_id, None) } } ResponseBody::AdNodes { total, mut nodes } => { @@ -1930,7 +1936,7 @@ impl Service { .kbuckets .write() .nodes_by_distances( - &[distance - 1, distance + 1], + &[distance - 1, distance, distance + 1], self.config.max_nodes_response - closest_peers.len(), ) .iter() From 9012e2698c3a87a46e2be9e3cf2b75e14f163e50 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 17:42:44 +0200 Subject: [PATCH 235/391] Fixup comments --- src/discv5.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 3f7171404..455cde53f 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -58,11 +58,7 @@ pub enum Discv5Event { /// This happen spontaneously through queries as nodes return ENR's. These ENR's are not /// guaranteed to be live or contactable. Discovered(Enr), - /// A node has been discovered from either a REGTOPIC or a TOPICQUERY request. - /// - /// The ENR of the node is returned. Various properties can be derived from the ENR. - /// This happen spontaneously through requests as nodes return ENR's. These ENR's are not - /// guaranteed to be live or contactable. + /// A node has been discovered from either a REGTOPIC or a TOPICQUERY request. See [`Discv5Event::Discovered`]. DiscoveredTopic(Enr, TopicHash), /// A new ENR was added to the routing table. EnrAdded { enr: Enr, replaced: Option }, @@ -555,15 +551,14 @@ impl Discv5 { /// from the next interval on. 
pub fn remove_topic( &self, - topic: String, + topic_hash: TopicHash, ) -> impl Future> + 'static { - let topic = Topic::new(topic); let channel = self.clone_channel(); async move { let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; let (callback_send, callback_recv) = oneshot::channel(); - let event = ServiceRequest::RemoveTopic(topic.hash(), callback_send); + let event = ServiceRequest::RemoveTopic(topic_hash, callback_send); channel .send(event) .await From fbcdfdfbab4181c932bee2d0522b9ab5d24b1257 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 21:01:28 +0200 Subject: [PATCH 236/391] Comment code and fix misc bugs --- src/advertisement/mod.rs | 1 + src/advertisement/ticket.rs | 8 ++- src/handler/mod.rs | 31 +++++++-- src/service.rs | 126 +++++++++++++++++++++++++++--------- 4 files changed, 131 insertions(+), 35 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index b941275fd..3387ed40b 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -125,6 +125,7 @@ impl Ads { self.ads.get(&topic).into_iter().flatten() } + /// Ticket wait time enforces diversity among adveritsed nodes. pub fn ticket_wait_time( &mut self, topic: TopicHash, diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 68828156c..f139cc7ba 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -40,6 +40,10 @@ impl ActiveTopic { pub fn topic(&self) -> TopicHash { self.topic } + + pub fn node_id(&self) -> &NodeId { + &self.node_id + } } /// A ticket is active when it is associated with the node contact of @@ -118,7 +122,7 @@ impl Stream for Tickets { } } -/// An PendingTicket maps to a Ticket received by another node in Tickets upon insert. +/// A PendingTicket maps to a Ticket received by another node in Tickets upon insert. 
#[derive(Clone)] struct PendingTicket { /// The ActiveTopic serves to match the Ticket to an entry in Tickets' @@ -248,7 +252,7 @@ impl PoolTicket { #[derive(Default)] pub struct TicketPools { /// The ticket_pools keeps track of all the registrants and their Tickets. One - /// ticket_pool per TopicHash can be open at a time. A ticket pool collects the + /// ticket pool per TopicHash can be open at a time. A ticket pool collects the /// valid tickets received within the registration window for a topic. ticket_pools: HashMap>, /// The expirations keeps track of when to close a ticket pool so the next one diff --git a/src/handler/mod.rs b/src/handler/mod.rs index bba89d685..e8d7366f6 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -202,16 +202,37 @@ impl RequestCall { } } +/// TOPICQUERY requests receive 2 types of responses ADNODES and NODES, in an +/// order which cannot be guranteed. If a peer sends the wrong combination of +/// responses the peer is blacklisted. +#[derive(Default)] pub enum TopicQueryResponseState { + /// The Start state is intermediary upon receving the first response to the + /// TOPICQUERY request, either a NODES or ADNODES response. + #[default] Start, + /// A NODES response has been completely received. Nodes, + /// An ADNODES response has been completely received. AdNodes, } +/// REGTOPIC requests receive 3 types of responses TICKET, NODES and possibly +/// a REGCONFIRMATION. The order of the ticket and nodes is non-determinsitic +/// but the regconf, if it comes, always comes at least 10 seconds (duration +/// of the registration window) + latency later. If a peer sends the wrong +/// permutation of responses the peer is blacklisted. +#[derive(Default)] pub enum RegTopicResponseState { + /// The Start state is intermediary upon receving the first response to the + /// REGTOPIC request, either a NODES or TICKET response. + #[default] Start, + /// A NODES response has been completely received. 
Nodes, + /// A TICKET response has been received. Ticket, + /// A REGISTERCONFIRMATION response has been received. RegisterConfirmation, } @@ -478,6 +499,8 @@ impl Handler { if let Entry::Occupied(entry) = self.reg_topic_responses.entry(node_address.clone()) { let response_state = entry.get(); if let RegTopicResponseState::RegisterConfirmation = response_state { + // There is no guarantee that a REGCONFIRMATION responses should come to a REGTOPIC + // request. A timeout while awaiting a REGCONFIRMATION is not a failure. self.reg_topic_responses.remove(&node_address); self.remove_expected_response(node_address.socket_addr); self.send_next_request(node_address).await; @@ -1155,7 +1178,7 @@ impl Handler { let response_state = self .reg_topic_responses .entry(node_address.clone()) - .or_insert(RegTopicResponseState::Start); + .or_default(); match response_state { RegTopicResponseState::Start => { @@ -1215,7 +1238,7 @@ impl Handler { let response_state = self .topic_query_responses .entry(node_address.clone()) - .or_insert(TopicQueryResponseState::Start); + .or_default(); match response_state { TopicQueryResponseState::Start => { @@ -1297,7 +1320,7 @@ impl Handler { let response_state = self .topic_query_responses .entry(node_address.clone()) - .or_insert(TopicQueryResponseState::Start); + .or_default(); match response_state { TopicQueryResponseState::Start => { @@ -1342,7 +1365,7 @@ impl Handler { let response_state = self .reg_topic_responses .entry(node_address.clone()) - .or_insert(RegTopicResponseState::Start); + .or_default(); match response_state { RegTopicResponseState::Start => { diff --git a/src/service.rs b/src/service.rs index 04403582e..894df8896 100644 --- a/src/service.rs +++ b/src/service.rs @@ -143,6 +143,18 @@ impl TalkRequest { /// The max wait time accpeted for tickets. const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; +/// The max nodes to adveritse for a topic. +const MAX_ADS_TOPIC: usize = 100; + +/// The max nodes to advertise. 
+const MAX_ADS: usize = 50000; + +/// The max ads per subnet per topic. +const MAX_ADS_SUBNET_TOPIC: usize = 5; + +/// The max ads per subnet. +const MAX_ADS_SUBNET: usize = 50; + /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { /// A request to start a query. There are two types of queries: @@ -162,12 +174,15 @@ pub enum ServiceRequest { /// Sets up an event stream where the discv5 server will return various events such as /// discovered nodes as it traverses the DHT. RequestEventStream(oneshot::Sender>), - /// Queries given node for nodes advertising a topic hash + /// Starts a topic look up of nodes advertising a topic in a discv5 network. TopicQuery(TopicHash, oneshot::Sender, RequestError>>), - /// RegisterTopic publishes this node as an advertiser for a topic at given node + /// RegisterTopic publishes this node as an advertiser for a topic in a discv5 network + /// until removed. RegisterTopic(TopicHash), - ActiveTopics(oneshot::Sender>), + /// Stops publishing this node as an advetiser for a topic. RemoveTopic(TopicHash, oneshot::Sender>), + /// Retrieves the ads currently published by this node on other nodes in a discv5 network. + ActiveTopics(oneshot::Sender>), } use crate::discv5::PERMIT_BAN_LIST; @@ -203,7 +218,8 @@ pub struct Service { topic_query_responses: HashMap, /// Keeps track of the 3 expected responses, TICKET and NODES that should be received from a - /// REGTOPIC request and REGCONFIRMATION that may be received. + /// REGTOPIC request and REGCONFIRMATION that may be received if there is a free ad slot and + /// the node is selected by the remote node for the free ad slot. active_regtopic_requests: ActiveRegtopicRequests, /// A map of votes nodes have made about our external IP address. We accept the majority. @@ -233,7 +249,7 @@ pub struct Service { /// Ads advertised locally for other nodes. 
ads: Ads, - /// Topics tracks registration attempts of the topics to advertise on + /// Topics tracks registration attempts of the topic hashes to advertise on /// other nodes. topics: HashMap>>, @@ -253,40 +269,74 @@ pub struct Service { active_topic_queries: ActiveTopicQueries, } +/// The state of a topic lookup which changes as responses to sent TOPICQUERYs are received. +/// A topic look up may require more than one round of sending TOPICQUERYs to obtain the set +/// number of ads for the topic. #[derive(Debug)] pub enum TopicQueryState { + /// The topic look up has obtained enough results. Finished(TopicHash), + /// The topic look up has not obtained enough results and has timed out. TimedOut(TopicHash), - // Not enough ads have been returned, more peers should be queried. + /// Not enough ads have been returned from the first round of sending TOPICQUERY + /// requests, new peers in the topic's kbucktes should be queried. Unsatisfied(TopicHash, usize), - // No new peers can be found to send TOPICQUERYs to. + /// No new peers in the topic's kbuckets can be found to send TOPICQUERYs too. Dry(TopicHash), } +/// The state of a response to a single TOPICQUERY request. A topic lookup/query is +/// made up of several TOPICQUERYs each being sent to a different peer. +#[derive(Default)] pub enum TopicQueryResponseState { + #[default] + /// The Start state is intermediary upon receving the first response to the + /// TOPICQUERY request, either a NODES or ADNODES response. Start, + /// A NODES response has been completely received. Nodes, + /// An ADNODES response has been completely received. AdNodes, } +/// At any given time, a set number of registrations should be active per topic hash to +/// set to be registered. A registration is active when either a ticket for an adslot is +/// held and the ticket wait time has not yet expired, or a REGCONFIRMATION has been +/// received for an ad slot and the ad lifetime has not yet elapsed. 
pub enum RegistrationState { + /// A REGCONFIRMATION has been received at the given instant. Confirmed(Instant), + /// A TICKET has been received and the ticket is being held for the duration of the + /// wait time. Ticket, } +/// An active topic query/lookup keeps track of which peers from the topic's kbuckets +/// have already been queired until the set number of ads are found for the lookup or it +/// is prematurely terminated in lack of peers or time. pub struct ActiveTopicQuery { - // A NodeId mapped to false is waiting for a response or failed request. + /// A NodeId mapped to false is waiting for a response. A value of true means the + /// TOPICQUERY has received a response or the request has failed. queried_peers: HashMap, - // An ad returned by multiple peers is only included once. + /// An ad returned by multiple peers is only included once in the results. results: HashMap, + /// The resulting ad nodes are returned to the app layer when the query has reached + /// a Finished, TimedOut or Dry state. callback: Option, RequestError>>>, + /// A start time is used to montior time out of the query. start: Instant, + /// A query is marked as dry being true if no peers are found in the topic's kbuckets + /// that aren't already queried peers. dry: bool, } +/// ActiveTopicQueries marks the progress of active topic queries/lookups. pub struct ActiveTopicQueries { + /// Each topic lookup initiates an ActiveTopicQuery process. queries: HashMap, + /// The time out for any topic lookup. time_out: Duration, + /// The number of ads an ActiveTopicQuery sets out to find. 
num_results: usize, } @@ -408,7 +458,13 @@ impl Service { let (discv5_send, discv5_recv) = mpsc::channel(30); let (exit_send, exit) = oneshot::channel(); - let ads = match Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3) { + let ads = match Ads::new( + Duration::from_secs(60 * 15), + MAX_ADS_TOPIC, + MAX_ADS, + MAX_ADS_SUBNET, + MAX_ADS_SUBNET_TOPIC, + ) { Ok(ads) => ads, Err(e) => { return Err(Error::new(ErrorKind::InvalidInput, e)); @@ -421,6 +477,8 @@ impl Service { } }; + // A key is generated for en-/decrypting tickets that are issued upon receiving a topic + // regsitration attempt. let ticket_key: [u8; 16] = rand::random(); match local_enr .write() @@ -742,7 +800,16 @@ impl Service { Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { let enr = self.local_enr.read().clone(); // When the ticket time expires a new regtopic request is automatically sent - // to the ticket issuer. + // to the ticket issuer and the registration state for the given topic is + // updated. + if let Some(reg_attempts) = self.topics.get_mut(&active_topic.topic()) { + for kbucket_reg_attempts in reg_attempts.values_mut() { + let reg_state = kbucket_reg_attempts.remove(active_topic.node_id()); + if reg_state.is_some() { + break; + } + } + } self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } Some(Ok((topic, mut ticket_pool))) = self.ticket_pools.next() => { @@ -779,6 +846,7 @@ impl Service { } } + /// Internal function that starts a topic registration. fn send_register_topics(&mut self, topic_hash: TopicHash) { trace!("Sending REGTOPICS"); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { @@ -834,7 +902,7 @@ impl Service { } } - /// Internal function that starts a query. + /// Internal function that starts a topic lookup. 
fn send_topic_queries( &mut self, topic_hash: TopicHash, @@ -1224,7 +1292,7 @@ impl Service { "TOPICQUERY", ); trace!("Sending ADNODES response"); - self.send_topic_query_nodes_response(node_address, id, topic); + self.send_topic_query_adnodes_response(node_address, id, topic); } } } @@ -1420,10 +1488,7 @@ impl Service { if let RequestBody::TopicQuery { topic } = active_request.request_body { self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); - let response_state = self - .topic_query_responses - .entry(node_id) - .or_insert(TopicQueryResponseState::Start); + let response_state = self.topic_query_responses.entry(node_id).or_default(); match response_state { TopicQueryResponseState::Start => { @@ -1505,10 +1570,7 @@ impl Service { }); *query.queried_peers.entry(node_id).or_default() = true; } - let response_state = self - .topic_query_responses - .entry(node_id) - .or_insert(TopicQueryResponseState::Start); + let response_state = self.topic_query_responses.entry(node_id).or_default(); match response_state { TopicQueryResponseState::Start => { @@ -1887,9 +1949,9 @@ impl Service { .send(HandlerIn::Response(node_address, Box::new(response))); } - /// Answer to a topic query containing the nodes currently advertised for the + /// Response to a topic query containing the nodes currently advertised for the /// requested topic if any. - fn send_topic_query_nodes_response( + fn send_topic_query_adnodes_response( &mut self, node_address: NodeAddress, rpc_id: RequestId, @@ -1912,6 +1974,9 @@ impl Service { ); } + /// Finds a list of ENRs in the local routing table's kbucktets at the distance ±1 that + /// the topic hash would be placed in, to send in a NODES response to a TOPICQUERY or + /// REGTOPIC request. fn send_find_topic_nodes_response( &mut self, topic: TopicHash, @@ -1960,8 +2025,8 @@ impl Service { ); } - /// Sends a NODES response, given a list of found ENR's. 
This function splits the nodes up - /// into multiple responses to ensure the response stays below the maximum packet size. + /// Finds a list of ENRs in the local routing table at the given distances, to send in a + /// NODES response to a FINDNODE request. fn send_find_nodes_response( &mut self, node_address: NodeAddress, @@ -2010,6 +2075,8 @@ impl Service { ); } + /// Sends a NODES response, given a list of ENRs. This function splits the nodes up + /// into multiple responses to ensure the response stays below the maximum packet size. fn send_nodes_response( &self, nodes_to_send: Vec, @@ -2192,7 +2259,7 @@ impl Service { } } - /// Processes discovered peers from a query. + /// Processes discovered peers from a query or a TOPICQUERY or REGTOPIC request. fn discovered( &mut self, source: &NodeId, @@ -2332,7 +2399,7 @@ impl Service { /// Update the connection status of a node in the routing table. /// This tracks whether or not we should be pinging peers. Disconnected peers are removed from - /// the queue and newly added peers to the routing table are added to the queue. + /// the queue and newly added peers to the routing table (or topics kbucktes) are added to the queue. fn connection_updated( &mut self, node_id: NodeId, @@ -2648,8 +2715,9 @@ impl Service { .await } - /// A future that maintains the topic kbuckets and inserts nodes when required. This returns the - /// `Discv5Event::NodeInsertedTopics` variants. + /// A future that maintains the topic kbuckets and inserts nodes when required. This optionally + /// returns the `Discv5Event::NodeInsertedTopics` variant if a new node has been inserted into + /// the routing table. 
async fn bucket_maintenance_poll_topics( kbuckets: impl Iterator)>, ) -> Option { From a2186b9b2a48d4e2c4dbd4c3f7bb13ba5b8e0b40 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 21:30:54 +0200 Subject: [PATCH 237/391] Update rust before running clippy and check-rustdoc-links --- .github/workflows/build.yml | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 44196f99c..a80a2fad2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -6,18 +6,20 @@ jobs: cargo-fmt: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Get latest version of stable rust - run: rustup update stable - - name: Check formatting with cargofmt - run: cargo fmt --all -- --check --config imports_granularity=Crate + - uses: actions/checkout@v2 + - name: Get latest version of stable rust + run: rustup update stable + - name: Check formatting with cargofmt + run: cargo fmt --all -- --check --config imports_granularity=Crate clippy: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v2 - - name: Lint code for quality and style with Clippy - run: cargo clippy --workspace --tests --all-features -- -D warnings + - uses: actions/checkout@v2 + - name: Get latest version of stable rust + run: rustup update stable + - name: Lint code for quality and style with Clippy + run: cargo clippy --workspace --tests --all-features -- -D warnings release-tests-ubuntu: runs-on: ubuntu-latest needs: cargo-fmt @@ -44,6 +46,8 @@ jobs: container: image: rust steps: - - uses: actions/checkout@v2 - - name: Check rustdoc links - run: RUSTDOCFLAGS="--deny broken_intra_doc_links" cargo doc --verbose --workspace --no-deps --document-private-items + - uses: actions/checkout@v2 + - name: Get latest version of stable rust + run: rustup update stable + - name: Check rustdoc links + run: RUSTDOCFLAGS="--deny broken_intra_doc_links" cargo doc 
--verbose --workspace --no-deps --document-private-items From c38e2d083df4afeaa80e9ecdda72b71b56c8fc2e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 4 Jul 2022 21:57:47 +0200 Subject: [PATCH 238/391] Fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0cbc0743e..745e37164 100644 --- a/README.md +++ b/README.md @@ -109,7 +109,7 @@ non-contactable peers. This is done in the following way: -1. If a connecting node provides and ENR without specifying an address (this +1. If a connecting node provides an ENR without specifying an address (this should be the default case for most nodes behind a NAT, or ones that have just started) we consider this valid. Typically this will occur when a node has yet to determine its external IP address via PONG responses and has not From c760f413d4d0e3cd9221874cb8a3211af4a4e6ba Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 5 Jul 2022 09:58:53 +0200 Subject: [PATCH 239/391] Fix comment --- src/handler/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index e8d7366f6..3ee93b086 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -219,9 +219,9 @@ pub enum TopicQueryResponseState { /// REGTOPIC requests receive 3 types of responses TICKET, NODES and possibly /// a REGCONFIRMATION. The order of the ticket and nodes is non-determinsitic -/// but the regconf, if it comes, always comes at least 10 seconds (duration -/// of the registration window) + latency later. If a peer sends the wrong -/// permutation of responses the peer is blacklisted. +/// but the regconf, if it comes, always come up to 10 seconds (depending on +/// when in the registration window the request comes) + latency later. If a +/// peer sends the wrong permutation of responses the peer is blacklisted. 
#[derive(Default)] pub enum RegTopicResponseState { /// The Start state is intermediary upon receving the first response to the From cf6fd4d7a11159814a680430a1909509d4b1d3cd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 6 Jul 2022 11:31:52 +0200 Subject: [PATCH 240/391] Put repeated code in sync closure --- src/handler/mod.rs | 60 +++++++++++++++++++--------------------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 3ee93b086..6f3e1336a 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1130,6 +1130,15 @@ impl Handler { return; } + let blacklist_peer = |handler: &mut Handler| { + // Remove the expected response + handler.remove_expected_response(node_address.socket_addr); + let ban_timeout = handler.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST + .write() + .ban(node_address.clone(), ban_timeout); + }; + // The response matches a request // Check to see if this is a Nodes response, in which case we may require to wait for @@ -1217,18 +1226,13 @@ impl Handler { } RegTopicResponseState::Nodes | RegTopicResponseState::RegisterConfirmation => { - warn!("No more NODES responses should be received if REGTOPIC response is in Nodes or RegisterConfirmation state."); - self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a REGTOPIC request".into()), true).await; - // Remove the expected response - self.remove_expected_response(node_address.socket_addr); + debug!("No more NODES responses should be received if REGTOPIC response is in Nodes or RegisterConfirmation state."); warn!( "Peer returned more than one set of NODES responses for REGTOPIC request. 
Blacklisting {}", node_address ); - let ban_timeout = self.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST - .write() - .ban(node_address.clone(), ban_timeout); + self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a REGTOPIC request".into()), true).await; + blacklist_peer(self); return; } } @@ -1261,18 +1265,13 @@ impl Handler { self.topic_query_responses.remove(&node_address); } TopicQueryResponseState::Nodes => { - warn!("No more NODES responses should be received if TOPICQUERY response is in Nodes state."); - self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; - // Remove the expected response - self.remove_expected_response(node_address.socket_addr); + debug!("No more NODES responses should be received if TOPICQUERY response is in Nodes state."); warn!( "Peer returned more than one set of NODES responses for TOPICQUERY request. 
Blacklisting {}", node_address ); - let ban_timeout = self.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST - .write() - .ban(node_address.clone(), ban_timeout); + self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; + blacklist_peer(self); return; } } @@ -1343,19 +1342,13 @@ impl Handler { self.topic_query_responses.remove(&node_address); } TopicQueryResponseState::AdNodes => { - warn!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state."); - self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; - // Remove the expected response - self.remove_expected_response(node_address.socket_addr); + debug!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state."); warn!( "Peer returned more than one set of ADNODES responses for TOPICQUERY request. Blacklisting {}", node_address ); - let ban_timeout = self.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST - .write() - .ban(node_address.clone(), ban_timeout); - self.send_next_request(node_address).await; + self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of ADNODES responses for a TOPICQUERY request".into()), true).await; + blacklist_peer(self); return; } } @@ -1403,7 +1396,11 @@ impl Handler { return; } RegTopicResponseState::Ticket | RegTopicResponseState::RegisterConfirmation => { - warn!("No more TICKET responses should be received if REGTOPIC response is in Ticket or RegisterConfirmation state."); + debug!("No more TICKET responses should be received if REGTOPIC response is in Ticket or RegisterConfirmation state."); + warn!( + "Peer returned more than one TICKET responses for REGTOPIC request. 
Blacklisting {}", + node_address + ); self.fail_request( request_call, RequestError::InvalidResponseCombo( @@ -1413,16 +1410,7 @@ impl Handler { true, ) .await; - // Remove the expected response - self.remove_expected_response(node_address.socket_addr); - warn!( - "Peer returned more than one TICKET responses for REGTOPIC request. Blacklisting {}", - node_address - ); - let ban_timeout = self.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST - .write() - .ban(node_address.clone(), ban_timeout); + blacklist_peer(self); return; } } From 01ad49d924f4d40647544af5f2c876262478904a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Jul 2022 10:42:36 +0200 Subject: [PATCH 241/391] Fix retain statements and unsafe addition of discovered nodes to topics kbuckets --- src/service.rs | 151 ++++++++++++++++++++++---------------------- src/service/test.rs | 3 +- 2 files changed, 77 insertions(+), 77 deletions(-) diff --git a/src/service.rs b/src/service.rs index cf3f609f3..42b5cece3 100644 --- a/src/service.rs +++ b/src/service.rs @@ -155,6 +155,12 @@ const MAX_ADS_SUBNET_TOPIC: usize = 5; /// The max ads per subnet. const MAX_ADS_SUBNET: usize = 50; +/// The time window within in which the number of new tickets from a peer for a topic will be limitied. +const TICKET_LIMITER: Duration = Duration::from_secs(60 * 15); + +/// The time after a REGCONFIRMATION is sent that an ad is placed. +const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); + /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { /// A request to start a query. There are two types of queries: @@ -251,11 +257,16 @@ pub struct Service { /// Topics tracks registration attempts of the topic hashes to advertise on /// other nodes. - topics: HashMap>>, + registration_attempts: HashMap>>, /// KBuckets per topic hash. 
topics_kbuckets: HashMap>, + /// The peers returned in a NODES response to a TOPICQUERY or REGTOPIC request are inserted in + /// this intermediary stroage to check their connectivity before inserting them in the topic's + /// kbuckets. + discovered_peers_topic: HashMap>, + /// Ads currently advertised on other nodes. active_topics: Ads, @@ -459,7 +470,7 @@ impl Service { let (exit_send, exit) = oneshot::channel(); let ads = match Ads::new( - Duration::from_secs(60 * 15), + AD_LIFETIME, MAX_ADS_TOPIC, MAX_ADS, MAX_ADS_SUBNET, @@ -470,7 +481,13 @@ impl Service { return Err(Error::new(ErrorKind::InvalidInput, e)); } }; - let active_topics = match Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3) { + let active_topics = match Ads::new( + AD_LIFETIME, + MAX_ADS_TOPIC, + MAX_ADS, + MAX_ADS_SUBNET, + MAX_ADS_SUBNET_TOPIC, + ) { Ok(ads) => ads, Err(e) => { return Err(Error::new(ErrorKind::InvalidInput, e)); @@ -513,10 +530,11 @@ impl Service { discv5_recv, event_stream: None, ads, - topics: HashMap::new(), + registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), + discovered_peers_topic: HashMap::new(), active_topics, - tickets: Tickets::new(Duration::from_secs(60 * 15)), + tickets: Tickets::new(TICKET_LIMITER), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, @@ -610,19 +628,7 @@ impl Service { entry.node.value.node_id(), topic_hash ), - InsertResult::Failed(FailureReason::BucketFull) => { - error!("Table full for topic hash {}", topic_hash) - } - InsertResult::Failed(FailureReason::BucketFilter) => { - error!("Failed bucket filter for topic hash {}", topic_hash) - } - InsertResult::Failed(FailureReason::TableFilter) => { - error!("Failed table filter for topic hash {}", topic_hash) - } - InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { - error!("Invalid self update for topic hash {}", topic_hash) - } - InsertResult::Failed(_) => error!("Failed to insert ENR for topic hash 
{}", topic_hash), + InsertResult::Failed(f) => error!("Failed to insert ENR for topic hash {}. Failure reason: {:?}", topic_hash, f), } } self.topics_kbuckets.insert(topic_hash, kbuckets); @@ -630,7 +636,7 @@ impl Service { self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); } ServiceRequest::RegisterTopic(topic_hash) => { - if self.topics.insert(topic_hash, HashMap::new()).is_some() { + if self.registration_attempts.insert(topic_hash, HashMap::new()).is_some() { warn!("This topic is already being advertised"); } else { // NOTE: Currently we don't expose custom filter support in the configuration. Users can @@ -668,23 +674,11 @@ impl Service { entry.node.value.node_id(), topic_hash ), - InsertResult::Failed(FailureReason::BucketFull) => { - error!("Table full for topic hash {}", topic_hash) - } - InsertResult::Failed(FailureReason::BucketFilter) => { - error!("Failed bucket filter for topic hash {}", topic_hash) - } - InsertResult::Failed(FailureReason::TableFilter) => { - error!("Failed table filter for topic hash {}", topic_hash) - } - InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { - error!("Invalid self update for topic hash {}", topic_hash) - } - InsertResult::Failed(_) => error!("Failed to insert ENR for topic hash {}", topic_hash), + InsertResult::Failed(f) => error!("Failed to insert ENR for topic hash {}. 
Failure reason: {:?}", topic_hash, f), } } self.topics_kbuckets.insert(topic_hash, kbuckets); - METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); self.send_register_topics(topic_hash); } @@ -695,8 +689,8 @@ impl Service { } } ServiceRequest::RemoveTopic(topic_hash, callback) => { - if self.topics.remove(&topic_hash).is_some() { - METRICS.topics_to_publish.store(self.topics.len(), Ordering::Relaxed); + if self.registration_attempts.remove(&topic_hash).is_some() { + METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); if callback.send(Ok(base64::encode(topic_hash.as_bytes()))).is_err() { error!("Failed to return the removed topic"); } @@ -802,7 +796,7 @@ impl Service { // When the ticket time expires a new regtopic request is automatically sent // to the ticket issuer and the registration state for the given topic is // updated. - if let Some(reg_attempts) = self.topics.get_mut(&active_topic.topic()) { + if let Some(reg_attempts) = self.registration_attempts.get_mut(&active_topic.topic()) { for kbucket_reg_attempts in reg_attempts.values_mut() { let reg_state = kbucket_reg_attempts.remove(active_topic.node_id()); if reg_state.is_some() { @@ -855,34 +849,62 @@ impl Service { kbuckets.get_mut().iter().count(), topic_hash ); - let reg_attempts = self.topics.entry(topic_hash).or_default(); + let reg_attempts = self.registration_attempts.entry(topic_hash).or_default(); // Remove expired ads let mut new_reg_peers = Vec::new(); - // WARNING! 
This currently only works as long as buckets range is one bit + for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { // Remove expired registrations if let Entry::Occupied(ref mut entry) = reg_attempts.entry(index as u64) { let registrations = entry.get_mut(); registrations.retain(|_, reg_attempt| { if let RegistrationState::Confirmed(insert_time) = reg_attempt { - insert_time.elapsed() >= Duration::from_secs(15 * 60) + insert_time.elapsed() < AD_LIFETIME } else { - false + true } }); } let registrations = reg_attempts.entry(index as u64).or_default(); - // The count of active registration attempts after expired adds have been removed + let max_reg_attempts_bucket = self.config.max_nodes_response; + let mut new_peers = Vec::new(); + + // Attempt initating a connection to newly discovred peers if any. + if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { + peers.retain(|peer| { + if new_peers.len() + registrations.len() >= max_reg_attempts_bucket { + true + } else if let Entry::Vacant(_) = registrations.entry(peer.node_id()) { + debug!("Found new registration peer in discovered peers for topic {}. Peer: {:?}", topic_hash, peer.node_id()); + new_peers.push(peer.clone()); + false + } else { + debug_unreachable!( + "Newly discovered peer {} shouldn't be stored in registration attempts", + peer.node_id() + ); + true + } + }); + new_reg_peers.append(&mut new_peers); + } + + // The count of active registration attempts for a distance after expired ads have been + // removed is less than the max number of registration attempts that should be active + // per bucket and is not equal to the total number of peers available in that bucket. 
if registrations.len() < self.config.max_nodes_response && registrations.len() != bucket.num_entries() { - let mut new_peers = Vec::new(); for peer in bucket.iter() { if new_peers.len() + registrations.len() >= self.config.max_nodes_response { break; } if let Entry::Vacant(_) = registrations.entry(*peer.key.preimage()) { - debug!("Found new reg peer. Peer: {:?}", peer.key.preimage()); + debug!( + "Found new registration peer in kbuckets of topic {}. Peer: {:?}", + topic_hash, + peer.key.preimage() + ); new_peers.push(peer.value.clone()) } } @@ -894,6 +916,7 @@ impl Service { if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) .map_err(|e| error!("Failed to send REGTOPIC to peer. Error: {:?}", e)) { + // Registration attempts are acknowledged upon receiving a TICKET or REGCONFIRMATION response. self.reg_topic_request(node_contact, topic_hash, local_enr.clone(), None); } } @@ -1732,7 +1755,8 @@ impl Service { let peer_key: kbucket::Key = node_id.into(); let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { - let registration_attempts = self.topics.entry(topic).or_default(); + let registration_attempts = + self.registration_attempts.entry(topic).or_default(); registration_attempts .entry(distance) .or_default() @@ -1748,7 +1772,8 @@ impl Service { let peer_key: kbucket::Key = node_id.into(); let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { - let registration_attempts = self.topics.entry(topic).or_default(); + let registration_attempts = + self.registration_attempts.entry(topic).or_default(); registration_attempts .entry(distance) .or_default() @@ -2320,37 +2345,11 @@ impl Service { kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), kbucket::Entry::Absent(_) => { - match 
kbuckets_topic.insert_or_update( - &key, - enr.clone(), - NodeStatus { - state: ConnectionState::Disconnected, - direction: ConnectionDirection::Incoming, - }, - ) { - InsertResult::Inserted - | InsertResult::Pending { .. } - | InsertResult::StatusUpdated { .. } - | InsertResult::ValueUpdated - | InsertResult::Updated { .. } - | InsertResult::UpdatedPending => trace!( - "Added node id {} to kbucket of topic hash {:?}", - enr.node_id(), - topic_hash - ), - InsertResult::Failed(FailureReason::BucketFull) => { - error!("Table full") - } - InsertResult::Failed(FailureReason::BucketFilter) => { - error!("Failed bucket filter") - } - InsertResult::Failed(FailureReason::TableFilter) => { - error!("Failed table filter") - } - InsertResult::Failed(FailureReason::InvalidSelfUpdate) => { - error!("Invalid self update") - } - InsertResult::Failed(_) => error!("Failed to insert ENR"), + if let Some(topic_hash) = topic_hash { + self.discovered_peers_topic + .entry(topic_hash) + .or_default() + .push(enr.clone()); } false } diff --git a/src/service/test.rs b/src/service/test.rs index 56e1d816e..c0babb500 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -98,8 +98,9 @@ async fn build_service( event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3).unwrap(), tickets: Tickets::new(Duration::from_secs(60 * 15)), - topics: HashMap::new(), + registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), + discovered_peers_topic: HashMap::new(), active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3).unwrap(), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( From e3e81266c4d7a75ec258bbdaba38715408104f40 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 11 Jul 2022 11:24:34 +0200 Subject: [PATCH 242/391] Add bounds to discovered topic peers storage --- src/service.rs | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/src/service.rs 
b/src/service.rs index 42b5cece3..4db852101 100644 --- a/src/service.rs +++ b/src/service.rs @@ -143,6 +143,9 @@ impl TalkRequest { /// The max wait time accpeted for tickets. const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; +/// The time window within in which the number of new tickets from a peer for a topic will be limitied. +const TICKET_LIMITER: Duration = Duration::from_secs(60 * 15); + /// The max nodes to adveritse for a topic. const MAX_ADS_TOPIC: usize = 100; @@ -155,12 +158,12 @@ const MAX_ADS_SUBNET_TOPIC: usize = 5; /// The max ads per subnet. const MAX_ADS_SUBNET: usize = 50; -/// The time window within in which the number of new tickets from a peer for a topic will be limitied. -const TICKET_LIMITER: Duration = Duration::from_secs(60 * 15); - /// The time after a REGCONFIRMATION is sent that an ad is placed. const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); +/// The max number of uncontacted peers to store before the kbuckets per topic. +const MAX_UNCONTACTED_PEERS_TOPIC: usize = 1000; + /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { /// A request to start a query. There are two types of queries: @@ -265,7 +268,7 @@ pub struct Service { /// The peers returned in a NODES response to a TOPICQUERY or REGTOPIC request are inserted in /// this intermediary stroage to check their connectivity before inserting them in the topic's /// kbuckets. - discovered_peers_topic: HashMap>, + discovered_peers_topic: HashMap>, /// Ads currently advertised on other nodes. active_topics: Ads, @@ -869,19 +872,19 @@ impl Service { let max_reg_attempts_bucket = self.config.max_nodes_response; let mut new_peers = Vec::new(); - // Attempt initating a connection to newly discovred peers if any. + // Attempt sending a request to uncontacted peers if any. 
if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { - peers.retain(|peer| { + peers.retain(|node_id, enr | { if new_peers.len() + registrations.len() >= max_reg_attempts_bucket { true - } else if let Entry::Vacant(_) = registrations.entry(peer.node_id()) { - debug!("Found new registration peer in discovered peers for topic {}. Peer: {:?}", topic_hash, peer.node_id()); - new_peers.push(peer.clone()); + } else if let Entry::Vacant(_) = registrations.entry(*node_id) { + debug!("Found new registration peer in discovered peers for topic {}. Peer: {:?}", topic_hash, node_id); + new_peers.push(enr.clone()); false } else { debug_unreachable!( "Newly discovered peer {} shouldn't be stored in registration attempts", - peer.node_id() + node_id ); true } @@ -2346,10 +2349,13 @@ impl Service { kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), kbucket::Entry::Absent(_) => { if let Some(topic_hash) = topic_hash { - self.discovered_peers_topic - .entry(topic_hash) - .or_default() - .push(enr.clone()); + let discovered_peers = + self.discovered_peers_topic.entry(topic_hash).or_default(); + // If the intermediary storage before the topic's kbucktes is at bounds, discard the + // uncontacted peers. 
+ if discovered_peers.len() < MAX_UNCONTACTED_PEERS_TOPIC { + discovered_peers.insert(enr.node_id(), enr.clone()); + }; } false } From 84dfb3f2c3c6d65de530bb7091ffbf629ff0166c Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 12 Jul 2022 12:52:51 -0500 Subject: [PATCH 243/391] remove workflow difference --- .github/workflows/build.yml | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a80a2fad2..44196f99c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -6,20 +6,18 @@ jobs: cargo-fmt: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Get latest version of stable rust - run: rustup update stable - - name: Check formatting with cargofmt - run: cargo fmt --all -- --check --config imports_granularity=Crate + - uses: actions/checkout@v2 + - name: Get latest version of stable rust + run: rustup update stable + - name: Check formatting with cargofmt + run: cargo fmt --all -- --check --config imports_granularity=Crate clippy: runs-on: ubuntu-latest needs: cargo-fmt steps: - - uses: actions/checkout@v2 - - name: Get latest version of stable rust - run: rustup update stable - - name: Lint code for quality and style with Clippy - run: cargo clippy --workspace --tests --all-features -- -D warnings + - uses: actions/checkout@v2 + - name: Lint code for quality and style with Clippy + run: cargo clippy --workspace --tests --all-features -- -D warnings release-tests-ubuntu: runs-on: ubuntu-latest needs: cargo-fmt @@ -46,8 +44,6 @@ jobs: container: image: rust steps: - - uses: actions/checkout@v2 - - name: Get latest version of stable rust - run: rustup update stable - - name: Check rustdoc links - run: RUSTDOCFLAGS="--deny broken_intra_doc_links" cargo doc --verbose --workspace --no-deps --document-private-items + - uses: actions/checkout@v2 + - name: Check rustdoc links + run: RUSTDOCFLAGS="--deny broken_intra_doc_links" cargo 
doc --verbose --workspace --no-deps --document-private-items From 17156b2aef01bca7d6860eab32da8f3e63597cbd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 12:53:50 +0200 Subject: [PATCH 244/391] Update method for getting active ads --- src/discv5.rs | 10 +++++----- src/service.rs | 37 +++++++++++++++++++++++-------------- src/service/test.rs | 1 - 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 455cde53f..0825f27e7 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -13,10 +13,7 @@ //! The server can be shutdown using the [`Discv5::shutdown`] function. use crate::{ - advertisement::{ - topic::{Sha256Topic as Topic, TopicHash}, - Ads, - }, + advertisement::topic::{Sha256Topic as Topic, TopicHash}, error::{Discv5Error, QueryError, RequestError}, kbucket::{ self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable, @@ -29,6 +26,7 @@ use crate::{ use enr::{CombinedKey, EnrError, EnrKey, NodeId}; use parking_lot::RwLock; use std::{ + collections::HashMap, future::Future, net::SocketAddr, sync::Arc, @@ -596,7 +594,9 @@ impl Discv5 { } /// Retrieves the topics that we have published on other nodes. - pub fn active_topics(&self) -> impl Future> + 'static { + pub fn active_topics( + &self, + ) -> impl Future>, RequestError>> + 'static { // the service will verify if this node is contactable, we just send it and // await a response. let (callback_send, callback_recv) = oneshot::channel(); diff --git a/src/service.rs b/src/service.rs index 4db852101..6cb74f615 100644 --- a/src/service.rs +++ b/src/service.rs @@ -191,7 +191,7 @@ pub enum ServiceRequest { /// Stops publishing this node as an advetiser for a topic. RemoveTopic(TopicHash, oneshot::Sender>), /// Retrieves the ads currently published by this node on other nodes in a discv5 network. 
- ActiveTopics(oneshot::Sender>), + ActiveTopics(oneshot::Sender>, RequestError>>), } use crate::discv5::PERMIT_BAN_LIST; @@ -270,9 +270,6 @@ pub struct Service { /// kbuckets. discovered_peers_topic: HashMap>, - /// Ads currently advertised on other nodes. - active_topics: Ads, - /// Tickets received by other nodes. tickets: Tickets, @@ -536,7 +533,6 @@ impl Service { registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), - active_topics, tickets: Tickets::new(TICKET_LIMITER), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( @@ -687,7 +683,28 @@ impl Service { } } ServiceRequest::ActiveTopics(callback) => { - if callback.send(Ok(self.active_topics.clone())).is_err() { + let mut active_topics = HashMap::>::new(); + self.registration_attempts.iter_mut().for_each(|(topic_hash, reg_attempts_by_distance)| { + for (_distance, reg_attempts) in reg_attempts_by_distance { + reg_attempts.retain(|node_id, reg_state| { + if let RegistrationState::Confirmed(insert_time) = reg_state { + if insert_time.elapsed() < AD_LIFETIME { + active_topics.entry(*topic_hash).or_default().push(*node_id); + true + } else { + false + } + } else { + true + } + }); + } + }); + METRICS + .active_ads + .store(active_topics.values().flatten().count(), Ordering::Relaxed); + + if callback.send(Ok(active_topics)).is_err() { error!("Failed to return active topics"); } } @@ -853,7 +870,6 @@ impl Service { topic_hash ); let reg_attempts = self.registration_attempts.entry(topic_hash).or_default(); - // Remove expired ads let mut new_reg_peers = Vec::new(); for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { @@ -1783,13 +1799,6 @@ impl Service { .entry(node_id) .or_insert(RegistrationState::Confirmed(now)); - let _ = self.active_topics.insert(enr, topic).map_err(|e| { - error!("Couldn't insert topic into active topics. 
Error: {}.", e) - }); - - METRICS - .active_ads - .store(self.active_topics.len(), Ordering::Relaxed); METRICS .active_regtopic_req .store(self.active_regtopic_requests.len(), Ordering::Relaxed); diff --git a/src/service/test.rs b/src/service/test.rs index c0babb500..c2185cc3b 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -101,7 +101,6 @@ async fn build_service( registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), - active_topics: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3).unwrap(), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, From d5e5e9533144512545ed2da6ef7b3860aa7d59e7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 13:00:15 +0200 Subject: [PATCH 245/391] fixup! Update method for getting active ads --- src/service.rs | 46 ++++++++++++++++------------------------------ 1 file changed, 16 insertions(+), 30 deletions(-) diff --git a/src/service.rs b/src/service.rs index 396ea9e6f..d3c44afe8 100644 --- a/src/service.rs +++ b/src/service.rs @@ -481,18 +481,6 @@ impl Service { return Err(Error::new(ErrorKind::InvalidInput, e)); } }; - let active_topics = match Ads::new( - AD_LIFETIME, - MAX_ADS_TOPIC, - MAX_ADS, - MAX_ADS_SUBNET, - MAX_ADS_SUBNET_TOPIC, - ) { - Ok(ads) => ads, - Err(e) => { - return Err(Error::new(ErrorKind::InvalidInput, e)); - } - }; // A key is generated for en-/decrypting tickets that are issued upon receiving a topic // regsitration attempt. 
@@ -685,7 +673,7 @@ impl Service { ServiceRequest::ActiveTopics(callback) => { let mut active_topics = HashMap::>::new(); self.registration_attempts.iter_mut().for_each(|(topic_hash, reg_attempts_by_distance)| { - for (_distance, reg_attempts) in reg_attempts_by_distance { + for reg_attempts in reg_attempts_by_distance.values_mut() { reg_attempts.retain(|node_id, reg_state| { if let RegistrationState::Confirmed(insert_time) = reg_state { if insert_time.elapsed() < AD_LIFETIME { @@ -1782,23 +1770,21 @@ impl Service { } } ResponseBody::RegisterConfirmation { topic } => { - if let Some(enr) = active_request.contact.enr() { - let now = Instant::now(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - let registration_attempts = - self.registration_attempts.entry(topic).or_default(); - registration_attempts - .entry(distance) - .or_default() - .entry(node_id) - .or_insert(RegistrationState::Confirmed(now)); - - METRICS - .active_regtopic_req - .store(self.active_regtopic_requests.len(), Ordering::Relaxed); - } + let now = Instant::now(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let registration_attempts = + self.registration_attempts.entry(topic).or_default(); + registration_attempts + .entry(distance) + .or_default() + .entry(node_id) + .or_insert(RegistrationState::Confirmed(now)); + + METRICS + .active_regtopic_req + .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } } } From a78cf3f076ccf82cfef6895a426be2a11ad5795f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 13:50:37 +0200 Subject: [PATCH 246/391] Ensure topics republish --- src/service.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/service.rs b/src/service.rs index 
d3c44afe8..b744b2986 100644 --- a/src/service.rs +++ b/src/service.rs @@ -540,6 +540,9 @@ impl Service { /// The main execution loop of the discv5 serviced. async fn start(&mut self) { + // In the case where not many peers populate the topic's kbuckets, ensure topics keep being republished. + let mut registration_interval = tokio::time::interval(AD_LIFETIME); + loop { tokio::select! { _ = &mut self.exit => { @@ -827,12 +830,10 @@ impl Service { } } Some(topic_query_progress) = self.active_topic_queries.next() => { - trace!("Query is in state {:?}", topic_query_progress); match topic_query_progress { TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) | TopicQueryState::Dry(topic_hash) => { if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { if let Some(callback) = query.callback { - trace!("Sending result of query for topic hash {} to discv5 layer", topic_hash); if callback.send(Ok(query.results.into_values().collect::>())).is_err() { warn!("Callback dropped for topic query {}. 
Results dropped", topic_hash); } @@ -844,6 +845,12 @@ impl Service { } } } + _ = registration_interval.tick() => { + let topics_to_reg = self.registration_attempts.keys().copied().collect::>(); + for topic_hash in topics_to_reg { + self.send_register_topics(topic_hash); + } + } } } } @@ -1765,9 +1772,9 @@ impl Service { .or_default() .entry(node_id) .or_insert(RegistrationState::Ticket); - self.send_register_topics(topic); } } + self.send_register_topics(topic_hash); } ResponseBody::RegisterConfirmation { topic } => { let now = Instant::now(); @@ -1786,6 +1793,7 @@ impl Service { .active_regtopic_req .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } + self.send_register_topics(topic_hash); } } } else { From f0a0e1e679f223e3929414b1fed6774858773dbd Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 13:51:50 +0200 Subject: [PATCH 247/391] Fix copy paste bug --- src/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index b744b2986..aa9530b73 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1774,7 +1774,7 @@ impl Service { .or_insert(RegistrationState::Ticket); } } - self.send_register_topics(topic_hash); + self.send_register_topics(topic); } ResponseBody::RegisterConfirmation { topic } => { let now = Instant::now(); @@ -1793,7 +1793,7 @@ impl Service { .active_regtopic_req .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } - self.send_register_topics(topic_hash); + self.send_register_topics(topic); } } } else { From b9a53be0f5dc1ccdf1a8e69768856c229434c714 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 14:47:17 +0200 Subject: [PATCH 248/391] fixup! 
Ensure topics republish --- src/service.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index aa9530b73..f9af89bf3 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1539,6 +1539,10 @@ impl Service { active_request.request_body { self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); + // If a regtopic request runs dry (not enough regsitration attempts per topic kbucket + // and no more peers to contact) any new peers to contact will come with a NODES response + // to a REGTOPIC request. + self.send_register_topics(topic); } else if let RequestBody::FindNode { .. } = active_request.request_body { self.discovered(&node_id, nodes, active_request.query_id, None) } @@ -1774,7 +1778,6 @@ impl Service { .or_insert(RegistrationState::Ticket); } } - self.send_register_topics(topic); } ResponseBody::RegisterConfirmation { topic } => { let now = Instant::now(); @@ -1793,7 +1796,6 @@ impl Service { .active_regtopic_req .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } - self.send_register_topics(topic); } } } else { From 2f9223a02ef9c201d15fcffc2240126a745abaa2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 15:13:13 +0200 Subject: [PATCH 249/391] Proceed with topic lookup upon newly discovered peers for topic --- src/service.rs | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/src/service.rs b/src/service.rs index f9af89bf3..81bd4f335 100644 --- a/src/service.rs +++ b/src/service.rs @@ -292,8 +292,6 @@ pub enum TopicQueryState { /// Not enough ads have been returned from the first round of sending TOPICQUERY /// requests, new peers in the topic's kbucktes should be queried. Unsatisfied(TopicHash, usize), - /// No new peers in the topic's kbuckets can be found to send TOPICQUERYs too. - Dry(TopicHash), } /// The state of a response to a single TOPICQUERY request. 
A topic lookup/query is @@ -366,7 +364,7 @@ impl Stream for ActiveTopicQueries { fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { for (topic_hash, query) in self.queries.iter() { if query.dry { - return Poll::Ready(Some(TopicQueryState::Dry(*topic_hash))); + return Poll::Pending; } else if query.results.len() >= self.num_results { return Poll::Ready(Some(TopicQueryState::Finished(*topic_hash))); } else if query.start.elapsed() >= self.time_out { @@ -831,7 +829,7 @@ impl Service { } Some(topic_query_progress) = self.active_topic_queries.next() => { match topic_query_progress { - TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) | TopicQueryState::Dry(topic_hash) => { + TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) => { if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { if let Some(callback) = query.callback { if callback.send(Ok(query.results.into_values().collect::>())).is_err() { @@ -842,7 +840,7 @@ impl Service { }, TopicQueryState::Unsatisfied(topic_hash, num_query_peers) => { self.send_topic_queries(topic_hash, num_query_peers, None); - } + }, } } _ = registration_interval.tick() => { @@ -1539,10 +1537,6 @@ impl Service { active_request.request_body { self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); - // If a regtopic request runs dry (not enough regsitration attempts per topic kbucket - // and no more peers to contact) any new peers to contact will come with a NODES response - // to a REGTOPIC request. - self.send_register_topics(topic); } else if let RequestBody::FindNode { .. 
} = active_request.request_body { self.discovered(&node_id, nodes, active_request.query_id, None) } @@ -2310,7 +2304,13 @@ impl Service { query_id: Option, topic_hash: Option, ) { + if enrs.is_empty() { + warn!("Discovered was called with an empty enrs vector"); + return; + } + let local_id = self.local_enr.read().node_id(); + enrs.retain(|enr| { if enr.node_id() == local_id { return false; @@ -2356,7 +2356,9 @@ impl Service { // uncontacted peers. if discovered_peers.len() < MAX_UNCONTACTED_PEERS_TOPIC { discovered_peers.insert(enr.node_id(), enr.clone()); - }; + } else { + warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); + } } false } @@ -2394,6 +2396,18 @@ impl Service { source != &enr.node_id() }); + if let Some(topic_hash) = topic_hash { + // If a topic lookup has dried up (no more peers to query), the query can now proceed as long as + // it hasn't timed out already. + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { + query.dry = false; + } + // If a topic registration runs dry (not enough regsitration attempts per topic kbucket + // and no more peers to contact) any new peers to contact will come with a NODES response + // to a REGTOPIC request or a TOPICQUERY if the same topic has also been looked up. 
+ self.send_register_topics(topic_hash); + } + // if this is part of a query, update the query if let Some(query_id) = query_id { if let Some(query) = self.queries.get_mut(query_id) { From d4ac5fc29148de5662af961602ae0c8c2aaf287a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 15:18:34 +0200 Subject: [PATCH 250/391] Run cargo fmt --- src/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index 81bd4f335..9ffa2c462 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2310,7 +2310,7 @@ impl Service { } let local_id = self.local_enr.read().node_id(); - + enrs.retain(|enr| { if enr.node_id() == local_id { return false; @@ -2404,7 +2404,7 @@ impl Service { } // If a topic registration runs dry (not enough regsitration attempts per topic kbucket // and no more peers to contact) any new peers to contact will come with a NODES response - // to a REGTOPIC request or a TOPICQUERY if the same topic has also been looked up. + // to a REGTOPIC request, or a TOPICQUERY if the same topic has also been looked up. 
self.send_register_topics(topic_hash); } From 5cafcb46f90cd5440b5f0d6649824356453efb93 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 19:42:48 +0200 Subject: [PATCH 251/391] Fix uncontacted peers first in topic request bug --- src/service.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/service.rs b/src/service.rs index 9ffa2c462..e061fb3d6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -858,7 +858,7 @@ impl Service { trace!("Sending REGTOPICS"); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { trace!( - "Found {} new entries in kbuckets of topic hash {}", + "Found {} entries in kbuckets of topic hash {}", kbuckets.get_mut().iter().count(), topic_hash ); @@ -886,16 +886,10 @@ impl Service { peers.retain(|node_id, enr | { if new_peers.len() + registrations.len() >= max_reg_attempts_bucket { true - } else if let Entry::Vacant(_) = registrations.entry(*node_id) { + } else { debug!("Found new registration peer in discovered peers for topic {}. Peer: {:?}", topic_hash, node_id); new_peers.push(enr.clone()); false - } else { - debug_unreachable!( - "Newly discovered peer {} shouldn't be stored in registration attempts", - node_id - ); - true } }); new_reg_peers.append(&mut new_peers); @@ -2406,6 +2400,7 @@ impl Service { // and no more peers to contact) any new peers to contact will come with a NODES response // to a REGTOPIC request, or a TOPICQUERY if the same topic has also been looked up. 
self.send_register_topics(topic_hash); + return; } // if this is part of a query, update the query From 8c38d10514faaa09a5c3d5c200f81aa8357a380f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 20:08:50 +0200 Subject: [PATCH 252/391] Add trace message --- src/handler/mod.rs | 24 ++++++++++++++++-------- src/service.rs | 5 +++-- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index acf782cc2..c04ee69a1 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1113,20 +1113,28 @@ impl Handler { // Find a matching request, if any trace!("Received {} response", response.body); - let request_call = if let Some(request_call) = self.active_requests.remove(&node_address) { - Some(request_call) + let (request_call, is_regconf) = if let Some(request_call) = self.active_requests_regconf.remove(&node_address) { + (Some(request_call), true) } else { - self.active_requests_regconf.remove(&node_address) + (self.active_requests.remove(&node_address), false) }; if let Some(mut request_call) = request_call { if request_call.id() != &response.id { - trace!( - "Received an RPC Response to an unknown request. Likely late response. {}", - node_address - ); // add the request back and reset the timer - self.active_requests.insert(node_address, request_call); + if is_regconf { + trace!( + "Received an RPC Response from a node we are also waiting for a REGISTERCONFIRMATION from. {}", + node_address + ); + self.active_requests_regconf.insert(node_address, request_call); + } else { + trace!( + "Received an RPC Response to an unknown request. Likely late response. 
{}", + node_address + ); + self.active_requests.insert(node_address, request_call); + } return; } diff --git a/src/service.rs b/src/service.rs index e061fb3d6..6b1e99e7c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -957,7 +957,7 @@ impl Service { peers.iter().count(), topic_hash ); - // Prefer querying nodes further away, starting at distance 256 by to avoid hotspots + // Prefer querying nodes further away, starting at distance 256 by to avoid hotspots. let new_query_peers_iter = peers.iter().rev().filter_map(|entry| { (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { query @@ -974,7 +974,7 @@ impl Service { break; } } - // If no new nodes can be found to query, return TOPICQUERY request early. + // If no new nodes can be found to query, let topic lookup wait for new peers or time out. if new_query_peers.is_empty() { debug!("Found no new peers to send TOPICQUERY to, setting query status to dry"); if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { @@ -2344,6 +2344,7 @@ impl Service { kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), kbucket::Entry::Absent(_) => { if let Some(topic_hash) = topic_hash { + trace!("Discovered new peer {} for topic hash {}", enr.node_id(), topic_hash); let discovered_peers = self.discovered_peers_topic.entry(topic_hash).or_default(); // If the intermediary storage before the topic's kbucktes is at bounds, discard the From f1f6a941af0da80455197ee9f2d091b5fb0fc68a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 21:20:08 +0200 Subject: [PATCH 253/391] fixup! 
Fix uncontacted peers first in topic request bug --- src/handler/mod.rs | 16 ++++---- src/metrics.rs | 6 --- src/service.rs | 95 ++++++++++++++++++++++++++-------------------- 3 files changed, 62 insertions(+), 55 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index c04ee69a1..7990cfa18 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1113,11 +1113,12 @@ impl Handler { // Find a matching request, if any trace!("Received {} response", response.body); - let (request_call, is_regconf) = if let Some(request_call) = self.active_requests_regconf.remove(&node_address) { - (Some(request_call), true) - } else { - (self.active_requests.remove(&node_address), false) - }; + let (request_call, is_regconf) = + if let Some(request_call) = self.active_requests_regconf.remove(&node_address) { + (Some(request_call), true) + } else { + (self.active_requests.remove(&node_address), false) + }; if let Some(mut request_call) = request_call { if request_call.id() != &response.id { @@ -1127,7 +1128,8 @@ impl Handler { "Received an RPC Response from a node we are also waiting for a REGISTERCONFIRMATION from. {}", node_address ); - self.active_requests_regconf.insert(node_address, request_call); + self.active_requests_regconf + .insert(node_address, request_call); } else { trace!( "Received an RPC Response to an unknown request. Likely late response. {}", @@ -1390,7 +1392,7 @@ impl Handler { // Still a REGCONFIRMATION may come hence request call is reinserted, in a separate // struct to avoid blocking further requests to the node address during the request timeout. 
self.active_requests_regconf - .insert(node_address.clone(), request_call.clone()); + .insert(node_address.clone(), request_call); if let Err(e) = self .service_send .send(HandlerOut::Response( diff --git a/src/metrics.rs b/src/metrics.rs index 18cf464a9..1f2b1c0cd 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -18,8 +18,6 @@ pub struct InternalMetrics { pub bytes_recv: AtomicUsize, /// The number of topics to attempt advertising on other nodes. pub topics_to_publish: AtomicUsize, - /// The number of ads currently advertised on other nodes. - pub active_ads: AtomicUsize, /// The number of ads currently advertised locally for other nodes. pub hosted_ads: AtomicUsize, /// The number of active regtopic requests awaiting a REGCONFIRMATION response. @@ -35,7 +33,6 @@ impl Default for InternalMetrics { bytes_sent: AtomicUsize::new(0), bytes_recv: AtomicUsize::new(0), topics_to_publish: AtomicUsize::new(0), - active_ads: AtomicUsize::new(0), hosted_ads: AtomicUsize::new(0), active_regtopic_req: AtomicUsize::new(0), } @@ -69,8 +66,6 @@ pub struct Metrics { pub bytes_recv: usize, /// The number of topics to attempt advertising on other nodes. pub topics_to_publish: usize, - /// The number of ads currently advertised on other nodes. - pub active_ads: usize, /// The number of ads currently advertised locally for other nodes. pub hosted_ads: usize, /// The number of active regtopic requests. 
@@ -88,7 +83,6 @@ impl From<&METRICS> for Metrics { bytes_sent: internal_metrics.bytes_sent.load(Ordering::Relaxed), bytes_recv: internal_metrics.bytes_recv.load(Ordering::Relaxed), topics_to_publish: internal_metrics.topics_to_publish.load(Ordering::Relaxed), - active_ads: internal_metrics.active_ads.load(Ordering::Relaxed), hosted_ads: internal_metrics.hosted_ads.load(Ordering::Relaxed), active_regtopic_req: internal_metrics.active_regtopic_req.load(Ordering::Relaxed), } diff --git a/src/service.rs b/src/service.rs index 6b1e99e7c..b8b6249a9 100644 --- a/src/service.rs +++ b/src/service.rs @@ -162,7 +162,7 @@ const MAX_ADS_SUBNET: usize = 50; const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); /// The max number of uncontacted peers to store before the kbuckets per topic. -const MAX_UNCONTACTED_PEERS_TOPIC: usize = 1000; +const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { @@ -268,7 +268,7 @@ pub struct Service { /// The peers returned in a NODES response to a TOPICQUERY or REGTOPIC request are inserted in /// this intermediary stroage to check their connectivity before inserting them in the topic's /// kbuckets. - discovered_peers_topic: HashMap>, + discovered_peers_topic: HashMap>>, /// Tickets received by other nodes. tickets: Tickets, @@ -689,9 +689,6 @@ impl Service { }); } }); - METRICS - .active_ads - .store(active_topics.values().flatten().count(), Ordering::Relaxed); if callback.send(Ok(active_topics)).is_err() { error!("Failed to return active topics"); @@ -877,22 +874,27 @@ impl Service { } }); } - let registrations = reg_attempts.entry(index as u64).or_default(); + let distance = index as u64; + + let registrations = reg_attempts.entry(distance).or_default(); let max_reg_attempts_bucket = self.config.max_nodes_response; let mut new_peers = Vec::new(); // Attempt sending a request to uncontacted peers if any. 
if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { - peers.retain(|node_id, enr | { - if new_peers.len() + registrations.len() >= max_reg_attempts_bucket { - true - } else { - debug!("Found new registration peer in discovered peers for topic {}. Peer: {:?}", topic_hash, node_id); - new_peers.push(enr.clone()); - false - } - }); - new_reg_peers.append(&mut new_peers); + if let Some(bucket) = peers.get_mut(&distance) { + bucket.retain(|node_id, enr | { + if new_peers.len() + registrations.len() >= max_reg_attempts_bucket { + true + } else { + debug!("Found new registration peer in discovered peers for topic {}. Peer: {:?}", topic_hash, node_id); + registrations.insert(*node_id, RegistrationState::Ticket); + new_peers.push(enr.clone()); + false + } + }); + new_reg_peers.append(&mut new_peers); + } } // The count of active registration attempts for a distance after expired ads have been @@ -905,12 +907,14 @@ impl Service { if new_peers.len() + registrations.len() >= self.config.max_nodes_response { break; } - if let Entry::Vacant(_) = registrations.entry(*peer.key.preimage()) { + let node_id = *peer.key.preimage(); + if let Entry::Vacant(_) = registrations.entry(node_id) { debug!( "Found new registration peer in kbuckets of topic {}. 
Peer: {:?}", topic_hash, peer.key.preimage() ); + registrations.insert(node_id, RegistrationState::Ticket); new_peers.push(peer.value.clone()) } } @@ -1754,17 +1758,6 @@ impl Service { topic, ) .ok(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - let registration_attempts = - self.registration_attempts.entry(topic).or_default(); - registration_attempts - .entry(distance) - .or_default() - .entry(node_id) - .or_insert(RegistrationState::Ticket); - } } } ResponseBody::RegisterConfirmation { topic } => { @@ -2349,12 +2342,18 @@ impl Service { self.discovered_peers_topic.entry(topic_hash).or_default(); // If the intermediary storage before the topic's kbucktes is at bounds, discard the // uncontacted peers. - if discovered_peers.len() < MAX_UNCONTACTED_PEERS_TOPIC { - discovered_peers.insert(enr.node_id(), enr.clone()); + let node_id = enr.node_id(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = NodeId::new(&topic_hash.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let bucket = discovered_peers.entry(distance).or_default(); + if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { + bucket.insert(node_id, enr.clone()); } else { warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); } } + } false } _ => false, @@ -2376,7 +2375,6 @@ impl Service { "Failed to update discovered ENR. 
Node: {}, Reason: {:?}", source, reason ); - return false; // Remove this peer from the discovered list if the update failed } } @@ -2692,6 +2690,29 @@ impl Service { ); } } + self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)); + return; + } + RequestBody::RegisterTopic { + topic, + enr: _, + ticket: _, + } => { + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let registration_attempts = + self.registration_attempts.entry(topic).or_default(); + if let Some(bucket) = registration_attempts.get_mut(&distance) { + bucket.remove(&node_id); + } + + METRICS + .active_regtopic_req + .store(self.active_regtopic_requests.len(), Ordering::Relaxed); + } + self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)); + return; } // for all other requests, if any are queries, mark them as failures. _ => { @@ -2712,17 +2733,7 @@ impl Service { } } - match active_request.request_body { - RequestBody::RegisterTopic { - topic, - enr: _, - ticket: _, - } - | RequestBody::TopicQuery { topic } => { - self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)) - } - _ => self.connection_updated(node_id, ConnectionStatus::Disconnected, None), - } + self.connection_updated(node_id, ConnectionStatus::Disconnected, None); } } From 31ff35f5f3735c34f9544e32c5578db02de9be55 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 21:55:55 +0200 Subject: [PATCH 254/391] Fix bug of proceeding with query if discovered called with empty enrs vec --- src/service.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/service.rs b/src/service.rs index b8b6249a9..7cd26018c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2291,11 +2291,6 @@ impl Service { query_id: Option, topic_hash: Option, ) { - if enrs.is_empty() { - warn!("Discovered was called with an empty enrs 
vector"); - return; - } - let local_id = self.local_enr.read().node_id(); enrs.retain(|enr| { @@ -2390,6 +2385,9 @@ impl Service { }); if let Some(topic_hash) = topic_hash { + if enrs.is_empty() { + return; + } // If a topic lookup has dried up (no more peers to query), the query can now proceed as long as // it hasn't timed out already. if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { From 1595444b5de59c40af70b0f3d320b7a242baf3a0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 13 Jul 2022 22:34:21 +0200 Subject: [PATCH 255/391] Fix discovery bug --- src/service.rs | 79 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 55 insertions(+), 24 deletions(-) diff --git a/src/service.rs b/src/service.rs index 7cd26018c..c1a60ca46 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2298,6 +2298,8 @@ impl Service { return false; } + let mut new_or_updated_peer = false; + // If any of the discovered nodes are in the routing table, and there contains an older ENR, update it. // If there is an event stream send the Discovered event if self.config.report_discovered_peers { @@ -2326,10 +2328,36 @@ impl Service { // If the ENR exists in the routing table and the discovered ENR has a greater // sequence number, perform some filter checks before updating the enr. - let must_update_enr = if let Some(kbuckets_topic) = kbuckets_topic { + if let Some(kbuckets_topic) = kbuckets_topic { match kbuckets_topic.entry(&key) { - kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Present(entry, _) => { + if entry.value().seq() < enr.seq() { + if let UpdateResult::Failed(reason) = + kbuckets_topic.update_node(&key, enr.clone(), None) { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR for kbucket of topic hash {:?}. 
Node: {}, Reason: {:?}", + topic_hash, source, reason + ); + return false; // Remove this peer from the discovered list if the update failed + } + new_or_updated_peer = true; + } + }, + kbucket::Entry::Pending(mut entry, _) => { + if entry.value().seq() < enr.seq() { + if let UpdateResult::Failed(reason) = + kbuckets_topic.update_node(&key, enr.clone(), None) { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR for kbucket of topic hash {:?}. Node: {}, Reason: {:?}", + topic_hash, source, reason + ); + return false; // Remove this peer from the discovered list if the update failed + } + new_or_updated_peer = true; + } + } kbucket::Entry::Absent(_) => { if let Some(topic_hash) = topic_hash { trace!("Discovered new peer {} for topic hash {}", enr.node_id(), topic_hash); @@ -2342,35 +2370,33 @@ impl Service { let topic_key: kbucket::Key = NodeId::new(&topic_hash.as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { let bucket = discovered_peers.entry(distance).or_default(); - if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { - bucket.insert(node_id, enr.clone()); - } else { - warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); + if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { + bucket.insert(node_id, enr.clone()); + } else { + warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); + } } } - } - false + new_or_updated_peer = true; } - _ => false, + _ => {} } } else { - match self.kbuckets.write().entry(&key) { + let must_update_enr = match self.kbuckets.write().entry(&key) { kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), _ => false, - } - }; - - if must_update_enr { + }; + if must_update_enr { if let UpdateResult::Failed(reason) = - self.kbuckets.write().update_node(&key, enr.clone(), None) - { - 
self.peers_to_ping.remove(&enr.node_id()); - debug!( - "Failed to update discovered ENR. Node: {}, Reason: {:?}", - source, reason - ); - return false; // Remove this peer from the discovered list if the update failed + self.kbuckets.write().update_node(&key, enr.clone(), None) { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR. Node: {}, Reason: {:?}", + source, reason + ); + return false; // Remove this peer from the discovered list if the update failed + } } } } else { @@ -2381,7 +2407,12 @@ impl Service { // requesting the target of the query, this ENR could be the result of requesting the // target-nodes own id. We don't want to add this as a "new" discovered peer in the // query, so we remove it from the discovered list here. - source != &enr.node_id() + if topic_hash.is_some() { + // For a topic lookup or registration only new or updated peers are retained. + new_or_updated_peer && source != &enr.node_id() + } else { + source != &enr.node_id() + } }); if let Some(topic_hash) = topic_hash { From 62a0e397d517d4f73b640f9e4708a86d0b6cdd41 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Jul 2022 16:59:06 +0200 Subject: [PATCH 256/391] Fix bug using uncontacted peers first for topic requests --- src/kbucket.rs | 4 ++- src/service.rs | 98 ++++++++++++++++++++++++++++++-------------------- 2 files changed, 62 insertions(+), 40 deletions(-) diff --git a/src/kbucket.rs b/src/kbucket.rs index 1926c0872..3c824c71e 100644 --- a/src/kbucket.rs +++ b/src/kbucket.rs @@ -499,7 +499,9 @@ where } } - /// Returns an iterator over all the entries in the routing table. + /// Returns an iterator over all the entries in the routing table, which will be ordered + /// in increasing order by distance since the buckets are stored in a vector to which + /// the are added in increasing order. 
pub fn iter(&mut self) -> impl DoubleEndedIterator> { let applied_pending = &mut self.applied_pending; self.buckets.iter_mut().flat_map(move |table| { diff --git a/src/service.rs b/src/service.rs index c1a60ca46..21f728db7 100644 --- a/src/service.rs +++ b/src/service.rs @@ -48,7 +48,7 @@ use more_asserts::debug_unreachable; use parking_lot::RwLock; use rpc::*; use std::{ - collections::{hash_map::Entry, HashMap}, + collections::{hash_map::Entry, BTreeMap, HashMap}, io::{Error, ErrorKind}, net::SocketAddr, pin::Pin, @@ -268,7 +268,7 @@ pub struct Service { /// The peers returned in a NODES response to a TOPICQUERY or REGTOPIC request are inserted in /// this intermediary stroage to check their connectivity before inserting them in the topic's /// kbuckets. - discovered_peers_topic: HashMap>>, + discovered_peers_topic: HashMap>>, /// Tickets received by other nodes. tickets: Tickets, @@ -953,51 +953,71 @@ impl Service { start: Instant::now(), dry: false, }); - let queried_peers = query.queried_peers.clone(); - if let Entry::Occupied(kbuckets) = self.topics_kbuckets.entry(topic_hash) { - let mut peers = kbuckets.get().clone(); - trace!( - "Found {} peers in kbuckets of topic hash {}", - peers.iter().count(), - topic_hash - ); - // Prefer querying nodes further away, starting at distance 256 by to avoid hotspots. - let new_query_peers_iter = peers.iter().rev().filter_map(|entry| { - (!queried_peers.contains_key(entry.node.key.preimage())).then(|| { - query - .queried_peers - .entry(*entry.node.key.preimage()) - .or_default(); - entry.node.value - }) - }); - let mut new_query_peers = Vec::new(); - for enr in new_query_peers_iter { - new_query_peers.push(enr); + + let mut new_query_peers: Vec = Vec::new(); + + // Attempt sending a request to uncontacted peers if any. + if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { + // Prefer querying nodes further away, i.e. in buckets of further distance to topic, to avoid hotspots. 
+ for bucket in peers.values_mut().rev() { if new_query_peers.len() < num_query_peers { break; } + bucket.retain(|node_id, enr| { + if new_query_peers.len() >= num_query_peers { + true + } else if let Entry::Vacant(entry) = query.queried_peers.entry(*node_id) { + entry.insert(false); + new_query_peers.push(enr.clone()); + trace!( + "Found a new topic query peer {} in uncontacted peers of topic hash {}", + node_id, + topic_hash + ); + false + } else { + true + } + }); } - // If no new nodes can be found to query, let topic lookup wait for new peers or time out. - if new_query_peers.is_empty() { - debug!("Found no new peers to send TOPICQUERY to, setting query status to dry"); - if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { - query.dry = true; + } + + if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { + // Prefer querying nodes further away, i.e. in buckets of further distance to topic, to avoid hotspots. + for kbuckets_entry in kbuckets.iter().rev() { + if new_query_peers.len() < num_query_peers { + break; } - return; - } + let node_id = *kbuckets_entry.node.key.preimage(); + let enr = kbuckets_entry.node.value; - trace!("Sending TOPICQUERYs to {} new peers", new_query_peers.len()); - for enr in new_query_peers { - if let Ok(node_contact) = - NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) - .map_err(|e| error!("Failed to send TOPICQUERY to peer. Error: {:?}", e)) - { - self.topic_query_request(node_contact, topic_hash); + if let Entry::Vacant(entry) = query.queried_peers.entry(node_id) { + entry.insert(false); + new_query_peers.push(enr.clone()); + trace!( + "Found a new topic query peer {} in kbuckets of topic hash {}", + node_id, + topic_hash + ); } } - } else { - debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); + } + // If no new nodes can be found to query, let topic lookup wait for new peers or time out. 
+ if new_query_peers.is_empty() { + debug!("Found no new peers to send TOPICQUERY to, setting query status to dry"); + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { + query.dry = true; + } + return; + } + + trace!("Sending TOPICQUERYs to {} new peers", new_query_peers.len()); + for enr in new_query_peers { + if let Ok(node_contact) = NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) + .map_err(|e| error!("Failed to send TOPICQUERY to peer. Error: {:?}", e)) + { + self.topic_query_request(node_contact, topic_hash); + } } } From eeeaa3344b141ab77ae77e5d51f555e0caf64894 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Jul 2022 17:17:27 +0200 Subject: [PATCH 257/391] Fix bug upon new discovered peers --- src/service.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 21f728db7..5e1d25371 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2447,7 +2447,9 @@ impl Service { // If a topic registration runs dry (not enough regsitration attempts per topic kbucket // and no more peers to contact) any new peers to contact will come with a NODES response // to a REGTOPIC request, or a TOPICQUERY if the same topic has also been looked up. - self.send_register_topics(topic_hash); + if self.registration_attempts.contains_key(&topic_hash) { + self.send_register_topics(topic_hash); + } return; } From 4936e031b04fa43772d777c452ca81a0b3a396c5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Jul 2022 17:24:33 +0200 Subject: [PATCH 258/391] Fix find new topic query peers bug --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 5e1d25371..67a699fc5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -985,7 +985,7 @@ impl Service { if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { // Prefer querying nodes further away, i.e. in buckets of further distance to topic, to avoid hotspots. 
for kbuckets_entry in kbuckets.iter().rev() { - if new_query_peers.len() < num_query_peers { + if new_query_peers.len() >= num_query_peers { break; } let node_id = *kbuckets_entry.node.key.preimage(); From 64e287fcefe148cab9582a6fb7e4c2b332d8e621 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Jul 2022 18:23:24 +0200 Subject: [PATCH 259/391] Add trace message for republish --- src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service.rs b/src/service.rs index 67a699fc5..8cef185c9 100644 --- a/src/service.rs +++ b/src/service.rs @@ -843,6 +843,7 @@ impl Service { _ = registration_interval.tick() => { let topics_to_reg = self.registration_attempts.keys().copied().collect::>(); for topic_hash in topics_to_reg { + trace!("Republishing topic hash {}", topic_hash); self.send_register_topics(topic_hash); } } From 8cca2029cf75e87486ba63f9c397de6778a9adc9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 14 Jul 2022 18:29:32 +0200 Subject: [PATCH 260/391] Add trace messages for re-registration on same node after ad-lifetime up --- src/service.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/service.rs b/src/service.rs index 8cef185c9..013ebfa57 100644 --- a/src/service.rs +++ b/src/service.rs @@ -159,7 +159,7 @@ const MAX_ADS_SUBNET_TOPIC: usize = 5; const MAX_ADS_SUBNET: usize = 50; /// The time after a REGCONFIRMATION is sent that an ad is placed. -const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); +const AD_LIFETIME: Duration = Duration::from_secs(60 * 2); /// The max number of uncontacted peers to store before the kbuckets per topic. 
const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; @@ -864,18 +864,25 @@ impl Service { let mut new_reg_peers = Vec::new(); for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { + let distance = index as u64; + // Remove expired registrations - if let Entry::Occupied(ref mut entry) = reg_attempts.entry(index as u64) { + if let Entry::Occupied(ref mut entry) = reg_attempts.entry(distance) { + trace!("Removing expired registration attempts"); let registrations = entry.get_mut(); - registrations.retain(|_, reg_attempt| { + registrations.retain(|node_id, reg_attempt| { if let RegistrationState::Confirmed(insert_time) = reg_attempt { - insert_time.elapsed() < AD_LIFETIME + if insert_time.elapsed() < AD_LIFETIME { + true + } else { + trace!("Registration has expired for node id {}. Removing from registration attempts.", node_id); + false + } } else { true } }); } - let distance = index as u64; let registrations = reg_attempts.entry(distance).or_default(); let max_reg_attempts_bucket = self.config.max_nodes_response; @@ -888,7 +895,7 @@ impl Service { if new_peers.len() + registrations.len() >= max_reg_attempts_bucket { true } else { - debug!("Found new registration peer in discovered peers for topic {}. Peer: {:?}", topic_hash, node_id); + debug!("Found new registration peer in uncontacted peers for topic {}. Peer: {:?}", topic_hash, node_id); registrations.insert(*node_id, RegistrationState::Ticket); new_peers.push(enr.clone()); false @@ -927,7 +934,6 @@ impl Service { if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) .map_err(|e| error!("Failed to send REGTOPIC to peer. Error: {:?}", e)) { - // Registration attempts are acknowledged upon receiving a TICKET or REGCONFIRMATION response. 
self.reg_topic_request(node_contact, topic_hash, local_enr.clone(), None); } } From b818e308bcd288125ac3b12c32d3508f4624a210 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 2022 08:26:52 +0200 Subject: [PATCH 261/391] Fix sending double nodes in NODES response to topic request --- src/service.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 013ebfa57..2e79fb62b 100644 --- a/src/service.rs +++ b/src/service.rs @@ -873,6 +873,7 @@ impl Service { registrations.retain(|node_id, reg_attempt| { if let RegistrationState::Confirmed(insert_time) = reg_attempt { if insert_time.elapsed() < AD_LIFETIME { + trace!("Registration still alive for node id {}. Keeping in registration attempts.", node_id); true } else { trace!("Registration has expired for node id {}. Removing from registration attempts.", node_id); @@ -2052,7 +2053,7 @@ impl Service { .kbuckets .write() .nodes_by_distances( - &[distance - 1, distance, distance + 1], + &[distance - 1, distance + 1], self.config.max_nodes_response - closest_peers.len(), ) .iter() From fb690beeb28e7b880fc58a8b2de5e2f935d384de Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 2022 09:33:07 +0200 Subject: [PATCH 262/391] Fix bug overriding reg attempt and speed up topic query in case of not enough results returned first round --- src/service.rs | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/src/service.rs b/src/service.rs index 2e79fb62b..ea5ca5b1d 100644 --- a/src/service.rs +++ b/src/service.rs @@ -291,7 +291,7 @@ pub enum TopicQueryState { TimedOut(TopicHash), /// Not enough ads have been returned from the first round of sending TOPICQUERY /// requests, new peers in the topic's kbucktes should be queried. - Unsatisfied(TopicHash, usize), + Unsatisfied(TopicHash), } /// The state of a response to a single TOPICQUERY request. 
A topic lookup/query is @@ -382,10 +382,7 @@ impl Stream for ActiveTopicQueries { // If all peers have responded or failed the request and we still did not // obtain enough results, the query is in TopicQueryState::Unsatisfied. if exhausted_peers >= query.queried_peers.len() { - return Poll::Ready(Some(TopicQueryState::Unsatisfied( - *topic_hash, - query.results.len(), - ))); + return Poll::Ready(Some(TopicQueryState::Unsatisfied(*topic_hash))); } } } @@ -621,7 +618,7 @@ impl Service { } self.topics_kbuckets.insert(topic_hash, kbuckets); } - self.send_topic_queries(topic_hash, self.config.max_nodes_response, Some(callback)); + self.send_topic_queries(topic_hash, Some(callback)); } ServiceRequest::RegisterTopic(topic_hash) => { if self.registration_attempts.insert(topic_hash, HashMap::new()).is_some() { @@ -835,8 +832,8 @@ impl Service { } } }, - TopicQueryState::Unsatisfied(topic_hash, num_query_peers) => { - self.send_topic_queries(topic_hash, num_query_peers, None); + TopicQueryState::Unsatisfied(topic_hash) => { + self.send_topic_queries(topic_hash, None); }, } } @@ -863,17 +860,19 @@ impl Service { let reg_attempts = self.registration_attempts.entry(topic_hash).or_default(); let mut new_reg_peers = Vec::new(); + // Ensure that max_reg_attempts_bucket registration attempts are alive per bucket if that many peers are + // available at that distance. + let max_reg_attempts_bucket = self.config.max_nodes_response; + for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { let distance = index as u64; // Remove expired registrations if let Entry::Occupied(ref mut entry) = reg_attempts.entry(distance) { - trace!("Removing expired registration attempts"); let registrations = entry.get_mut(); registrations.retain(|node_id, reg_attempt| { if let RegistrationState::Confirmed(insert_time) = reg_attempt { if insert_time.elapsed() < AD_LIFETIME { - trace!("Registration still alive for node id {}. 
Keeping in registration attempts.", node_id); true } else { trace!("Registration has expired for node id {}. Removing from registration attempts.", node_id); @@ -886,7 +885,6 @@ impl Service { } let registrations = reg_attempts.entry(distance).or_default(); - let max_reg_attempts_bucket = self.config.max_nodes_response; let mut new_peers = Vec::new(); // Attempt sending a request to uncontacted peers if any. @@ -895,11 +893,13 @@ impl Service { bucket.retain(|node_id, enr | { if new_peers.len() + registrations.len() >= max_reg_attempts_bucket { true - } else { + } else if let Entry::Vacant(_) = registrations.entry(*node_id) { debug!("Found new registration peer in uncontacted peers for topic {}. Peer: {:?}", topic_hash, node_id); registrations.insert(*node_id, RegistrationState::Ticket); new_peers.push(enr.clone()); false + } else { + true } }); new_reg_peers.append(&mut new_peers); @@ -947,7 +947,6 @@ impl Service { fn send_topic_queries( &mut self, topic_hash: TopicHash, - num_query_peers: usize, callback: Option, RequestError>>>, ) { let query = self @@ -962,17 +961,21 @@ impl Service { dry: false, }); + // Attempt to query max_topic_query_peers peers at a time. Possibly some peers will return more than one result + // (ADNODES of length > 1), or no results will be returned from that peer. + let max_topic_query_peers = self.config.max_nodes_response; + let mut new_query_peers: Vec = Vec::new(); // Attempt sending a request to uncontacted peers if any. if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { // Prefer querying nodes further away, i.e. in buckets of further distance to topic, to avoid hotspots. 
for bucket in peers.values_mut().rev() { - if new_query_peers.len() < num_query_peers { + if new_query_peers.len() < max_topic_query_peers { break; } bucket.retain(|node_id, enr| { - if new_query_peers.len() >= num_query_peers { + if new_query_peers.len() >= max_topic_query_peers { true } else if let Entry::Vacant(entry) = query.queried_peers.entry(*node_id) { entry.insert(false); @@ -993,7 +996,7 @@ impl Service { if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { // Prefer querying nodes further away, i.e. in buckets of further distance to topic, to avoid hotspots. for kbuckets_entry in kbuckets.iter().rev() { - if new_query_peers.len() >= num_query_peers { + if new_query_peers.len() >= max_topic_query_peers { break; } let node_id = *kbuckets_entry.node.key.preimage(); From 4ac8600a4885132af11d83e84d60596807b52ec8 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 2022 09:44:23 +0200 Subject: [PATCH 263/391] Debug --- src/service.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index ea5ca5b1d..45dd0bf56 100644 --- a/src/service.rs +++ b/src/service.rs @@ -312,6 +312,7 @@ pub enum TopicQueryResponseState { /// set to be registered. A registration is active when either a ticket for an adslot is /// held and the ticket wait time has not yet expired, or a REGCONFIRMATION has been /// received for an ad slot and the ad lifetime has not yet elapsed. +#[derive(Debug)] pub enum RegistrationState { /// A REGCONFIRMATION has been received at the given instant. 
Confirmed(Instant), @@ -870,8 +871,9 @@ impl Service { // Remove expired registrations if let Entry::Occupied(ref mut entry) = reg_attempts.entry(distance) { let registrations = entry.get_mut(); - registrations.retain(|node_id, reg_attempt| { - if let RegistrationState::Confirmed(insert_time) = reg_attempt { + registrations.retain(|node_id, reg_state| { + trace!("node id {}, reg state {:?}", node_id, reg_state); + if let RegistrationState::Confirmed(insert_time) = reg_state { if insert_time.elapsed() < AD_LIFETIME { true } else { From 0dfcba9d30274cb7f9247f34283f11423ab7e8d0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 2022 10:03:57 +0200 Subject: [PATCH 264/391] Fix replace ticket with regconfirmation bug --- src/service.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/service.rs b/src/service.rs index 45dd0bf56..e36ed413f 100644 --- a/src/service.rs +++ b/src/service.rs @@ -298,9 +298,9 @@ pub enum TopicQueryState { /// made up of several TOPICQUERYs each being sent to a different peer. #[derive(Default)] pub enum TopicQueryResponseState { - #[default] /// The Start state is intermediary upon receving the first response to the /// TOPICQUERY request, either a NODES or ADNODES response. + #[default] Start, /// A NODES response has been completely received. 
Nodes, @@ -872,7 +872,7 @@ impl Service { if let Entry::Occupied(ref mut entry) = reg_attempts.entry(distance) { let registrations = entry.get_mut(); registrations.retain(|node_id, reg_state| { - trace!("node id {}, reg state {:?}", node_id, reg_state); + trace!("node id {}, reg state {:?} at distance {}", node_id, reg_state, distance); if let RegistrationState::Confirmed(insert_time) = reg_state { if insert_time.elapsed() < AD_LIFETIME { true @@ -1800,11 +1800,13 @@ impl Service { if let Some(distance) = peer_key.log2_distance(&topic_key) { let registration_attempts = self.registration_attempts.entry(topic).or_default(); - registration_attempts + if let Some(reg_state) = registration_attempts .entry(distance) .or_default() - .entry(node_id) - .or_insert(RegistrationState::Confirmed(now)); + .get_mut(&node_id) + { + *reg_state = RegistrationState::Confirmed(now); + } METRICS .active_regtopic_req From 924c986bac4fc76501b044f475ed58fcbd1045f7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 2022 10:08:44 +0200 Subject: [PATCH 265/391] Debug distance mapping reg attempts and kbuckets --- src/service.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index e36ed413f..dec2d7719 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1800,13 +1800,17 @@ impl Service { if let Some(distance) = peer_key.log2_distance(&topic_key) { let registration_attempts = self.registration_attempts.entry(topic).or_default(); - if let Some(reg_state) = registration_attempts + /*if let Some(reg_state) = registration_attempts .entry(distance) .or_default() .get_mut(&node_id) { *reg_state = RegistrationState::Confirmed(now); - } + }*/ + *registration_attempts + .entry(distance) + .or_default() + .entry(node_id).or_insert(RegistrationState::Confirmed(now)) = RegistrationState::Confirmed(now); METRICS .active_regtopic_req From e4596e1d66c4693aab7bcc4068b85ee1e4e814f3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 
2022 10:17:26 +0200 Subject: [PATCH 266/391] Fix distance mapping bug --- src/service.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/service.rs b/src/service.rs index dec2d7719..6e8cace76 100644 --- a/src/service.rs +++ b/src/service.rs @@ -866,7 +866,7 @@ impl Service { let max_reg_attempts_bucket = self.config.max_nodes_response; for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { - let distance = index as u64; + let distance = index as u64 + 1; // Remove expired registrations if let Entry::Occupied(ref mut entry) = reg_attempts.entry(distance) { @@ -1800,17 +1800,13 @@ impl Service { if let Some(distance) = peer_key.log2_distance(&topic_key) { let registration_attempts = self.registration_attempts.entry(topic).or_default(); - /*if let Some(reg_state) = registration_attempts + if let Some(reg_state) = registration_attempts .entry(distance) .or_default() .get_mut(&node_id) { *reg_state = RegistrationState::Confirmed(now); - }*/ - *registration_attempts - .entry(distance) - .or_default() - .entry(node_id).or_insert(RegistrationState::Confirmed(now)) = RegistrationState::Confirmed(now); + } METRICS .active_regtopic_req From ef4be68ba118dc87666eb53c5d5788c306d1f831 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 2022 11:03:50 +0200 Subject: [PATCH 267/391] Fix re-publishing topics on nodes have reached ticket limit bug --- src/service.rs | 107 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 70 insertions(+), 37 deletions(-) diff --git a/src/service.rs b/src/service.rs index 6e8cace76..ea9783d1d 100644 --- a/src/service.rs +++ b/src/service.rs @@ -144,7 +144,7 @@ impl TalkRequest { const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; /// The time window within in which the number of new tickets from a peer for a topic will be limitied. 
-const TICKET_LIMITER: Duration = Duration::from_secs(60 * 15); +const TICKET_LIMITER_DURATION: Duration = Duration::from_secs(60 * 15); /// The max nodes to adveritse for a topic. const MAX_ADS_TOPIC: usize = 100; @@ -319,6 +319,10 @@ pub enum RegistrationState { /// A TICKET has been received and the ticket is being held for the duration of the /// wait time. Ticket, + /// A fixed number of tickets are accepted within a certain time span. A node id in + /// ticket limit regsitration state will not be sent a REGTOPIC till the ticket + /// TICKET_LIMITER_DURATION has expired. + TicketLimit(Instant), } /// An active topic query/lookup keeps track of which peers from the topic's kbuckets @@ -517,7 +521,7 @@ impl Service { registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), - tickets: Tickets::new(TICKET_LIMITER), + tickets: Tickets::new(TICKET_LIMITER_DURATION), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, @@ -674,16 +678,18 @@ impl Service { self.registration_attempts.iter_mut().for_each(|(topic_hash, reg_attempts_by_distance)| { for reg_attempts in reg_attempts_by_distance.values_mut() { reg_attempts.retain(|node_id, reg_state| { - if let RegistrationState::Confirmed(insert_time) = reg_state { + match reg_state { + RegistrationState::Confirmed(insert_time) => { if insert_time.elapsed() < AD_LIFETIME { active_topics.entry(*topic_hash).or_default().push(*node_id); true } else { false } - } else { - true } + RegistrationState::TicketLimit(insert_time) => insert_time.elapsed() < TICKET_LIMITER_DURATION, + RegistrationState::Ticket => true, + } }); } }); @@ -859,7 +865,7 @@ impl Service { topic_hash ); let reg_attempts = self.registration_attempts.entry(topic_hash).or_default(); - let mut new_reg_peers = Vec::new(); + let mut new_peers = Vec::new(); // Ensure that max_reg_attempts_bucket registration attempts are alive per bucket if that many 
peers are // available at that distance. @@ -867,55 +873,62 @@ impl Service { for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { let distance = index as u64 + 1; + let mut active_reg_attempts_bucket = 0; + + let registrations = reg_attempts.entry(distance).or_default(); - // Remove expired registrations - if let Entry::Occupied(ref mut entry) = reg_attempts.entry(distance) { - let registrations = entry.get_mut(); - registrations.retain(|node_id, reg_state| { - trace!("node id {}, reg state {:?} at distance {}", node_id, reg_state, distance); - if let RegistrationState::Confirmed(insert_time) = reg_state { - if insert_time.elapsed() < AD_LIFETIME { + // Remove expired registrations and ticket limit blockages. + registrations.retain(|node_id, reg_state| { + trace!("Registration attempt of node id {}, reg state {:?} at distance {}", node_id, reg_state, distance); + match reg_state { + RegistrationState::Confirmed(insert_time) => { + if insert_time.elapsed() < AD_LIFETIME { + active_reg_attempts_bucket += 1; + true + } else { + trace!("Registration has expired for node id {}. Removing from registration attempts.", node_id); + false + } + } + RegistrationState::TicketLimit(insert_time) => insert_time.elapsed() < TICKET_LIMITER_DURATION, + RegistrationState::Ticket => { + active_reg_attempts_bucket += 1; true - } else { - trace!("Registration has expired for node id {}. Removing from registration attempts.", node_id); - false } - } else { - true } }); - } - let registrations = reg_attempts.entry(distance).or_default(); - let mut new_peers = Vec::new(); + let mut new_peers_bucket = Vec::new(); // Attempt sending a request to uncontacted peers if any. 
if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { if let Some(bucket) = peers.get_mut(&distance) { bucket.retain(|node_id, enr | { - if new_peers.len() + registrations.len() >= max_reg_attempts_bucket { + if new_peers_bucket.len() + active_reg_attempts_bucket >= max_reg_attempts_bucket { true } else if let Entry::Vacant(_) = registrations.entry(*node_id) { debug!("Found new registration peer in uncontacted peers for topic {}. Peer: {:?}", topic_hash, node_id); registrations.insert(*node_id, RegistrationState::Ticket); - new_peers.push(enr.clone()); + new_peers_bucket.push(enr.clone()); false } else { true } }); - new_reg_peers.append(&mut new_peers); + new_peers.append(&mut new_peers_bucket); } } // The count of active registration attempts for a distance after expired ads have been // removed is less than the max number of registration attempts that should be active // per bucket and is not equal to the total number of peers available in that bucket. - if registrations.len() < self.config.max_nodes_response + if active_reg_attempts_bucket < self.config.max_nodes_response && registrations.len() != bucket.num_entries() { for peer in bucket.iter() { - if new_peers.len() + registrations.len() >= self.config.max_nodes_response { + if new_peers_bucket.len() + active_reg_attempts_bucket + >= self.config.max_nodes_response + { break; } let node_id = *peer.key.preimage(); @@ -926,13 +939,13 @@ impl Service { peer.key.preimage() ); registrations.insert(node_id, RegistrationState::Ticket); - new_peers.push(peer.value.clone()) + new_peers_bucket.push(peer.value.clone()) } } - new_reg_peers.append(&mut new_peers); + new_peers.append(&mut new_peers_bucket); } } - for peer in new_reg_peers { + for peer in new_peers { let local_enr = self.local_enr.read().clone(); if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) .map_err(|e| error!("Failed to send REGTOPIC to peer. 
Error: {:?}", e)) @@ -1783,14 +1796,34 @@ impl Service { topic, } => { if wait_time <= MAX_WAIT_TIME_TICKET && wait_time > 0 { - self.tickets - .insert( - active_request.contact, - ticket, - Duration::from_secs(wait_time), - topic, - ) - .ok(); + if let Err(e) = self.tickets.insert( + active_request.contact, + ticket, + Duration::from_secs(wait_time), + topic, + ) { + error!( + "Failed storing ticket from node id {}. Error {}", + node_id, e + ); + self.registration_attempts.get_mut(&topic).map( + |reg_attempts_by_distance| { + let now = Instant::now(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = + NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + reg_attempts_by_distance.get_mut(&distance).map( + |reg_attempts| { + reg_attempts.get_mut(&node_id).map(|reg_state| { + *reg_state = RegistrationState::TicketLimit(now) + }) + }, + ); + } + }, + ); + } } } ResponseBody::RegisterConfirmation { topic } => { From 7ab9ae32016d91180b21e18ca17cd48844b9a90d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 2022 11:13:46 +0200 Subject: [PATCH 268/391] Fix bug sending the recipient's node in NODES response to a topic request --- src/service.rs | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/src/service.rs b/src/service.rs index ea9783d1d..205874177 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1806,23 +1806,23 @@ impl Service { "Failed storing ticket from node id {}. 
Error {}", node_id, e ); - self.registration_attempts.get_mut(&topic).map( - |reg_attempts_by_distance| { - let now = Instant::now(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = - NodeId::new(&topic.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - reg_attempts_by_distance.get_mut(&distance).map( - |reg_attempts| { - reg_attempts.get_mut(&node_id).map(|reg_state| { - *reg_state = RegistrationState::TicketLimit(now) - }) - }, - ); - } - }, - ); + if let Some(reg_attempts_by_distance) = + self.registration_attempts.get_mut(&topic) + { + let now = Instant::now(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = + NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + reg_attempts_by_distance.get_mut(&distance).map( + |reg_attempts| { + reg_attempts.get_mut(&node_id).map(|reg_state| { + *reg_state = RegistrationState::TicketLimit(now) + }) + }, + ); + } + } } } } @@ -2086,7 +2086,11 @@ impl Service { .write() .nodes_by_distances(&[distance], self.config.max_nodes_response) .iter() - .for_each(|entry| closest_peers.push(entry.node.value.clone())); + .for_each(|entry| { + if entry.node.key.preimage() != &node_address.node_id { + closest_peers.push(entry.node.value.clone()) + } + }); if closest_peers.len() < self.config.max_nodes_response { for entry in self @@ -2101,7 +2105,9 @@ impl Service { if closest_peers.len() > self.config.max_nodes_response { break; } - closest_peers.push(entry.node.value.clone()); + if entry.node.key.preimage() != &node_address.node_id { + closest_peers.push(entry.node.value.clone()) + } } } } From 586a5bfd53b5a484acdd84aa3f3ddfe350ef7298 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 15 Jul 2022 12:12:57 +0200 Subject: [PATCH 269/391] Set ad lifetime --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 205874177..6a71c86ce 
100644 --- a/src/service.rs +++ b/src/service.rs @@ -159,7 +159,7 @@ const MAX_ADS_SUBNET_TOPIC: usize = 5; const MAX_ADS_SUBNET: usize = 50; /// The time after a REGCONFIRMATION is sent that an ad is placed. -const AD_LIFETIME: Duration = Duration::from_secs(60 * 2); +const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); /// The max number of uncontacted peers to store before the kbuckets per topic. const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; From 4497957e538d3641d0447fdafc8feb07e053bcfe Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Jul 2022 17:48:38 +0200 Subject: [PATCH 270/391] Update docs --- src/advertisement/topic.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index f7a3a5fe6..549c306c0 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -61,11 +61,9 @@ impl Hasher for Sha256Hash { } } -/// A topic hashed by the hash algorithm implemented by the sending node. -/// TopicHash is used in place of a Vec in requests and responses. This -/// deviates from the wire protocol, it was necessary that the sender hashes -/// the topic as the hash is used to deteremine by XOR distance which nodes -/// to send the REGTOPIC request to. +/// The 32-bytes that are sent in the body of a topic request are interpreted +/// as a hash by the agreed upon hash algorithm in the discv5 network (defaults +/// to Sha256). #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct TopicHash { /// The topic hash. Stored as a fixed length byte array. 
From e2ced6e81a6851cf2c39478c871ccb5a9503770b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Jul 2022 17:51:48 +0200 Subject: [PATCH 271/391] Run cargo fmt --- src/advertisement/topic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 549c306c0..071562fdb 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -61,7 +61,7 @@ impl Hasher for Sha256Hash { } } -/// The 32-bytes that are sent in the body of a topic request are interpreted +/// The 32-bytes that are sent in the body of a topic request are interpreted /// as a hash by the agreed upon hash algorithm in the discv5 network (defaults /// to Sha256). #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] From f19d2bb17a3fc5cb2d0c7081395d2716000bccc5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Jul 2022 19:49:29 +0200 Subject: [PATCH 272/391] Change name to be meanigful to new logic --- src/discv5.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/discv5.rs b/src/discv5.rs index 0825f27e7..da0359830 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -568,7 +568,7 @@ impl Discv5 { } /// Registers a topic for the first time. - pub fn reg_topic_req( + pub fn register_topic( &self, topic: String, ) -> impl Future> + 'static { From 02ed526ab3b6386890173aeb8d6641db64dce755 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 18 Jul 2022 21:26:11 +0200 Subject: [PATCH 273/391] Retrieve hosted ads for a topic from app level --- src/discv5.rs | 37 +++++++++++++++++++++++++++---------- src/service.rs | 10 +++++++++- 2 files changed, 36 insertions(+), 11 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index da0359830..5ec2f6b45 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -567,10 +567,10 @@ impl Discv5 { } } - /// Registers a topic for the first time. + /// Add a topic to keep registering on other nodes. 
pub fn register_topic( &self, - topic: String, + topic: &'static str, ) -> impl Future> + 'static { let channel = self.clone_channel(); @@ -578,7 +578,7 @@ impl Discv5 { let channel = channel .as_ref() .map_err(|_| RequestError::ServiceNotStarted)?; - let topic_hash = Topic::new(&topic).hash(); + let topic_hash = Topic::new(topic).hash(); let event = ServiceRequest::RegisterTopic(topic_hash); debug!( "Registering topic {} with Sha256 hash {}", @@ -619,14 +619,31 @@ impl Discv5 { } } - /// Finds the relevant nodes to publish the topics on, as far away from the topic as - /// the bits configured by the Discv5 topic_radius distance in the Discv5 config. - pub fn find_closest_nodes_to_topic( + /// Get the ads advertised for other nodes for a given topic. + pub fn ads( &self, - topic_hash: TopicHash, - ) -> impl Future, QueryError>> + 'static { - let key = NodeId::new(&topic_hash.as_bytes()); - self.find_node(key) + topic: &'static str, + ) -> impl Future, RequestError>> + 'static { + // the service will verify if this node is contactable, we just send it and + // await a response. + let (callback_send, callback_recv) = oneshot::channel(); + let channel = self.clone_channel(); + + async move { + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + let topic_hash = Topic::new(topic).hash(); + let event = ServiceRequest::Ads(topic_hash, callback_send); + + // send the request + channel + .send(event) + .await + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + // await the response + callback_recv + .await + .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + } } /// Runs an iterative `FIND_NODE` request. diff --git a/src/service.rs b/src/service.rs index 6a71c86ce..b74065246 100644 --- a/src/service.rs +++ b/src/service.rs @@ -192,6 +192,8 @@ pub enum ServiceRequest { RemoveTopic(TopicHash, oneshot::Sender>), /// Retrieves the ads currently published by this node on other nodes in a discv5 network. 
ActiveTopics(oneshot::Sender>, RequestError>>), + /// Retrieves the ads adveritsed for other nodes for a given topic. + Ads(TopicHash, oneshot::Sender, RequestError>>), } use crate::discv5::PERMIT_BAN_LIST; @@ -702,10 +704,16 @@ impl Service { if self.registration_attempts.remove(&topic_hash).is_some() { METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); if callback.send(Ok(base64::encode(topic_hash.as_bytes()))).is_err() { - error!("Failed to return the removed topic"); + error!("Failed to return the removed topic {}", topic_hash); } } } + ServiceRequest::Ads(topic, callback) => { + let ads = self.ads.get_ad_nodes(topic).map(|ad_node| ad_node.node_record().clone()).collect::>(); + if callback.send(Ok(ads)).is_err() { + error!("Failed to return ads for topic {}", topic); + } + } } } Some(event) = self.handler_recv.recv() => { From 5b3364e69ba6b83da3c60df4621ceef4bcf20538 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Jul 2022 12:16:12 +0200 Subject: [PATCH 274/391] Retrieve active registration attempts from app for debug --- src/discv5.rs | 62 +++++++-- src/service.rs | 308 ++++++++++++++++++++++---------------------- src/service/test.rs | 1 + 3 files changed, 202 insertions(+), 169 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 5ec2f6b45..16b03bd7e 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -20,13 +20,13 @@ use crate::{ NodeStatus, UpdateResult, }, node_info::NodeContact, - service::{QueryKind, Service, ServiceRequest, TalkRequest}, + service::{QueryKind, RegAttempts, Service, ServiceRequest, TalkRequest}, Discv5Config, Enr, }; use enr::{CombinedKey, EnrError, EnrKey, NodeId}; use parking_lot::RwLock; use std::{ - collections::HashMap, + collections::{BTreeMap, HashMap}, future::Future, net::SocketAddr, sync::Arc, @@ -435,7 +435,7 @@ impl Discv5 { .collect() } - pub fn hashes(topic: String) -> Vec<(TopicHash, String)> { + pub fn hashes(topic: &'static str) -> Vec<(TopicHash, String)> { let 
sha256_topic = Topic::new(topic); vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] } @@ -513,7 +513,7 @@ impl Discv5 { pub fn topic_query_req( &self, - topic_hash: TopicHash, + topic: &'static str, ) -> impl Future, RequestError>> + 'static { let channel = self.clone_channel(); @@ -522,6 +522,9 @@ impl Discv5 { // await a response. let (callback_send, callback_recv) = oneshot::channel(); + let topic = Topic::new(topic); + let topic_hash = topic.hash(); + let event = ServiceRequest::TopicQuery(topic_hash, callback_send); let channel = channel .as_ref() @@ -537,7 +540,13 @@ impl Discv5 { .await .map_err(|e| RequestError::ChannelFailed(e.to_string()))?; if let Ok(ad_nodes) = ad_nodes { - debug!("Received {} ad nodes", ad_nodes.len()); + debug!( + "Received {} ad nodes for topic {} with topic hash {} {}", + ad_nodes.len(), + topic, + topic_hash, + topic.hash_function_name() + ); Ok(ad_nodes) } else { Ok(Vec::new()) @@ -593,6 +602,28 @@ impl Discv5 { } } + pub fn reg_attempts( + &self, + topic: &'static str, + ) -> impl Future, RequestError>> + 'static { + let channel = self.clone_channel(); + let (callback_send, callback_recv) = oneshot::channel(); + + async move { + let channel = channel + .as_ref() + .map_err(|_| RequestError::ServiceNotStarted)?; + let topic = Topic::new(topic); + let topic_hash = topic.hash(); + let event = ServiceRequest::RegistrationAttempts(topic_hash, callback_send); + + channel + .send(event) + .await + .map_err(|_| RequestError::ServiceNotStarted)?; + callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive regsitration attempts for topic {} with topic hash {} {}. Error {}", topic, topic_hash, topic.hash_function_name(), e)))? + } + } /// Retrieves the topics that we have published on other nodes. 
pub fn active_topics( &self, @@ -613,9 +644,9 @@ impl Discv5 { .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; // await the response - callback_recv - .await - .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + callback_recv.await.map_err(|e| { + RequestError::ChannelFailed(format!("Failed to receive active topics. Error {}", e)) + })? } } @@ -631,7 +662,8 @@ impl Discv5 { async move { let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; - let topic_hash = Topic::new(topic).hash(); + let topic = Topic::new(topic); + let topic_hash = topic.hash(); let event = ServiceRequest::Ads(topic_hash, callback_send); // send the request @@ -640,9 +672,15 @@ impl Discv5 { .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; // await the response - callback_recv - .await - .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + callback_recv.await.map_err(|e| { + RequestError::ChannelFailed(format!( + "Failed to receive ads for topic {} with topic hash {} {}. Error {}", + topic, + topic_hash, + topic.hash_function_name(), + e + )) + })? } } diff --git a/src/service.rs b/src/service.rs index b74065246..ce4aaf66d 100644 --- a/src/service.rs +++ b/src/service.rs @@ -140,29 +140,14 @@ impl TalkRequest { } } -/// The max wait time accpeted for tickets. -const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; - -/// The time window within in which the number of new tickets from a peer for a topic will be limitied. -const TICKET_LIMITER_DURATION: Duration = Duration::from_secs(60 * 15); - -/// The max nodes to adveritse for a topic. -const MAX_ADS_TOPIC: usize = 100; - -/// The max nodes to advertise. -const MAX_ADS: usize = 50000; - -/// The max ads per subnet per topic. -const MAX_ADS_SUBNET_TOPIC: usize = 5; - -/// The max ads per subnet. -const MAX_ADS_SUBNET: usize = 50; - -/// The time after a REGCONFIRMATION is sent that an ad is placed. 
-const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); - -/// The max number of uncontacted peers to store before the kbuckets per topic. -const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; +/// The active and temporarily limited (too many tickets received from a node +/// in a given time span) registration attempts. Upon sending a REGTOPIC to +/// a node, it is inserted into RegAttempts with RegistrationState::Ticket. +#[derive(Default, Clone)] +pub struct RegAttempts { + /// One registration attempt per node is allowed at a time. + pub reg_attempts: HashMap, +} /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { @@ -194,8 +179,37 @@ pub enum ServiceRequest { ActiveTopics(oneshot::Sender>, RequestError>>), /// Retrieves the ads adveritsed for other nodes for a given topic. Ads(TopicHash, oneshot::Sender, RequestError>>), + /// Retrieves the registration attempts acitve for a given topic. + RegistrationAttempts( + TopicHash, + oneshot::Sender, RequestError>>, + ), } +/// The max wait time accpeted for tickets. +const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; + +/// The time window within in which the number of new tickets from a peer for a topic will be limitied. +const TICKET_LIMITER_DURATION: Duration = Duration::from_secs(60 * 15); + +/// The max nodes to adveritse for a topic. +const MAX_ADS_TOPIC: usize = 100; + +/// The max nodes to advertise. +const MAX_ADS: usize = 50000; + +/// The max ads per subnet per topic. +const MAX_ADS_SUBNET_TOPIC: usize = 5; + +/// The max ads per subnet. +const MAX_ADS_SUBNET: usize = 50; + +/// The time after a REGCONFIRMATION is sent that an ad is placed. +const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); + +/// The max number of uncontacted peers to store before the kbuckets per topic. 
+const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; + use crate::discv5::PERMIT_BAN_LIST; pub struct Service { @@ -262,7 +276,7 @@ pub struct Service { /// Topics tracks registration attempts of the topic hashes to advertise on /// other nodes. - registration_attempts: HashMap>>, + registration_attempts: HashMap>, /// KBuckets per topic hash. topics_kbuckets: HashMap>, @@ -272,6 +286,9 @@ pub struct Service { /// kbuckets. discovered_peers_topic: HashMap>>, + /// The key used for en-/decrypting tickets. + ticket_key: [u8; 16], + /// Tickets received by other nodes. tickets: Tickets, @@ -314,7 +331,7 @@ pub enum TopicQueryResponseState { /// set to be registered. A registration is active when either a ticket for an adslot is /// held and the ticket wait time has not yet expired, or a REGCONFIRMATION has been /// received for an ad slot and the ad lifetime has not yet elapsed. -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum RegistrationState { /// A REGCONFIRMATION has been received at the given instant. Confirmed(Instant), @@ -484,19 +501,6 @@ impl Service { } }; - // A key is generated for en-/decrypting tickets that are issued upon receiving a topic - // regsitration attempt. 
- let ticket_key: [u8; 16] = rand::random(); - match local_enr - .write() - .insert("ticket_key", &ticket_key, &enr_key.write()) - { - Ok(_) => {} - Err(e) => { - return Err(Error::new(ErrorKind::Other, format!("{:?}", e))); - } - } - config .executor .clone() @@ -523,6 +527,7 @@ impl Service { registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), + ticket_key: rand::random(), tickets: Tickets::new(TICKET_LIMITER_DURATION), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( @@ -628,7 +633,7 @@ impl Service { self.send_topic_queries(topic_hash, Some(callback)); } ServiceRequest::RegisterTopic(topic_hash) => { - if self.registration_attempts.insert(topic_hash, HashMap::new()).is_some() { + if self.registration_attempts.insert(topic_hash, BTreeMap::new()).is_some() { warn!("This topic is already being advertised"); } else { // NOTE: Currently we don't expose custom filter support in the configuration. 
Users can @@ -679,7 +684,7 @@ impl Service { let mut active_topics = HashMap::>::new(); self.registration_attempts.iter_mut().for_each(|(topic_hash, reg_attempts_by_distance)| { for reg_attempts in reg_attempts_by_distance.values_mut() { - reg_attempts.retain(|node_id, reg_state| { + reg_attempts.reg_attempts.retain(|node_id, reg_state| { match reg_state { RegistrationState::Confirmed(insert_time) => { if insert_time.elapsed() < AD_LIFETIME { @@ -708,10 +713,21 @@ impl Service { } } } - ServiceRequest::Ads(topic, callback) => { - let ads = self.ads.get_ad_nodes(topic).map(|ad_node| ad_node.node_record().clone()).collect::>(); + ServiceRequest::Ads(topic_hash, callback) => { + let ads = self.ads.get_ad_nodes(topic_hash).map(|ad_node| ad_node.node_record().clone()).collect::>(); if callback.send(Ok(ads)).is_err() { - error!("Failed to return ads for topic {}", topic); + error!("Failed to return ads for topic {}", topic_hash); + } + } + ServiceRequest::RegistrationAttempts(topic_hash, callback) => { + let reg_attempts = if let Some(reg_attempts) = self.registration_attempts.get(&topic_hash) { + reg_attempts.clone() + } else { + error!("Topic hash {} is not being registered", topic_hash); + BTreeMap::new() + }; + if callback.send(Ok(reg_attempts)).is_err() { + error!("Failed to return registration attempts for topic hash {}", topic_hash); } } } @@ -816,7 +832,7 @@ impl Service { // updated. if let Some(reg_attempts) = self.registration_attempts.get_mut(&active_topic.topic()) { for kbucket_reg_attempts in reg_attempts.values_mut() { - let reg_state = kbucket_reg_attempts.remove(active_topic.node_id()); + let reg_state = kbucket_reg_attempts.reg_attempts.remove(active_topic.node_id()); if reg_state.is_some() { break; } @@ -886,7 +902,7 @@ impl Service { let registrations = reg_attempts.entry(distance).or_default(); // Remove expired registrations and ticket limit blockages. 
- registrations.retain(|node_id, reg_state| { + registrations.reg_attempts.retain(|node_id, reg_state| { trace!("Registration attempt of node id {}, reg state {:?} at distance {}", node_id, reg_state, distance); match reg_state { RegistrationState::Confirmed(insert_time) => { @@ -914,9 +930,9 @@ impl Service { bucket.retain(|node_id, enr | { if new_peers_bucket.len() + active_reg_attempts_bucket >= max_reg_attempts_bucket { true - } else if let Entry::Vacant(_) = registrations.entry(*node_id) { + } else if let Entry::Vacant(_) = registrations.reg_attempts.entry(*node_id) { debug!("Found new registration peer in uncontacted peers for topic {}. Peer: {:?}", topic_hash, node_id); - registrations.insert(*node_id, RegistrationState::Ticket); + registrations.reg_attempts.insert(*node_id, RegistrationState::Ticket); new_peers_bucket.push(enr.clone()); false } else { @@ -931,7 +947,7 @@ impl Service { // removed is less than the max number of registration attempts that should be active // per bucket and is not equal to the total number of peers available in that bucket. if active_reg_attempts_bucket < self.config.max_nodes_response - && registrations.len() != bucket.num_entries() + && registrations.reg_attempts.len() != bucket.num_entries() { for peer in bucket.iter() { if new_peers_bucket.len() + active_reg_attempts_bucket @@ -940,13 +956,15 @@ impl Service { break; } let node_id = *peer.key.preimage(); - if let Entry::Vacant(_) = registrations.entry(node_id) { + if let Entry::Vacant(_) = registrations.reg_attempts.entry(node_id) { debug!( "Found new registration peer in kbuckets of topic {}. 
Peer: {:?}", topic_hash, peer.key.preimage() ); - registrations.insert(node_id, RegistrationState::Ticket); + registrations + .reg_attempts + .insert(node_id, RegistrationState::Ticket); new_peers_bucket.push(peer.value.clone()) } } @@ -1282,70 +1300,56 @@ impl Service { ); if !ticket.is_empty() { - let decoded_local_enr = self - .local_enr - .write() - .to_base64() - .parse::() - .map_err(|e| { - error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) - }); - if let Ok(decoded_local_enr) = decoded_local_enr { - if let Some(ticket_key) = decoded_local_enr.get("ticket_key") { - let decrypted_ticket = { - let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); - let payload = Payload { - msg: &ticket, - aad: b"", - }; - aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) - .map_err(|e| { - error!( - "Failed to decrypt ticket in REGTOPIC request. Error: {}", - e - ) - }) - }; - if let Ok(decrypted_ticket) = decrypted_ticket { - Ticket::decode(&decrypted_ticket) - .map_err(|e| { - error!( - "Failed to decode ticket in REGTOPIC request. Error: {}", - e - ) - }) - .map(|ticket| { - if let Some(ticket) = ticket { - // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no - // wait time for the ad slot. See discv5 spec. This node will not store tickets received - // with wait time 0. - new_ticket.set_cum_wait(ticket.cum_wait()); - self.send_ticket_response( - node_address.clone(), - id.clone(), - new_ticket.clone(), - wait_time, - ); - // If current wait time is 0, the ticket is added to the matching ticket pool. - if wait_time <= Duration::from_secs(0) { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket. For example if a malicious node tries to use - // another ticket issued by us. 
- if ticket == new_ticket { - self.ticket_pools.insert(enr, id, ticket, node_address.socket_addr.ip()); - } - } - } - }) - .ok(); - } else { - warn!("Node sent a ticket that couldn't be decrypted with local ticket key. Blacklisting: {}", node_address.node_id); - let ban_timeout = - self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::InvalidTicket); - } - } + let decrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(&self.ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", + }; + aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .map_err(|e| { + error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) + }) + }; + if let Ok(decrypted_ticket) = decrypted_ticket { + Ticket::decode(&decrypted_ticket) + .map_err(|e| { + error!("Failed to decode ticket in REGTOPIC request. Error: {}", e) + }) + .map(|ticket| { + if let Some(ticket) = ticket { + // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no + // wait time for the ad slot. See discv5 spec. This node will not store tickets received + // with wait time 0. + new_ticket.set_cum_wait(ticket.cum_wait()); + self.send_ticket_response( + node_address.clone(), + id.clone(), + new_ticket.clone(), + wait_time, + ); + // If current wait time is 0, the ticket is added to the matching ticket pool. + if wait_time <= Duration::from_secs(0) { + // Drop if src_node_id, src_ip and topic derived from node_address and request + // don't match those in ticket. For example if a malicious node tries to use + // another ticket issued by us. + if ticket == new_ticket { + self.ticket_pools.insert( + enr, + id, + ticket, + node_address.socket_addr.ip(), + ); + } + } + } + }) + .ok(); + } else { + warn!("Node sent a ticket that couldn't be decrypted with local ticket key. 
Blacklisting: {}", node_address.node_id); + let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::InvalidTicket); } } else { debug!("Sending TICKET response"); @@ -1822,13 +1826,11 @@ impl Service { let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { - reg_attempts_by_distance.get_mut(&distance).map( - |reg_attempts| { - reg_attempts.get_mut(&node_id).map(|reg_state| { - *reg_state = RegistrationState::TicketLimit(now) - }) - }, - ); + reg_attempts_by_distance.get_mut(&distance).map(|bucket| { + bucket.reg_attempts.get_mut(&node_id).map(|reg_state| { + *reg_state = RegistrationState::TicketLimit(now) + }) + }); } } } @@ -1844,6 +1846,7 @@ impl Service { if let Some(reg_state) = registration_attempts .entry(distance) .or_default() + .reg_attempts .get_mut(&node_id) { *reg_state = RegistrationState::Confirmed(now); @@ -1989,42 +1992,32 @@ impl Service { ticket: Ticket, wait_time: Duration, ) { - self.local_enr - .write() - .to_base64() - .parse::() + let aead = Aes128Gcm::new(GenericArray::from_slice(&self.ticket_key)); + let payload = Payload { + msg: &ticket.encode(), + aad: b"", + }; + let _ = aead + .encrypt(GenericArray::from_slice(&[1u8; 12]), payload) .map_err(|e| error!("Failed to send TICKET response: {}", e)) - .map(|decoded_enr| { - if let Some(ticket_key) = decoded_enr.get("ticket_key") { - let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); - let payload = Payload { - msg: &ticket.encode(), - aad: b"", - }; - aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) - .map_err(|e| error!("Failed to send TICKET response: {}", e)) - .map(|encrypted_ticket| { - let response = Response { - id: rpc_id, - body: ResponseBody::Ticket { - ticket: encrypted_ticket, - wait_time: wait_time.as_secs(), - topic: ticket.topic(), - }, - }; - trace!( - "Sending TICKET 
response to: {}. Response: {} ", - node_address, - response - ); - let _ = self - .handler_send - .send(HandlerIn::Response(node_address, Box::new(response))); - }) - .ok(); - } - }) - .ok(); + .map(|encrypted_ticket| { + let response = Response { + id: rpc_id, + body: ResponseBody::Ticket { + ticket: encrypted_ticket, + wait_time: wait_time.as_secs(), + topic: ticket.topic(), + }, + }; + trace!( + "Sending TICKET response to: {}. Response: {} ", + node_address, + response + ); + let _ = self + .handler_send + .send(HandlerIn::Response(node_address, Box::new(response))); + }); } /// The response sent to a node which is selected out of a ticket pool of registrants @@ -2814,10 +2807,11 @@ impl Service { let peer_key: kbucket::Key = node_id.into(); let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { + // Remove the registration attempt before disconnecting the peer. let registration_attempts = self.registration_attempts.entry(topic).or_default(); if let Some(bucket) = registration_attempts.get_mut(&distance) { - bucket.remove(&node_id); + bucket.reg_attempts.remove(&node_id); } METRICS diff --git a/src/service/test.rs b/src/service/test.rs index c2185cc3b..7eb6952ca 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -97,6 +97,7 @@ async fn build_service( discv5_recv, event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3).unwrap(), + ticket_key: rand::random(), tickets: Tickets::new(Duration::from_secs(60 * 15)), registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), From 075e186d989b15b2406f0f4e7312e1a3e00ef3d4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Jul 2022 14:54:26 +0200 Subject: [PATCH 275/391] Retrieve table entries for a topic from app --- src/discv5.rs | 46 +++++++++++++++++++++++++++++++++++++--------- src/service.rs | 22 ++++++++++++++++++++++ 2 files changed, 59 insertions(+), 9 deletions(-) diff --git 
a/src/discv5.rs b/src/discv5.rs index 16b03bd7e..3c8a84c35 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -511,6 +511,30 @@ impl Discv5 { } } + /// Returns an iterator over all ENR node IDs of nodes currently contained in the routing table. + pub async fn table_entries_id_topic( + &self, + topic: &'static str, + ) -> impl Future>, RequestError>> { + let channel = self.clone_channel(); + + async move { + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; + let (callback_send, callback_recv) = oneshot::channel(); + + let topic = Topic::new(topic); + let topic_hash = topic.hash(); + + let event = ServiceRequest::TableEntriesIdTopic(topic_hash, callback_send); + + channel + .send(event) + .await + .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive table entries' ids for topic {} with topic hash {} {}. Error {}", topic, topic_hash, topic.hash_function_name(), e)))? + } + } + pub fn topic_query_req( &self, topic: &'static str, @@ -526,9 +550,7 @@ impl Discv5 { let topic_hash = topic.hash(); let event = ServiceRequest::TopicQuery(topic_hash, callback_send); - let channel = channel - .as_ref() - .map_err(|_| RequestError::ServiceNotStarted)?; + let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; // send the request channel @@ -536,9 +558,12 @@ impl Discv5 { .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; // await the response - let ad_nodes = callback_recv - .await - .map_err(|e| RequestError::ChannelFailed(e.to_string()))?; + let ad_nodes = callback_recv.await.map_err(|e| { + RequestError::ChannelFailed(format!( + "Failed to receive ad nodes from lookup of topic hash {}. 
Error {}", + topic_hash, e + )) + })?; if let Ok(ad_nodes) = ad_nodes { debug!( "Received {} ad nodes for topic {} with topic hash {} {}", @@ -570,9 +595,12 @@ impl Discv5 { .send(event) .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; - callback_recv - .await - .map_err(|e| RequestError::ChannelFailed(e.to_string()))? + callback_recv.await.map_err(|e| { + RequestError::ChannelFailed(format!( + "Failed to receive removed topic hash {}. Error {}", + topic_hash, e + )) + })? } } diff --git a/src/service.rs b/src/service.rs index ce4aaf66d..d6ba3888e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -184,6 +184,11 @@ pub enum ServiceRequest { TopicHash, oneshot::Sender, RequestError>>, ), + /// Retrieves the node id of entries in a given topic's kbuckets by distance. + TableEntriesIdTopic( + TopicHash, + oneshot::Sender>, RequestError>>, + ), } /// The max wait time accpeted for tickets. @@ -730,6 +735,23 @@ impl Service { error!("Failed to return registration attempts for topic hash {}", topic_hash); } } + ServiceRequest::TableEntriesIdTopic(topic_hash, callback) => { + let mut table_entries = BTreeMap::new(); + if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { + for (index, bucket) in kbuckets.buckets_iter().enumerate() { + // The bucket's index in the Vec of buckets in the kbucket table will + // be one less than the distance as the log2distance 0 from the local + // node, i.e. the local node, is not assigned a bucket. 
+ let distance = index as u64 + 1; + let mut node_ids = Vec::new(); + bucket.iter().for_each(|node| node_ids.push(*node.key.preimage())); + table_entries.insert(distance, node_ids); + } + } + if callback.send(Ok(table_entries)).is_err() { + error!("Failed to return table entries' ids for topic hash {}", topic_hash); + } + } } } Some(event) = self.handler_recv.recv() => { From a67515e0178d48305e66d870dea47fb751cb6b36 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Jul 2022 15:00:17 +0200 Subject: [PATCH 276/391] Fix async bug --- src/discv5.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/discv5.rs b/src/discv5.rs index 3c8a84c35..a7266df18 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -512,7 +512,7 @@ impl Discv5 { } /// Returns an iterator over all ENR node IDs of nodes currently contained in the routing table. - pub async fn table_entries_id_topic( + pub fn table_entries_id_topic( &self, topic: &'static str, ) -> impl Future>, RequestError>> { @@ -531,6 +531,7 @@ impl Discv5 { .send(event) .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; + callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive table entries' ids for topic {} with topic hash {} {}. Error {}", topic, topic_hash, topic.hash_function_name(), e)))? 
} } From 00669ecd1f0132e234d429a439465babde34ab8d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Jul 2022 17:48:29 +0200 Subject: [PATCH 277/391] Change trace to debug messages --- src/service.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/service.rs b/src/service.rs index d6ba3888e..1db3c25af 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1298,7 +1298,6 @@ impl Service { } } } - debug!("Sending NODES response to REGTOPIC"); self.send_find_topic_nodes_response( topic, node_address.clone(), @@ -1374,7 +1373,6 @@ impl Service { self.rpc_failure(id, RequestError::InvalidTicket); } } else { - debug!("Sending TICKET response"); // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no // wait time for the ad slot. See discv5 spec. This node will not store tickets received // with wait time 0. @@ -1396,7 +1394,6 @@ impl Service { } } RequestBody::TopicQuery { topic } => { - trace!("Sending NODES response to TOPICQUERY request {}", id); self.send_find_topic_nodes_response( topic, node_address.clone(), @@ -2054,7 +2051,7 @@ impl Service { id: rpc_id, body: ResponseBody::RegisterConfirmation { topic }, }; - trace!( + debug!( "Sending REGCONFIRMATION response to: {}. 
Response: {} ", node_address, response @@ -2206,6 +2203,7 @@ impl Service { req_type: &str, resp_body: ResponseBody, ) { + debug!("Sending NODES response to {} request {}", req_type, id); // if there are no nodes, send an empty response if nodes_to_send.is_empty() { let response = Response { From fcf5549b5776c4ead2841b667fadca03436783f6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 19 Jul 2022 18:06:34 +0200 Subject: [PATCH 278/391] Only send DiscoveredTopic event for new peers --- examples/find_nodes.rs | 2 +- src/discv5.rs | 2 +- src/service.rs | 16 +++++++--------- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/examples/find_nodes.rs b/examples/find_nodes.rs index 6f7e666e5..5a0d61630 100644 --- a/examples/find_nodes.rs +++ b/examples/find_nodes.rs @@ -191,7 +191,7 @@ async fn main() { } match discv5_ev { Discv5Event::Discovered(enr) => info!("Enr discovered {}", enr), - Discv5Event::DiscoveredTopic(enr, topic_hash) => info!("Enr discovered {} for topic {}", enr, topic_hash), + Discv5Event::DiscoveredNewPeerTopic(enr, topic_hash) => info!("Enr discovered {} for topic {}", enr, topic_hash), Discv5Event::EnrAdded { enr, replaced: _ } => info!("Enr added {}", enr), Discv5Event::NodeInserted { node_id, replaced: _ } => info!("Node inserted {}", node_id), Discv5Event::NodeInsertedTopic { node_id, replaced: _, topic_hash } => info!("Node inserted {} in topic hash {} kbucket", node_id, topic_hash), diff --git a/src/discv5.rs b/src/discv5.rs index a7266df18..63e781c01 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -57,7 +57,7 @@ pub enum Discv5Event { /// guaranteed to be live or contactable. Discovered(Enr), /// A node has been discovered from either a REGTOPIC or a TOPICQUERY request. See [`Discv5Event::Discovered`]. - DiscoveredTopic(Enr, TopicHash), + DiscoveredNewPeerTopic(Enr, TopicHash), /// A new ENR was added to the routing table. EnrAdded { enr: Enr, replaced: Option }, /// A new node has been added to the routing table. 
diff --git a/src/service.rs b/src/service.rs index 1db3c25af..2b63fea19 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2053,8 +2053,7 @@ impl Service { }; debug!( "Sending REGCONFIRMATION response to: {}. Response: {} ", - node_address, - response + node_address, response ); let _ = self .handler_send @@ -2203,7 +2202,7 @@ impl Service { req_type: &str, resp_body: ResponseBody, ) { - debug!("Sending NODES response to {} request {}", req_type, id); + debug!("Sending NODES response to {} request {}", req_type, rpc_id); // if there are no nodes, send an empty response if nodes_to_send.is_empty() { let response = Response { @@ -2397,12 +2396,8 @@ impl Service { // If any of the discovered nodes are in the routing table, and there contains an older ENR, update it. // If there is an event stream send the Discovered event - if self.config.report_discovered_peers { - if let Some(topic_hash) = topic_hash { - self.send_event(Discv5Event::DiscoveredTopic(enr.clone(), topic_hash)); - } else { - self.send_event(Discv5Event::Discovered(enr.clone())); - } + if self.config.report_discovered_peers && topic_hash.is_none() { + self.send_event(Discv5Event::Discovered(enr.clone())); } // ignore peers that don't pass the table filter @@ -2455,6 +2450,9 @@ impl Service { } kbucket::Entry::Absent(_) => { if let Some(topic_hash) = topic_hash { + if self.config.report_discovered_peers { + self.send_event(Discv5Event::DiscoveredNewPeerTopic(enr.clone(), topic_hash)); + } trace!("Discovered new peer {} for topic hash {}", enr.node_id(), topic_hash); let discovered_peers = self.discovered_peers_topic.entry(topic_hash).or_default(); From 09fce244b82cef9b35d7fbaf04293d32c35a8a66 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 20 Jul 2022 13:37:24 +0200 Subject: [PATCH 279/391] Limit triggering TICKET, NODES and REGCONFIRMATION responses to a max number per interval --- src/service.rs | 58 +++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 12 
deletions(-) diff --git a/src/service.rs b/src/service.rs index 2b63fea19..120bf1497 100644 --- a/src/service.rs +++ b/src/service.rs @@ -212,6 +212,17 @@ const MAX_ADS_SUBNET: usize = 50; /// The time after a REGCONFIRMATION is sent that an ad is placed. const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); +/// The number of registration attempts that should be active per distance +/// if there are sufficient peers. +const MAX_REG_ATTEMPTS_DISTANCE: usize = 16; + +/// Registration of topics are paced to occur at intervals. +const REGISTER_INTERVAL: Duration = Duration::from_secs(60); + +/// To avoid a self-provoked DoS, registration attempts must be limited per +/// registration interval. +const MAX_REGTOPICS_REGISTER_INTERVAL: usize = 30; + /// The max number of uncontacted peers to store before the kbuckets per topic. const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; @@ -553,7 +564,13 @@ impl Service { /// The main execution loop of the discv5 serviced. async fn start(&mut self) { // In the case where not many peers populate the topic's kbuckets, ensure topics keep being republished. - let mut registration_interval = tokio::time::interval(AD_LIFETIME); + let mut registration_interval = tokio::time::interval(REGISTER_INTERVAL); + let mut topics_to_reg_iter = self + .registration_attempts + .keys() + .copied() + .collect::>() + .into_iter(); loop { tokio::select! { @@ -867,8 +884,10 @@ impl Service { ticket_pool.retain(|node_id, pool_ticket| self.ads.ticket_wait_time(topic, *node_id, *pool_ticket.ip()) == None); // Select ticket with longest cummulative wait time. 
if let Some(pool_ticket) = ticket_pool.values().max_by_key(|pool_ticket| pool_ticket.ticket().cum_wait()) { - self.ads.insert(pool_ticket.node_record().clone(), topic).ok(); - NodeContact::try_from_enr(pool_ticket.node_record().clone(), self.config.ip_mode).map(|contact| { + let enr = pool_ticket.node_record(); + let node_id = enr.node_id(); + let _ = self.ads.insert(enr.clone(), topic).map_err(|e| error!("Couldn't insert ad from node id {} into ads. Error {}", node_id, e)); + NodeContact::try_from_enr(enr.clone(), self.config.ip_mode).map(|contact| { self.send_regconfirmation_response(contact.node_address(), pool_ticket.req_id().clone(), topic); }).ok(); METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); @@ -891,10 +910,18 @@ impl Service { } } _ = registration_interval.tick() => { - let topics_to_reg = self.registration_attempts.keys().copied().collect::>(); - for topic_hash in topics_to_reg { + let mut sent_regtopics = 0; + let mut topic_item = topics_to_reg_iter.next(); + while let Some(topic_hash) = topic_item { trace!("Republishing topic hash {}", topic_hash); - self.send_register_topics(topic_hash); + sent_regtopics += self.send_register_topics(topic_hash); + if sent_regtopics >= MAX_REGTOPICS_REGISTER_INTERVAL { + break + } + topic_item = topics_to_reg_iter.next(); + } + if topic_item.is_none() { + topics_to_reg_iter = self.registration_attempts.keys().copied().collect::>().into_iter(); } } } @@ -902,7 +929,7 @@ impl Service { } /// Internal function that starts a topic registration. - fn send_register_topics(&mut self, topic_hash: TopicHash) { + fn send_register_topics(&mut self, topic_hash: TopicHash) -> usize { trace!("Sending REGTOPICS"); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { trace!( @@ -915,9 +942,10 @@ impl Service { // Ensure that max_reg_attempts_bucket registration attempts are alive per bucket if that many peers are // available at that distance. 
- let max_reg_attempts_bucket = self.config.max_nodes_response; - for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { + if new_peers.len() >= MAX_REGTOPICS_REGISTER_INTERVAL { + break; + } let distance = index as u64 + 1; let mut active_reg_attempts_bucket = 0; @@ -950,7 +978,7 @@ impl Service { if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { if let Some(bucket) = peers.get_mut(&distance) { bucket.retain(|node_id, enr | { - if new_peers_bucket.len() + active_reg_attempts_bucket >= max_reg_attempts_bucket { + if new_peers_bucket.len() + active_reg_attempts_bucket >= MAX_REG_ATTEMPTS_DISTANCE { true } else if let Entry::Vacant(_) = registrations.reg_attempts.entry(*node_id) { debug!("Found new registration peer in uncontacted peers for topic {}. Peer: {:?}", topic_hash, node_id); @@ -968,12 +996,12 @@ impl Service { // The count of active registration attempts for a distance after expired ads have been // removed is less than the max number of registration attempts that should be active // per bucket and is not equal to the total number of peers available in that bucket. - if active_reg_attempts_bucket < self.config.max_nodes_response + if active_reg_attempts_bucket < MAX_REG_ATTEMPTS_DISTANCE && registrations.reg_attempts.len() != bucket.num_entries() { for peer in bucket.iter() { if new_peers_bucket.len() + active_reg_attempts_bucket - >= self.config.max_nodes_response + >= MAX_REG_ATTEMPTS_DISTANCE { break; } @@ -993,16 +1021,22 @@ impl Service { new_peers.append(&mut new_peers_bucket); } } + let mut sent_regtopics = 0; + for peer in new_peers { let local_enr = self.local_enr.read().clone(); if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) .map_err(|e| error!("Failed to send REGTOPIC to peer. Error: {:?}", e)) { self.reg_topic_request(node_contact, topic_hash, local_enr.clone(), None); + // If an uncontacted peer has a faulty enr, don't count the registration attempt. 
+ sent_regtopics += 1; } } + sent_regtopics } else { debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); + 0 } } From 880f63f69ac0222a72d83eed961820ca57ff49fc Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 20 Jul 2022 14:01:15 +0200 Subject: [PATCH 280/391] Set max regtopis per registration interval --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 120bf1497..5de0f3844 100644 --- a/src/service.rs +++ b/src/service.rs @@ -221,7 +221,7 @@ const REGISTER_INTERVAL: Duration = Duration::from_secs(60); /// To avoid a self-provoked DoS, registration attempts must be limited per /// registration interval. -const MAX_REGTOPICS_REGISTER_INTERVAL: usize = 30; +const MAX_REGTOPICS_REGISTER_INTERVAL: usize = 16; /// The max number of uncontacted peers to store before the kbuckets per topic. const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; From 3eff0acfc637ce9c87546e7108be6ac21e334896 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 20 Jul 2022 15:32:51 +0200 Subject: [PATCH 281/391] Fix bug of triggering reg-topics outside of interval --- src/service.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/src/service.rs b/src/service.rs index 5de0f3844..96b2bb79f 100644 --- a/src/service.rs +++ b/src/service.rs @@ -216,11 +216,10 @@ const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); /// if there are sufficient peers. const MAX_REG_ATTEMPTS_DISTANCE: usize = 16; -/// Registration of topics are paced to occur at intervals. +/// Registration of topics are paced to occur at intervals t avoid a self-provoked DoS. const REGISTER_INTERVAL: Duration = Duration::from_secs(60); -/// To avoid a self-provoked DoS, registration attempts must be limited per -/// registration interval. +/// Registration attempts must be limited per registration interval. 
const MAX_REGTOPICS_REGISTER_INTERVAL: usize = 16; /// The max number of uncontacted peers to store before the kbuckets per topic. @@ -698,8 +697,6 @@ impl Service { } self.topics_kbuckets.insert(topic_hash, kbuckets); METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); - - self.send_register_topics(topic_hash); } } ServiceRequest::ActiveTopics(callback) => { @@ -928,7 +925,7 @@ impl Service { } } - /// Internal function that starts a topic registration. + /// Internal function that starts a topic registration. This function should not be called outside of [`REGISTER_INTERVAL`]. fn send_register_topics(&mut self, topic_hash: TopicHash) -> usize { trace!("Sending REGTOPICS"); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { @@ -2551,12 +2548,6 @@ impl Service { if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { query.dry = false; } - // If a topic registration runs dry (not enough regsitration attempts per topic kbucket - // and no more peers to contact) any new peers to contact will come with a NODES response - // to a REGTOPIC request, or a TOPICQUERY if the same topic has also been looked up. - if self.registration_attempts.contains_key(&topic_hash) { - self.send_register_topics(topic_hash); - } return; } From 4b2dac3fc225b4362c142b179cb83c98cd0f4499 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 21 Jul 2022 14:14:55 +0200 Subject: [PATCH 282/391] Correct return type --- src/discv5.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 63e781c01..4d5266771 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -561,22 +561,19 @@ impl Discv5 { // await the response let ad_nodes = callback_recv.await.map_err(|e| { RequestError::ChannelFailed(format!( - "Failed to receive ad nodes from lookup of topic hash {}. 
Error {}", - topic_hash, e + "Failed to receive ad nodes from lookup of topic {} with topic hash {} {}. Error {}", + topic, topic_hash, topic.hash_function_name(), e )) })?; - if let Ok(ad_nodes) = ad_nodes { + if ad_nodes.is_ok() { debug!( - "Received {} ad nodes for topic {} with topic hash {} {}", - ad_nodes.len(), + "Received ad nodes for topic {} with topic hash {} {}", topic, topic_hash, topic.hash_function_name() ); - Ok(ad_nodes) - } else { - Ok(Vec::new()) } + ad_nodes } } From a8bdc31852b24a381b114de820f71bdd4108064f Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Fri, 22 Jul 2022 08:22:32 +0200 Subject: [PATCH 283/391] Clarify docs Co-authored-by: Age Manning --- src/advertisement/ticket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index f139cc7ba..6db821663 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -9,7 +9,7 @@ use more_asserts::debug_unreachable; use node_info::NodeContact; use std::{cmp::Eq, collections::hash_map::Entry}; -/// Max tickets that are stored from one node for a topic (in the configured +/// Max tickets that are stored for an individual node for a topic (in the configured /// time period). const MAX_TICKETS_PER_NODE_TOPIC: u8 = 3; /// The time window in which tickets are accepted for any given free ad slot. From c15c500d1531feb27a73af97a25e07ced424c9e6 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Fri, 22 Jul 2022 08:23:01 +0200 Subject: [PATCH 284/391] Correct docs Co-authored-by: Age Manning --- src/advertisement/ticket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 6db821663..414b8b4e0 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -22,7 +22,7 @@ const REQUEST_TIMEOUT_IN_SECS: u64 = 15; /// a REGCONFIRMATION response. 
const MAX_RESPONSES_PER_REGTOPIC: u8 = 3; -/// A topic is active when it associated with the node id from a node it is +/// A topic is active when it's associated with the NodeId from a node it is /// published on. #[derive(PartialEq, Eq, Hash, Clone)] pub struct ActiveTopic { From 215467e5a07d01d8f0b50ad334fa10c741a97d0d Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Fri, 22 Jul 2022 08:23:22 +0200 Subject: [PATCH 285/391] Correct docs Co-authored-by: Age Manning --- src/advertisement/ticket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 414b8b4e0..d8f6b6152 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -28,7 +28,7 @@ const MAX_RESPONSES_PER_REGTOPIC: u8 = 3; pub struct ActiveTopic { /// NodeId of the sender of the TICKET response. node_id: NodeId, - /// The topic hash as it is sent in the TICKET response + /// The topic hash as it is sent in the TICKET response. topic: TopicHash, } From 17a05fc9ed691dd59682a31fb0708e5ee93b5d09 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Fri, 22 Jul 2022 08:23:45 +0200 Subject: [PATCH 286/391] Correct docs Co-authored-by: Age Manning --- src/advertisement/ticket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index d8f6b6152..82f287698 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -69,7 +69,7 @@ impl ActiveTicket { } } -/// Tickets holds the tickets recieved in TICKET responses to locally +/// Tickets hold the tickets received in TICKET responses to locally /// initiated REGTOPIC requests. pub struct Tickets { /// Tickets maps one ActiveTicket per ActiveTopic. 
From 9fe32be39b753f5b251ee3f6e18a1a959daf553e Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Fri, 22 Jul 2022 09:51:04 +0200 Subject: [PATCH 287/391] Fix typo Co-authored-by: Age Manning --- src/advertisement/ticket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 82f287698..f32ca0b83 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -337,7 +337,7 @@ impl ActiveRegtopicRequest { } } -/// The ActiveRegtopicRequests keeps ActiveRequests until the have matched +/// The ActiveRegtopicRequests keeps ActiveRequests until they have matched /// with MAX_RESPONSES_PER_REGTOPIC repsonses. #[derive(Default)] pub struct ActiveRegtopicRequests { From d8a229c861cc2370daf4c4137ee4798df2d3f80d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Jul 2022 11:33:59 +0200 Subject: [PATCH 288/391] Add missing docs for advertisement crate --- src/advertisement/mod.rs | 14 +++++-- src/advertisement/ticket.rs | 83 +++++++++++++++++++++++++------------ src/advertisement/topic.rs | 17 ++++++-- 3 files changed, 81 insertions(+), 33 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 3387ed40b..3f1c9125d 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -63,8 +63,8 @@ impl AdTopic { } } -/// The Ads struct contains adveritsed AdNodes. Table is used to refer to all -/// the ads, and the table stores ads by topic. +/// The Ads struct contains adveritsed AdNodes. Topics table is used to refer to +/// all the ads, and the table stores ads by topic. #[derive(Clone, Debug)] pub struct Ads { /// The expirations makes sure that AdNodes are advertised only for the @@ -113,19 +113,25 @@ impl Ads { }) } + /// Checks if there are currently any entries in the topics table. 
pub fn is_empty(&self) -> bool { self.expirations.is_empty() } + /// Returns the amount of ads currently in the topics table. pub fn len(&self) -> usize { self.expirations.len() } + /// Returns an iterator over the ads currently in the topics table for a given topic + /// if any. pub fn get_ad_nodes(&self, topic: TopicHash) -> impl Iterator + '_ { self.ads.get(&topic).into_iter().flatten() } - /// Ticket wait time enforces diversity among adveritsed nodes. + /// Ticket wait time enforces diversity among adveritsed nodes. The ticket wait time is + /// calculated after removing expired entries based on the current state of the topics + /// table (ads). pub fn ticket_wait_time( &mut self, topic: TopicHash, @@ -227,6 +233,7 @@ impl Ads { } } + /// Removes ads that have been in the topics table for at least the ad lifetime specified in [`Ads`]. fn remove_expired(&mut self) { let mut to_remove_ads: HashMap = HashMap::new(); @@ -269,6 +276,7 @@ impl Ads { }); } + /// Inserts a unique node record - topic mapping into the topics table after removing expired entries. pub fn insert(&mut self, node_record: Enr, topic: TopicHash) -> Result<(), &str> { self.remove_expired(); let now = Instant::now(); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index f139cc7ba..c61f65939 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -11,16 +11,16 @@ use std::{cmp::Eq, collections::hash_map::Entry}; /// Max tickets that are stored from one node for a topic (in the configured /// time period). -const MAX_TICKETS_PER_NODE_TOPIC: u8 = 3; +const MAX_TICKETS_NODE_TOPIC: u8 = 3; /// The time window in which tickets are accepted for any given free ad slot. const REGISTRATION_WINDOW_IN_SECS: u64 = 10; /// Max nodes that are considered in the selection process for an ad slot. -const MAX_REGISTRANTS_PER_AD_SLOT: usize = 50; +const MAX_REGISTRANTS_AD_SLOT: usize = 50; /// The duration for which requests are stored. 
const REQUEST_TIMEOUT_IN_SECS: u64 = 15; /// Each REGTOPIC request gets a TICKET response, NODES response and can get /// a REGCONFIRMATION response. -const MAX_RESPONSES_PER_REGTOPIC: u8 = 3; +const MAX_RESPONSES_REGTOPIC: u8 = 3; /// A topic is active when it associated with the node id from a node it is /// published on. @@ -33,14 +33,18 @@ pub struct ActiveTopic { } impl ActiveTopic { + /// Makes a topic active (currently associated with an ad slot or a ticket) by + /// associating it with a node id. pub fn new(node_id: NodeId, topic: TopicHash) -> Self { ActiveTopic { node_id, topic } } + /// Returns the topic of a topic that is active. pub fn topic(&self) -> TopicHash { self.topic } + /// Returns the node id of a topic that is active. pub fn node_id(&self) -> &NodeId { &self.node_id } @@ -56,26 +60,31 @@ pub struct ActiveTicket { } impl ActiveTicket { + /// Makes a ticket active (currently stored waiting to be used in a new registration + /// attempt when its ticket wait time has expired) by associating it with a node + /// contact. pub fn new(contact: NodeContact, ticket: Vec) -> Self { ActiveTicket { contact, ticket } } + /// Returns the node contact of a ticket that is active. pub fn contact(&self) -> NodeContact { self.contact.clone() } + /// Returns the ticket of a ticket that is active. pub fn ticket(&self) -> Vec { self.ticket.clone() } } -/// Tickets holds the tickets recieved in TICKET responses to locally -/// initiated REGTOPIC requests. +/// Tickets holds the tickets recieved in TICKET responses to locally initiated +/// REGTOPIC requests. pub struct Tickets { - /// Tickets maps one ActiveTicket per ActiveTopic. + /// Tickets maps an [`ActiveTopic`] to an [`ActiveTicket`]. tickets: HashMapDelay, - /// TicketHistory sets a time limit to how many times the ActiveTicket - /// value in tickets can be updated within a given ticket_limiter_duration. 
+ /// TicketHistory sets a time limit to how many times the [`ActiveTicket`] + /// value in tickets can be updated within a given ticket limit duration. ticket_history: TicketHistory, } @@ -87,6 +96,7 @@ impl Tickets { } } + /// Inserts a ticket into [`Tickets`] if the state of [`TicketHistory`] allows it. pub fn insert( &mut self, contact: NodeContact, @@ -96,9 +106,8 @@ impl Tickets { ) -> Result<(), &str> { let active_topic = ActiveTopic::new(contact.node_id(), topic); - if let Err(e) = self.ticket_history.insert(active_topic.clone()) { - return Err(e); - } + self.ticket_history.insert(active_topic.clone())?; + self.tickets .insert_at(active_topic, ActiveTicket::new(contact, ticket), wait_time); Ok(()) @@ -145,24 +154,32 @@ struct TicketHistory { /// to an ActiveTopic in ticket_count. expirations: VecDeque, /// The time a PendingTicket remains in expirations. - ticket_limiter_duration: Duration, + ticket_limit_duration: Duration, } impl TicketHistory { - fn new(ticket_limiter_duration: Duration) -> Self { + fn new(ticket_limit_duration: Duration) -> Self { TicketHistory { ticket_count: HashMap::new(), expirations: VecDeque::new(), - ticket_limiter_duration, + ticket_limit_duration, } } + /// Inserts a ticket into [`TicketHistory`] unless the ticket of the given active + /// topic has already been updated the limit amount of [`MAX_TICKETS_NODE_TOPIC`] + /// times per ticket limit duration, then it is discarded and an error is returned. + /// Expired entries are removed before insertion. 
pub fn insert(&mut self, active_topic: ActiveTopic) -> Result<(), &str> { self.remove_expired(); let insert_time = Instant::now(); let count = self.ticket_count.entry(active_topic.clone()).or_default(); - if *count >= MAX_TICKETS_PER_NODE_TOPIC { - debug!("Max 3 tickets per (NodeId, Topic) accepted in 15 minutes"); + if *count >= MAX_TICKETS_NODE_TOPIC { + debug!( + "Max {} tickets per NodeId - Topic mapping accepted in {} minutes", + MAX_TICKETS_NODE_TOPIC, + self.ticket_limit_duration.as_secs() + ); return Err("Ticket limit reached"); } *count += 1; @@ -173,9 +190,12 @@ impl TicketHistory { Ok(()) } + /// Removes entries that have been stored for at least the ticket limit duration. + /// If the same [`ActiveTopic`] is inserted again the count up till + /// [`MAX_TICKETS_NODE_TOPIC`] inserts/updates starts anew. fn remove_expired(&mut self) { let now = Instant::now(); - let ticket_limiter_duration = self.ticket_limiter_duration; + let ticket_limiter_duration = self.ticket_limit_duration; let ticket_count = &mut self.ticket_count; let total_to_remove = self .expirations @@ -201,23 +221,26 @@ impl TicketHistory { } } -/// The RegistrationWindow is the time from when an ad slot becomes free -/// until no more registration attempts are accepted for the ad slot. +/// The RegistrationWindow is the time from when an ad slot becomes free until no more +/// registration attempts are accepted for the ad slot. #[derive(Clone)] struct RegistrationWindow { - /// The RegistrationWindow exists for a specific ad slot, so for a - /// specific topic. + /// The RegistrationWindow exists for a specific ad slot, so for a specific topic. topic: TopicHash, - /// The open_time is used to make sure the RegistrationWindow closes - /// after REGISTRATION_WINDOW_IN_SECS. + /// The open_time is used to make sure the RegistrationWindow closes after + /// REGISTRATION_WINDOW_IN_SECS. open_time: Instant, } /// The tickets that will be considered for an ad slot. 
pub struct PoolTicket { + /// The node record of the node that returned the ticket. enr: Enr, + /// The request id of the REGTOPIC that the ticket was returned in. req_id: RequestId, + /// The returned ticket. ticket: Ticket, + /// The ip address of the node that returned the ticket. ip: IpAddr, } @@ -267,7 +290,7 @@ impl TicketPools { let pool = self.ticket_pools.entry(ticket.topic()).or_default(); // Drop request if pool contains 50 nodes, these nodes are out of luck and // won't be automatically included in next registration window for this topic - if pool.len() < MAX_REGISTRANTS_PER_AD_SLOT { + if pool.len() < MAX_REGISTRANTS_AD_SLOT { if pool.is_empty() { self.expirations.push_back(RegistrationWindow { topic: ticket.topic(), @@ -347,14 +370,17 @@ pub struct ActiveRegtopicRequests { } impl ActiveRegtopicRequests { + /// Checks if there are currently any active REGTOPIC requests. pub fn is_empty(&self) -> bool { self.expirations.is_empty() } + /// Returns the total amount of REGTOPIC requests currently active. pub fn len(&self) -> usize { self.expirations.len() } + /// Removes a specific REGTOPIC request if it exists. pub fn remove(&mut self, req_id: &RequestId) -> Option { if let Some(seen_count) = self.request_history.get_mut(req_id) { *seen_count += 1; @@ -374,8 +400,8 @@ impl ActiveRegtopicRequests { } } - // If NODES response needs to be divided into multiple NODES responses, the request - // must be reinserted. + /// Caution! Reinsert should only be called if a NODES response to a REGTOPIC needs to be divided + /// into multiple NODES responses, the request must be reinserted. pub fn reinsert(&mut self, req_id: RequestId) { self.remove_expired(); if let Entry::Occupied(ref mut entry) = self.request_history.entry(req_id) { @@ -383,17 +409,20 @@ impl ActiveRegtopicRequests { } } + /// Inserts a REGTOPIC request into [`ActiveRegtopicRequests`] after removing timed out [`ActiveRegtopicRequest`]s. 
pub fn insert(&mut self, req_id: RequestId, req: ActiveRequest) { self.remove_expired(); let now = Instant::now(); self.requests.insert(req_id.clone(), req); self.request_history - .insert(req_id.clone(), MAX_RESPONSES_PER_REGTOPIC); + .insert(req_id.clone(), MAX_RESPONSES_REGTOPIC); self.expirations .push_back(ActiveRegtopicRequest::new(req_id, now)); } + /// If a REGTOPIC request doesn't receive the expected responses it times out, and calling this + /// function will remove timed out entries. fn remove_expired(&mut self) { let mut expired = Vec::new(); self.expirations diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 071562fdb..642f68456 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -46,6 +46,7 @@ impl Hasher for IdentityHash { #[derive(Debug, Clone)] pub struct Sha256Hash {} + impl Hasher for Sha256Hash { /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the /// hash. @@ -56,6 +57,7 @@ impl Hasher for Sha256Hash { TopicHash { hash } } + /// Returns the name of the hashing algorithm this [`Hasher`] implements. fn hash_function_name() -> String { "Sha256".to_owned() } @@ -71,10 +73,12 @@ pub struct TopicHash { } impl TopicHash { + /// Returns a topic hash wrapping the given 32 bytes. pub fn from_raw(hash: [u8; 32]) -> TopicHash { TopicHash { hash } } + /// Returns the raw 32 bytes inside a topic hash. pub fn as_bytes(&self) -> [u8; 32] { self.hash } @@ -108,10 +112,12 @@ impl fmt::Display for TopicHash { } } -/// A topic, as in sigpi/rust-libp2p/protocols/gossipsub +/// A topic, as in sigpi/rust-libp2p/protocols/gossipsub. #[derive(Debug, Clone)] pub struct Topic { + /// The topic string passed to the topic upon instantiation. topic: String, + /// The configured [`Hasher`] is stored within the topic. phantom_data: std::marker::PhantomData, } @@ -122,6 +128,7 @@ impl From> for TopicHash { } impl Topic { + /// Returns a new topic. 
pub fn new(topic: impl Into) -> Self { Topic { topic: topic.into(), @@ -129,22 +136,26 @@ } } + /// Returns a hash of the topic using the [`Hasher`] configured for the topic. pub fn hash(&self) -> TopicHash { H::hash(self.topic.clone()) } + /// Returns the name of the [`Hasher`] configured for the topic. pub fn hash_function_name(&self) -> String { H::hash_function_name() } + /// Returns the string passed to the topic upon instantiation. pub fn topic(&self) -> String { self.topic.clone() } } -// Each hash algortihm chosen to publish a topic with (as XOR -// metric key) is its own Topic. impl PartialEq for Topic { + /// Each hash algorithm used to publish a hashed topic (as XOR metric key) is in + /// discv5 seen as its own [`Topic`] upon comparison. That means a topic string + /// can be published/registered more than once using different [`Hasher`]s. fn eq(&self, other: &Topic) -> bool { self.hash() == other.hash() } From 6130cb7cd992607dded3c6eedf737f0dbf25d83d Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Fri, 22 Jul 2022 11:44:09 +0200 Subject: [PATCH 289/391] Fix typo Co-authored-by: Age Manning --- src/handler/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 7990cfa18..397721cf4 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -203,7 +203,7 @@ impl RequestCall { } /// TOPICQUERY requests receive 2 types of responses ADNODES and NODES, in an -/// order which cannot be guranteed. If a peer sends the wrong combination of +/// order which cannot be guaranteed. If a peer sends the wrong combination of /// responses the peer is blacklisted. 
#[derive(Default)] pub enum TopicQueryResponseState { From 58217ad6b0f37f5236d2f5963550fc965484dce3 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Fri, 22 Jul 2022 11:44:48 +0200 Subject: [PATCH 290/391] Fix typo Co-authored-by: Age Manning --- src/advertisement/ticket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index b440e9921..261c79239 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -361,7 +361,7 @@ impl ActiveRegtopicRequest { } /// The ActiveRegtopicRequests keeps ActiveRequests until they have matched -/// with MAX_RESPONSES_PER_REGTOPIC repsonses. +/// with MAX_RESPONSES_PER_REGTOPIC responses. #[derive(Default)] pub struct ActiveRegtopicRequests { requests: HashMap, From 043c2fe257d42ee8f76ce442bb746d694320afe9 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Fri, 22 Jul 2022 11:46:42 +0200 Subject: [PATCH 291/391] Clarify bounds Co-authored-by: Age Manning --- src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index 1eb3bf7e8..f8b05b528 100644 --- a/src/config.rs +++ b/src/config.rs @@ -97,7 +97,7 @@ pub struct Discv5Config { pub ban_duration: Option, /// A topic look up should time out after a set duration, after which no more TOPICQUERY requests should - /// be sent to peers regardless of the number of results found. This is in order to avoid starvation. + /// be sent to peers regardless of the number of results found. This is in order to avoid starvation. The default value is 60 seconds. pub topic_query_timeout: Duration, /// A custom executor which can spawn the discv5 tasks. 
This must be a tokio runtime, with From 6bad729554649aba14d620e471422254fcf0e769 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Jul 2022 13:37:57 +0200 Subject: [PATCH 292/391] Follow Rust pattern of new and default --- src/advertisement/mod.rs | 53 +++++++++++++++++++----- src/advertisement/test.rs | 16 ++++---- src/advertisement/ticket.rs | 14 +++++++ src/service.rs | 80 +++++++++++-------------------------- src/service/test.rs | 2 +- 5 files changed, 89 insertions(+), 76 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 3f1c9125d..e810e5103 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -12,12 +12,27 @@ use std::{ }; use tokio::time::Instant; use topic::TopicHash; -use tracing::debug; +use tracing::{debug, error}; mod test; pub mod ticket; pub mod topic; +/// The max nodes to adveritse for a topic. +const MAX_ADS_TOPIC: usize = 100; + +/// The max nodes to advertise. +const MAX_ADS: usize = 50000; + +/// The max ads per subnet per topic. +const MAX_ADS_SUBNET_TOPIC: usize = 5; + +/// The max ads per subnet. +const MAX_ADS_SUBNET: usize = 50; + +/// The time after a REGCONFIRMATION is sent that an ad is placed. +pub const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); + /// An AdNode is a node that occupies an ad slot on another node. #[derive(Debug, Clone)] pub struct AdNode { @@ -77,7 +92,7 @@ pub struct Ads { ad_lifetime: Duration, /// The max_ads_per_topic limit is up to the user although recommnedations /// are given in the specs. - max_ads_per_topic: usize, + max_ads_topic: usize, /// The max_ads limit is up to the user although recommnedations are /// given in the specs. 
max_ads: usize, @@ -92,25 +107,41 @@ pub struct Ads { impl Ads { pub fn new( ad_lifetime: Duration, - max_ads_per_topic: usize, + max_ads_topic: usize, max_ads: usize, max_ads_subnet: usize, max_ads_subnet_topic: usize, - ) -> Result { - if max_ads_per_topic > max_ads || max_ads_subnet_topic > max_ads_subnet { - return Err("Ads per topic [per subnet] cannot be > max_ads [per subnet]"); - } + ) -> Self { + let (max_ads_topic, max_ads, max_ads_subnet, max_ads_subnet_topic) = + if max_ads_topic > max_ads || max_ads_subnet_topic > max_ads_subnet { + error!( + "Ads per topic [per subnet] cannot be > max_ads [per subnet]. Using default values" + ); + return Self::default(); + } else { + (max_ads_topic, max_ads, max_ads_subnet, max_ads_subnet_topic) + }; - Ok(Ads { + Ads { expirations: VecDeque::new(), ads: HashMap::new(), ad_lifetime, - max_ads_per_topic, + max_ads_topic, max_ads, max_ads_subnet, max_ads_subnet_topic, subnet_expirations: HashMap::new(), - }) + } + } + + pub fn default() -> Self { + Ads::new( + AD_LIFETIME, + MAX_ADS_TOPIC, + MAX_ADS, + MAX_ADS_SUBNET, + MAX_ADS_SUBNET_TOPIC, + ) } /// Checks if there are currently any entries in the topics table. @@ -210,7 +241,7 @@ impl Ads { } // Occupancy check to see if the ad slots for a certain topic are full. 
- if nodes.len() >= self.max_ads_per_topic { + if nodes.len() >= self.max_ads_topic { return nodes.front().map(|ad| { let elapsed_time = now.saturating_duration_since(ad.insert_time); self.ad_lifetime.saturating_sub(elapsed_time) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index e4470d9a6..cbcd24913 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -14,7 +14,7 @@ async fn insert_same_node() { let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100).unwrap(); + let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); @@ -43,7 +43,7 @@ async fn insert_ad_and_get_nodes() { let key = CombinedKey::generate_secp256k1(); let enr_2 = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100).unwrap(); + let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); @@ -84,7 +84,7 @@ async fn ticket_wait_time_no_wait_time() { let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(1), 10, 50, 100, 100).unwrap(); + let mut ads = Ads::new(Duration::from_secs(1), 10, 50, 100, 100); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); assert_eq!(ads.ticket_wait_time(topic, enr.node_id(), ip), None) } @@ -97,7 +97,7 @@ async fn ticket_wait_time_duration() { let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let mut ads = Ads::new(Duration::from_secs(3), 1, 3, 100, 
100).unwrap(); + let mut ads = Ads::new(Duration::from_secs(3), 1, 3, 100, 100); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); @@ -131,7 +131,7 @@ async fn ticket_wait_time_full_table() { .build(&key_2) .unwrap(); - let mut ads = Ads::new(Duration::from_secs(3), 2, 3, 100, 100).unwrap(); + let mut ads = Ads::new(Duration::from_secs(3), 2, 3, 100, 100); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); @@ -182,7 +182,7 @@ async fn ticket_wait_time_full_topic() { .build(&key_3) .unwrap(); - let mut ads = Ads::new(Duration::from_secs(3), 2, 4, 100, 100).unwrap(); + let mut ads = Ads::new(Duration::from_secs(3), 2, 4, 100, 100); let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); @@ -220,7 +220,7 @@ async fn ticket_wait_time_full_subnet() { .build(&key_2) .unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 2, 4, 2, 1).unwrap(); + let mut ads = Ads::new(Duration::from_secs(2), 2, 4, 2, 1); let topic_1 = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); let topic_3 = Topic::new(std::str::from_utf8(&[3u8; 32]).unwrap()).hash(); @@ -247,7 +247,7 @@ async fn ticket_wait_time_full_subnet_topic() { .build(&key_2) .unwrap(); - let mut ads = Ads::new(Duration::from_secs(2), 2, 4, 2, 1).unwrap(); + let mut ads = Ads::new(Duration::from_secs(2), 2, 4, 2, 1); let topic_1 = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index b440e9921..b0f68ef55 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -9,15 +9,25 @@ use more_asserts::debug_unreachable; use node_info::NodeContact; use 
std::{cmp::Eq, collections::hash_map::Entry}; +/// The max wait time accepted for tickets. +pub const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; + +/// The time window within which the number of new tickets from a peer for a topic will be limited. +pub const TICKET_LIMIT_DURATION: Duration = Duration::from_secs(60 * 15); + /// Max tickets that are stored for an individual node for a topic (in the configured /// time period). const MAX_TICKETS_NODE_TOPIC: u8 = 3; + /// The time window in which tickets are accepted for any given free ad slot. const REGISTRATION_WINDOW_IN_SECS: u64 = 10; + /// Max nodes that are considered in the selection process for an ad slot. const MAX_REGISTRANTS_AD_SLOT: usize = 50; + /// The duration for which requests are stored. const REQUEST_TIMEOUT_IN_SECS: u64 = 15; + /// Each REGTOPIC request gets a TICKET response, NODES response and can get /// a REGCONFIRMATION response. const MAX_RESPONSES_REGTOPIC: u8 = 3; @@ -96,6 +106,10 @@ impl Tickets { } } + pub fn default() -> Self { + Tickets::new(TICKET_LIMIT_DURATION) + } + /// Inserts a ticket into [`Tickets`] if the state of [`TicketHistory`] allows it. pub fn insert( &mut self, diff --git a/src/service.rs b/src/service.rs index 96b2bb79f..3d1d589c9 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,10 +18,14 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{ActiveRegtopicRequests, TicketPools, Tickets}, + ticket::{ + ActiveRegtopicRequests, TicketPools, Tickets, MAX_WAIT_TIME_TICKET, + TICKET_LIMIT_DURATION, + }, topic::TopicHash, - Ads, + Ads, AD_LIFETIME, }, + discv5::PERMIT_BAN_LIST, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -49,7 +53,7 @@ use parking_lot::RwLock; use rpc::*; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, - io::{Error, ErrorKind}, + io::Error, net::SocketAddr, pin::Pin, sync::{atomic::Ordering, Arc}, @@ -67,6 +71,19 @@ mod test; /// NOTE: This must not be larger than 127. 
pub(crate) const DISTANCES_TO_REQUEST_PER_PEER: usize = 3; +/// The number of registration attempts that should be active per distance +/// if there are sufficient peers. +const MAX_REG_ATTEMPTS_DISTANCE: usize = 16; + +/// Registration of topics are paced to occur at intervals to avoid a self-provoked DoS. +const REGISTER_INTERVAL: Duration = Duration::from_secs(60); + +/// Registration attempts must be limited per registration interval. +const MAX_REGTOPICS_REGISTER_INTERVAL: usize = 16; + +/// The max number of uncontacted peers to store before the kbuckets per topic. +const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; + /// Request type for Protocols using `TalkReq` message. /// /// Automatically responds with an empty body on drop if @@ -191,42 +208,6 @@ pub enum ServiceRequest { ), } -/// The max wait time accpeted for tickets. -const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; - -/// The time window within in which the number of new tickets from a peer for a topic will be limitied. -const TICKET_LIMITER_DURATION: Duration = Duration::from_secs(60 * 15); - -/// The max nodes to adveritse for a topic. -const MAX_ADS_TOPIC: usize = 100; - -/// The max nodes to advertise. -const MAX_ADS: usize = 50000; - -/// The max ads per subnet per topic. -const MAX_ADS_SUBNET_TOPIC: usize = 5; - -/// The max ads per subnet. -const MAX_ADS_SUBNET: usize = 50; - -/// The time after a REGCONFIRMATION is sent that an ad is placed. -const AD_LIFETIME: Duration = Duration::from_secs(60 * 15); - -/// The number of registration attempts that should be active per distance -/// if there are sufficient peers. -const MAX_REG_ATTEMPTS_DISTANCE: usize = 16; - -/// Registration of topics are paced to occur at intervals t avoid a self-provoked DoS. -const REGISTER_INTERVAL: Duration = Duration::from_secs(60); - -/// Registration attempts must be limited per registration interval. 
-const MAX_REGTOPICS_REGISTER_INTERVAL: usize = 16; - -/// The max number of uncontacted peers to store before the kbuckets per topic. -const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; - -use crate::discv5::PERMIT_BAN_LIST; - pub struct Service { /// Configuration parameters. config: Discv5Config, @@ -503,19 +484,6 @@ impl Service { let (discv5_send, discv5_recv) = mpsc::channel(30); let (exit_send, exit) = oneshot::channel(); - let ads = match Ads::new( - AD_LIFETIME, - MAX_ADS_TOPIC, - MAX_ADS, - MAX_ADS_SUBNET, - MAX_ADS_SUBNET_TOPIC, - ) { - Ok(ads) => ads, - Err(e) => { - return Err(Error::new(ErrorKind::InvalidInput, e)); - } - }; - config .executor .clone() @@ -538,12 +506,12 @@ impl Service { peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, - ads, + ads: Ads::default(), registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), ticket_key: rand::random(), - tickets: Tickets::new(TICKET_LIMITER_DURATION), + tickets: Tickets::default(), ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, @@ -713,7 +681,7 @@ impl Service { false } } - RegistrationState::TicketLimit(insert_time) => insert_time.elapsed() < TICKET_LIMITER_DURATION, + RegistrationState::TicketLimit(insert_time) => insert_time.elapsed() < TICKET_LIMIT_DURATION, RegistrationState::Ticket => true, } }); @@ -961,7 +929,7 @@ impl Service { false } } - RegistrationState::TicketLimit(insert_time) => insert_time.elapsed() < TICKET_LIMITER_DURATION, + RegistrationState::TicketLimit(insert_time) => insert_time.elapsed() < TICKET_LIMIT_DURATION, RegistrationState::Ticket => { active_reg_attempts_bucket += 1; true diff --git a/src/service/test.rs b/src/service/test.rs index 7eb6952ca..02831cb06 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -96,7 +96,7 @@ async fn build_service( peers_to_ping: HashSetDelay::new(config.ping_interval), 
discv5_recv, event_stream: None, - ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3).unwrap(), + ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3), ticket_key: rand::random(), tickets: Tickets::new(Duration::from_secs(60 * 15)), registration_attempts: HashMap::new(), From 080131b5271b9df5c2e7b0338d579efacfbacc32 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Jul 2022 15:07:17 +0200 Subject: [PATCH 293/391] Constrain ad insertion and deactivate pool tickets --- src/advertisement/mod.rs | 19 +++++++------- src/advertisement/test.rs | 52 ++++++++++++++++++------------------- src/advertisement/ticket.rs | 8 +++--- src/config.rs | 2 +- src/rpc.rs | 38 ++++++++++++--------------- src/service.rs | 19 +++++++++----- src/service/test.rs | 2 +- 7 files changed, 70 insertions(+), 70 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index e810e5103..5fb7a269a 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -308,7 +308,15 @@ impl Ads { } /// Inserts a unique node record - topic mapping into the topics table after removing expired entries. - pub fn insert(&mut self, node_record: Enr, topic: TopicHash) -> Result<(), &str> { + pub fn insert( + &mut self, + node_record: Enr, + topic: TopicHash, + ip: IpAddr, + ) -> Result<(), (Duration, &str)> { + if let Some(wait_time) = self.ticket_wait_time(topic, node_record.node_id(), ip) { + return Err((wait_time, "There is currently no ad slot free for this node - topic combination. 
Discarding registration attempt.")); + } self.remove_expired(); let now = Instant::now(); @@ -324,16 +332,9 @@ impl Ads { .or_insert_with(VecDeque::new); subnet_expirires.push_back(now); } - let nodes = self.ads.entry(topic).or_default(); + let ad_node = AdNode::new(node_record, now); - if nodes.contains(&ad_node) { - debug!( - "This node {} is already advertising this topic", - ad_node.node_record().node_id() - ); - return Err("Node already advertising this topic"); - } nodes.push_back(ad_node); self.expirations.push_back(AdTopic::new(topic, now)); Ok(()) diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs index cbcd24913..36fbf525c 100644 --- a/src/advertisement/test.rs +++ b/src/advertisement/test.rs @@ -18,16 +18,13 @@ async fn insert_same_node() { let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); - ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic, ip).unwrap(); // Since 2 seconds haven't passed - assert_eq!( - ads.insert(enr.clone(), topic), - Err("Node already advertising this topic") - ); + assert_ne!(ads.insert(enr.clone(), topic, ip), Ok(())); tokio::time::sleep(Duration::from_secs(2)).await; - ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic, ip).unwrap(); } #[tokio::test] @@ -38,10 +35,14 @@ async fn insert_ad_and_get_nodes() { let key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let port = 5000; - let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); - let enr_2 = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); + let port_2 = 5000; + let ip_2: IpAddr = "192.168.0.2".parse().unwrap(); + let key_2 = CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4") + .ip(ip_2) + .udp4(port_2) + .build(&key_2) + .unwrap(); let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100); @@ -49,19 +50,16 @@ async fn insert_ad_and_get_nodes() { let 
topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add an ad for topic from enr - ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic, ip).unwrap(); // The ad hasn't expired and duplicates are not allowed - assert_eq!( - ads.insert(enr.clone(), topic), - Err("Node already advertising this topic") - ); + assert_ne!(ads.insert(enr.clone(), topic, ip), Ok(())); // Add an ad for topic from enr_2 - ads.insert(enr_2.clone(), topic).unwrap(); + ads.insert(enr_2.clone(), topic, ip_2).unwrap(); // Add an ad for topic_2 from enr - ads.insert(enr.clone(), topic_2).unwrap(); + ads.insert(enr.clone(), topic_2, ip).unwrap(); let nodes: Vec<&Enr> = ads .get_ad_nodes(topic) @@ -102,7 +100,7 @@ async fn ticket_wait_time_duration() { let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); // Add an add for topic - ads.insert(enr.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic, ip).unwrap(); assert_gt!( ads.ticket_wait_time(topic, enr.node_id(), ip), @@ -137,13 +135,13 @@ async fn ticket_wait_time_full_table() { let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add 2 ads for topic - ads.insert(enr.clone(), topic).unwrap(); - ads.insert(enr_2.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic, ip).unwrap(); + ads.insert(enr_2.clone(), topic, ip_2).unwrap(); tokio::time::sleep(Duration::from_secs(2)).await; // Add an ad for topic_2 - ads.insert(enr.clone(), topic_2).unwrap(); + ads.insert(enr.clone(), topic_2, ip).unwrap(); // Now max_ads in table is reached so the second ad for topic_2 has to wait assert_ne!(ads.ticket_wait_time(topic_2, enr.node_id(), ip), None); @@ -188,14 +186,14 @@ async fn ticket_wait_time_full_topic() { let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); // Add 2 ads for topic - ads.insert(enr.clone(), topic).unwrap(); - ads.insert(enr_2.clone(), topic).unwrap(); + ads.insert(enr.clone(), topic, ip).unwrap(); + 
ads.insert(enr_2.clone(), topic, ip_2).unwrap(); // Now max_ads_per_topic is reached for topic assert_ne!(ads.ticket_wait_time(topic, enr_3.node_id(), ip_3), None); // Add a topic_2 ad - ads.insert(enr.clone(), topic_2).unwrap(); + ads.insert(enr.clone(), topic_2, ip).unwrap(); // The table isn't full so topic_2 ads don't have to wait assert_eq!(ads.ticket_wait_time(topic_2, enr_2.node_id(), ip_2), None); @@ -225,8 +223,8 @@ async fn ticket_wait_time_full_subnet() { let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); let topic_3 = Topic::new(std::str::from_utf8(&[3u8; 32]).unwrap()).hash(); - ads.insert(enr.clone(), topic_1).unwrap(); - ads.insert(enr_2, topic_2).unwrap(); + ads.insert(enr.clone(), topic_1, ip).unwrap(); + ads.insert(enr_2, topic_2, ip_2).unwrap(); assert_ne!(ads.ticket_wait_time(topic_3, enr.node_id(), ip), None); } @@ -251,7 +249,7 @@ async fn ticket_wait_time_full_subnet_topic() { let topic_1 = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); - ads.insert(enr.clone(), topic_1).unwrap(); + ads.insert(enr.clone(), topic_1, ip).unwrap(); assert_ne!(ads.ticket_wait_time(topic_1, enr_2.node_id(), ip), None); assert_eq!(ads.ticket_wait_time(topic_2, enr.node_id(), ip), None); diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 69cc96081..935e5657f 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -23,7 +23,7 @@ const MAX_TICKETS_NODE_TOPIC: u8 = 3; const REGISTRATION_WINDOW_IN_SECS: u64 = 10; /// Max nodes that are considered in the selection process for an ad slot. -const MAX_REGISTRANTS_AD_SLOT: usize = 50; +//const MAX_REGISTRANTS_AD_SLOT: usize = 50; /// The duration for which requests are stored. 
const REQUEST_TIMEOUT_IN_SECS: u64 = 15; @@ -298,8 +298,8 @@ pub struct TicketPools { } impl TicketPools { - pub fn insert(&mut self, node_record: Enr, req_id: RequestId, ticket: Ticket, ip: IpAddr) { - if let Some(open_time) = ticket.req_time().checked_add(ticket.wait_time()) { + pub fn insert(&mut self, _node_record: Enr, _req_id: RequestId, _ticket: Ticket, _ip: IpAddr) { + /*if let Some(open_time) = ticket.req_time().checked_add(ticket.wait_time()) { if open_time.elapsed() <= Duration::from_secs(REGISTRATION_WINDOW_IN_SECS) { let pool = self.ticket_pools.entry(ticket.topic()).or_default(); // Drop request if pool contains 50 nodes, these nodes are out of luck and @@ -317,7 +317,7 @@ impl TicketPools { ); } } - } + }*/ } } diff --git a/src/config.rs b/src/config.rs index 6e2bc830f..254225a2d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -97,7 +97,7 @@ pub struct Discv5Config { pub ban_duration: Option, /// A topic look up should time out after a set duration, after which no more TOPICQUERY requests should - /// be sent to peers regardless of the number of results found. This is in order to avoid starvation. The + /// be sent to peers regardless of the number of results found. This is in order to avoid starvation. The /// default value is 60 seconds. pub topic_query_timeout: Duration, diff --git a/src/rpc.rs b/src/rpc.rs index 0a9a4eb79..5a2c7fe2d 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -2,11 +2,8 @@ use crate::advertisement::topic::TopicHash; use enr::{CombinedKey, Enr, NodeId}; use more_asserts::debug_unreachable; use rlp::{DecoderError, Rlp, RlpStream}; -use std::{ - net::{IpAddr, Ipv6Addr}, - time::{SystemTime, UNIX_EPOCH}, -}; -use tokio::time::{Duration, Instant}; +use std::net::{IpAddr, Ipv6Addr}; +use tokio::time::Duration; use tracing::{debug, error, warn}; /// Type to manage the request IDs. 
@@ -736,7 +733,7 @@ pub struct Ticket { src_node_id: NodeId, src_ip: IpAddr, topic: TopicHash, - req_time: Instant, + //req_time: Instant, wait_time: Duration, cum_wait: Duration, } @@ -750,11 +747,11 @@ impl rlp::Encodable for Ticket { IpAddr::V6(addr) => s.append(&(addr.octets().to_vec())), }; s.append(&self.topic); - if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { + /*if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { let time_since_req = self.req_time.elapsed(); let time_stamp = time_since_unix - time_since_req; s.append(&time_stamp.as_secs().to_be_bytes().to_vec()); - } + }*/ s.append(&self.wait_time.as_secs().to_be_bytes().to_vec()); s.append(&self.wait_time.as_secs().to_be_bytes().to_vec()); } @@ -767,7 +764,7 @@ impl rlp::Decodable for Ticket { return Err(DecoderError::RlpExpectedToBeList); } - if rlp.item_count() != Ok(6) { + if rlp.item_count() != Ok(5) { error!( "List has wrong item count, should be 5 but is {:?}", rlp.item_count() @@ -817,7 +814,7 @@ impl rlp::Decodable for Ticket { let topic = decoded_list.remove(0).as_val::()?; - let req_time = { + /*let req_time = { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { let secs_data = decoded_list.remove(0).data()?; let mut secs_bytes = [0u8; 8]; @@ -835,7 +832,7 @@ impl rlp::Decodable for Ticket { } else { return Err(DecoderError::Custom("SystemTime before UNIX EPOCH!")); } - }; + };*/ let wait_time = { let secs_data = decoded_list.remove(0).data()?; @@ -857,7 +854,7 @@ impl rlp::Decodable for Ticket { src_node_id, src_ip, topic, - req_time, + //req_time, wait_time, cum_wait, }) @@ -879,16 +876,15 @@ impl Ticket { src_node_id: NodeId, src_ip: IpAddr, topic: TopicHash, - req_time: Instant, + //req_time: Instant, wait_time: Duration, cum_wait: Duration, ) -> Self { Ticket { - //nonce, src_node_id, src_ip, topic, - req_time, + //req_time, wait_time, cum_wait, } @@ -898,9 +894,9 @@ impl Ticket { self.topic } - pub fn 
req_time(&self) -> Instant { + /*pub fn req_time(&self) -> Instant { self.req_time - } + }*/ pub fn wait_time(&self) -> Duration { self.wait_time @@ -1212,7 +1208,7 @@ mod tests { node_id, ip, TopicHash::from_raw([1u8; 32]), - Instant::now(), + //Instant::now(), Duration::from_secs(11), Duration::from_secs(25), ); @@ -1248,7 +1244,7 @@ mod tests { node_id, ip, TopicHash::from_raw([1u8; 32]), - Instant::now(), + //Instant::now(), Duration::from_secs(11), Duration::from_secs(25), ); @@ -1273,7 +1269,7 @@ mod tests { node_id, ip, TopicHash::from_raw([1u8; 32]), - Instant::now(), + //Instant::now(), Duration::from_secs(11), Duration::from_secs(25), ); @@ -1323,7 +1319,7 @@ mod tests { node_id, ip, TopicHash::from_raw([1u8; 32]), - Instant::now(), + //Instant::now(), Duration::from_secs(11), Duration::from_secs(25), ); diff --git a/src/service.rs b/src/service.rs index 3d1d589c9..4ec59b04e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -850,12 +850,17 @@ impl Service { // Select ticket with longest cummulative wait time. if let Some(pool_ticket) = ticket_pool.values().max_by_key(|pool_ticket| pool_ticket.ticket().cum_wait()) { let enr = pool_ticket.node_record(); - let node_id = enr.node_id(); - let _ = self.ads.insert(enr.clone(), topic).map_err(|e| error!("Couldn't insert ad from node id {} into ads. Error {}", node_id, e)); - NodeContact::try_from_enr(enr.clone(), self.config.ip_mode).map(|contact| { - self.send_regconfirmation_response(contact.node_address(), pool_ticket.req_id().clone(), topic); - }).ok(); - METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); + if let Ok(node_contact) = NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) { + let node_id = enr.node_id(); + if let Err((wait_time, e)) = self.ads.insert(enr.clone(), topic, node_contact.socket_addr().ip()) { + error!("Couldn't insert ad from node id {} into ads. 
Error {}", node_id, e); + let new_ticket = Ticket::new(node_id, *pool_ticket.ip(), topic, wait_time, pool_ticket.ticket().cum_wait() + wait_time); + self.send_ticket_response(node_contact.node_address(), pool_ticket.req_id().clone(), new_ticket, wait_time); + } else { + self.send_regconfirmation_response(node_contact.node_address(), pool_ticket.req_id().clone(), topic); + METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); + } + } } } Some(topic_query_progress) = self.active_topic_queries.next() => { @@ -1314,7 +1319,7 @@ impl Service { node_address.node_id, node_address.socket_addr.ip(), topic, - tokio::time::Instant::now(), + //tokio::time::Instant::now(), wait_time, wait_time, ); diff --git a/src/service/test.rs b/src/service/test.rs index 02831cb06..2e42a6cf3 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -226,7 +226,7 @@ async fn encrypt_decrypt_ticket() { node_id, ip, TopicHash::from_raw([1u8; 32]), - tokio::time::Instant::now(), + //tokio::time::Instant::now(), tokio::time::Duration::from_secs(5), tokio::time::Duration::from_secs(25), ); From 1392646061344d0892ae07fc468c214c1bc3be9d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 22 Jul 2022 15:19:22 +0200 Subject: [PATCH 294/391] Fix bug reinserting entry which cannot be immutably borrowed --- src/advertisement/mod.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index 5fb7a269a..e90d5b3b6 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -181,10 +181,13 @@ impl Ads { let wait_time_max_ads_subnet = if let Some(expirations) = self.subnet_expirations.get_mut(&subnet) { if expirations.len() >= self.max_ads_subnet { - expirations.pop_front().map(|insert_time| { + if let Some(insert_time) = expirations.pop_front() { + expirations.push_front(insert_time); let elapsed_time = now.saturating_duration_since(insert_time); - self.ad_lifetime.saturating_sub(elapsed_time) - }) + 
Some(self.ad_lifetime.saturating_sub(elapsed_time)) + } else { + None + } } else { None } From 3a70ac310b0caf49db8697617c4b67aba497524a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 23 Jul 2022 19:10:57 +0200 Subject: [PATCH 295/391] Add missing docs in discv app interface --- src/discv5.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 4d5266771..a66aebe81 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -511,7 +511,7 @@ impl Discv5 { } } - /// Returns an iterator over all ENR node IDs of nodes currently contained in the routing table. + /// Returns an iterator over all ENR node IDs of nodes currently contained in the kbuckets of a given topic. pub fn table_entries_id_topic( &self, topic: &'static str, @@ -536,6 +536,9 @@ impl Discv5 { } } + /// Looks up a given topic on other nodes that, if currently advertising the given topic, return the + /// enrs of those ads. The query keeps going through the given topic's kbuckets until a certain number + /// of results are obtained or the query times out. pub fn topic_query_req( &self, topic: &'static str, @@ -577,8 +580,9 @@ impl Discv5 { } } - /// Removes a topic we do not wish to keep advertising on other nodes, effective - /// from the next interval on. + /// Removes a topic we do not wish to keep advertising on other nodes. This does not tell any nodes + /// we are currently adveritsed on to remove us as advertisements, however in the next registration + /// interval no registration attempts will be made for the topic. pub fn remove_topic( &self, topic_hash: TopicHash, @@ -602,7 +606,10 @@ impl Discv5 { } } - /// Add a topic to keep registering on other nodes. + /// Add a topic to register on other nodes. A topic is continuously re-registered when it is + /// its turn in the registration interval. To avoid bottlenecks, not necessarily all topics + /// nor all distances of a topic's kbuckets are covered in each registration interval. 
To stop + /// registering a topic it must be removed by calling remove_topic. pub fn register_topic( &self, topic: &'static str, @@ -628,6 +635,10 @@ impl Discv5 { } } + /// Retrieves the registration attempts for a given topic, either confirmed registrations that + /// are still active on other nodes or regsitration attempts that returned tickets we are + /// currently waiting on to expire (ticket wait time) before re-attempting registration at that + /// same node. Caution! The returned map will also contain pub fn reg_attempts( &self, topic: &'static str, From 30210cb7f1cc160d5fa0971fcbfc11da65ba61a1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 23 Jul 2022 20:05:00 +0200 Subject: [PATCH 296/391] Move method to associated function --- src/discv5.rs | 15 +++++++++------ src/handler/mod.rs | 4 ++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index a66aebe81..df0eaa0ec 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -435,11 +435,6 @@ impl Discv5 { .collect() } - pub fn hashes(topic: &'static str) -> Vec<(TopicHash, String)> { - let sha256_topic = Topic::new(topic); - vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] - } - /// Requests the ENR of a node corresponding to multiaddr or multi-addr string. /// /// Only `ed25519` and `secp256k1` key types are currently supported. @@ -687,7 +682,7 @@ impl Discv5 { } } - /// Get the ads advertised for other nodes for a given topic. + /// Returns the enrs of ads currently advertised locally on behalf of other nodes for a given topic. pub fn ads( &self, topic: &'static str, @@ -837,3 +832,11 @@ impl Drop for Discv5 { self.shutdown(); } } + +/// Helper function that returns a labeled list of hashes of the given topic string according to +/// all implemented hashing algorithms. Currently only one, Sha256, is implemented. 
+#[allow(dead_code)] +pub fn hashes(topic: &'static str) -> Vec<(TopicHash, String)> { + let sha256_topic = Topic::new(topic); + vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] +} diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 397721cf4..8b43d9bc7 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -794,7 +794,7 @@ impl Handler { self.send(node_address.clone(), auth_packet).await; // Notify the application that the session has been established - let kbucket_addition = match request_call.request.body { + let event = match request_call.request.body { RequestBody::RegisterTopic { topic, enr: _, @@ -808,7 +808,7 @@ impl Handler { } }; self.service_send - .send(kbucket_addition) + .send(event) .await .unwrap_or_else(|e| warn!("Error with sending channel: {}", e)); } From 23af87e5c96dcada48b1d8dd82f99bb600d34d1a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 23 Jul 2022 20:12:36 +0200 Subject: [PATCH 297/391] fixup! Add missing docs in discv app interface --- src/discv5.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index df0eaa0ec..8eb400ee9 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -531,9 +531,9 @@ impl Discv5 { } } - /// Looks up a given topic on other nodes that, if currently advertising the given topic, return the - /// enrs of those ads. The query keeps going through the given topic's kbuckets until a certain number - /// of results are obtained or the query times out. + /// Looks up a given topic on other nodes that, if currently advertising the given topic, return the enrs of + /// those ads. The query keeps going through the given topic's kbuckets until a certain number (passed to + /// [`Service::ActiveTopicQueries`] upon instantiation) of results are obtained or the query times out. 
pub fn topic_query_req( &self, topic: &'static str, From d014b94dd67b0480f30ec9ac4655e9965c1f8edb Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 23 Jul 2022 20:23:49 +0200 Subject: [PATCH 298/391] fixup! Move method to associated function --- src/discv5.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 8eb400ee9..f9ccec00c 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -43,6 +43,13 @@ use crate::metrics::{Metrics, METRICS}; lazy_static! { pub static ref PERMIT_BAN_LIST: RwLock = RwLock::new(crate::PermitBanList::default()); + + /// Helper function that returns a labeled list of hashes of the given topic string according to + /// all implemented hashing algorithms. Currently only one, Sha256, is implemented. + pub static ref HASHES: fn(topic: &'static str) -> Vec<(TopicHash, String)> = |topic| { + let sha256_topic = Topic::new(topic); + vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] + }; } mod test; @@ -532,7 +539,7 @@ impl Discv5 { } /// Looks up a given topic on other nodes that, if currently advertising the given topic, return the enrs of - /// those ads. The query keeps going through the given topic's kbuckets until a certain number (passed to + /// those ads. The query keeps going through the given topic's kbuckets until a certain number (passed to /// [`Service::ActiveTopicQueries`] upon instantiation) of results are obtained or the query times out. pub fn topic_query_req( &self, @@ -832,11 +839,3 @@ impl Drop for Discv5 { self.shutdown(); } } - -/// Helper function that returns a labeled list of hashes of the given topic string according to -/// all implemented hashing algorithms. Currently only one, Sha256, is implemented. 
-#[allow(dead_code)] -pub fn hashes(topic: &'static str) -> Vec<(TopicHash, String)> { - let sha256_topic = Topic::new(topic); - vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] -} From c65debc03f6cbc59608e411f786343fb19ed606e Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 23 Jul 2022 22:19:54 +0200 Subject: [PATCH 299/391] Fix broken docs --- .github/workflows/build.yml | 2 +- src/advertisement/ticket.rs | 4 ++-- src/discv5.rs | 4 ++-- src/ipmode.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a80a2fad2..797fa6c6d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -50,4 +50,4 @@ jobs: - name: Get latest version of stable rust run: rustup update stable - name: Check rustdoc links - run: RUSTDOCFLAGS="--deny broken_intra_doc_links" cargo doc --verbose --workspace --no-deps --document-private-items + run: RUSTDOCFLAGS="--deny rustdoc::broken_intra_doc_links" cargo doc --verbose --workspace --no-deps --document-private-items diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 935e5657f..4c4ed395c 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -17,7 +17,7 @@ pub const TICKET_LIMIT_DURATION: Duration = Duration::from_secs(60 * 15); /// Max tickets that are stored for an individual node for a topic (in the configured /// time period). -const MAX_TICKETS_NODE_TOPIC: u8 = 3; +pub const MAX_TICKETS_NODE_TOPIC: u8 = 3; /// The time window in which tickets are accepted for any given free ad slot. const REGISTRATION_WINDOW_IN_SECS: u64 = 10; @@ -160,7 +160,7 @@ struct PendingTicket { /// an ActiveTopic within the time limit given by ticket_limiter_duration /// and limits it to MAX_TICKETS_PER_NODE_TOPIC times. #[derive(Default)] -struct TicketHistory { +pub struct TicketHistory { /// The ticket_count keeps track of how many tickets are stored for the /// ActiveTopic. 
ticket_count: HashMap, diff --git a/src/discv5.rs b/src/discv5.rs index f9ccec00c..3c729ac95 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -20,7 +20,7 @@ use crate::{ NodeStatus, UpdateResult, }, node_info::NodeContact, - service::{QueryKind, RegAttempts, Service, ServiceRequest, TalkRequest}, + service::{ActiveTopicQueries, QueryKind, RegAttempts, Service, ServiceRequest, TalkRequest}, Discv5Config, Enr, }; use enr::{CombinedKey, EnrError, EnrKey, NodeId}; @@ -540,7 +540,7 @@ impl Discv5 { /// Looks up a given topic on other nodes that, if currently advertising the given topic, return the enrs of /// those ads. The query keeps going through the given topic's kbuckets until a certain number (passed to - /// [`Service::ActiveTopicQueries`] upon instantiation) of results are obtained or the query times out. + /// [`ActiveTopicQueries`] upon instantiation) of results are obtained or the query times out. pub fn topic_query_req( &self, topic: &'static str, diff --git a/src/ipmode.rs b/src/ipmode.rs index 80e72f3a2..e7ae8eb55 100644 --- a/src/ipmode.rs +++ b/src/ipmode.rs @@ -237,7 +237,7 @@ mod tests { } } -/// Copied from the standard library. See https://github.com/rust-lang/rust/issues/27709 +/// Copied from the standard library. See /// The current code is behind the `ip` feature. 
pub const fn to_ipv4_mapped(ip: &std::net::Ipv6Addr) -> Option { match ip.octets() { From 9636ea4b24c15b6963f18e96ab4f478e65055a1a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 24 Jul 2022 14:03:56 +0200 Subject: [PATCH 300/391] Fix clippy warning --- src/discv5.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 3c729ac95..44825e242 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -20,7 +20,7 @@ use crate::{ NodeStatus, UpdateResult, }, node_info::NodeContact, - service::{ActiveTopicQueries, QueryKind, RegAttempts, Service, ServiceRequest, TalkRequest}, + service::{QueryKind, RegAttempts, Service, ServiceRequest, TalkRequest}, Discv5Config, Enr, }; use enr::{CombinedKey, EnrError, EnrKey, NodeId}; @@ -540,7 +540,7 @@ impl Discv5 { /// Looks up a given topic on other nodes that, if currently advertising the given topic, return the enrs of /// those ads. The query keeps going through the given topic's kbuckets until a certain number (passed to - /// [`ActiveTopicQueries`] upon instantiation) of results are obtained or the query times out. + /// [`crate::service::ActiveTopicQueries`] upon instantiation) of results are obtained or the query times out. pub fn topic_query_req( &self, topic: &'static str, From fc26548000eb9605549d91f575c806a4183222c2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 24 Jul 2022 15:31:35 +0200 Subject: [PATCH 301/391] Fix const function --- src/discv5.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 44825e242..06af6c73e 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -43,15 +43,16 @@ use crate::metrics::{Metrics, METRICS}; lazy_static! { pub static ref PERMIT_BAN_LIST: RwLock = RwLock::new(crate::PermitBanList::default()); - - /// Helper function that returns a labeled list of hashes of the given topic string according to - /// all implemented hashing algorithms. 
Currently only one, Sha256, is implemented. - pub static ref HASHES: fn(topic: &'static str) -> Vec<(TopicHash, String)> = |topic| { - let sha256_topic = Topic::new(topic); - vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] - }; } +/// Helper function that returns a labeled list of hashes of the given topic string according to +/// all implemented hashing algorithms. Currently only one, Sha256, is implemented. +#[allow(dead_code)] +pub static HASHES: fn(topic: &'static str) -> Vec<(TopicHash, String)> = |topic| { + let sha256_topic = Topic::new(topic); + vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] +}; + mod test; /// Events that can be produced by the `Discv5` event stream. From a0a1a26ffc01d169631d2ff368e34082ab713dc6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 24 Jul 2022 15:56:43 +0200 Subject: [PATCH 302/391] Export global helper function --- src/discv5.rs | 1 - src/lib.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 06af6c73e..52c893334 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -47,7 +47,6 @@ lazy_static! { /// Helper function that returns a labeled list of hashes of the given topic string according to /// all implemented hashing algorithms. Currently only one, Sha256, is implemented. 
-#[allow(dead_code)] pub static HASHES: fn(topic: &'static str) -> Vec<(TopicHash, String)> = |topic| { let sha256_topic = Topic::new(topic); vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] diff --git a/src/lib.rs b/src/lib.rs index aaab983fa..6e19b8e4d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -125,7 +125,7 @@ extern crate lazy_static; pub type Enr = enr::Enr; -pub use crate::discv5::{Discv5, Discv5Event}; +pub use crate::discv5::{HASHES, Discv5, Discv5Event}; pub use config::{Discv5Config, Discv5ConfigBuilder}; pub use error::{Discv5Error, QueryError, RequestError, ResponseError}; pub use executor::{Executor, TokioExecutor}; From b47b455cbb1a3fd8e5ae46a0fba5bd050f5388e1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sun, 24 Jul 2022 16:04:52 +0200 Subject: [PATCH 303/391] Remove static lifetime --- src/discv5.rs | 2 +- src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 52c893334..93e8eca93 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -47,7 +47,7 @@ lazy_static! { /// Helper function that returns a labeled list of hashes of the given topic string according to /// all implemented hashing algorithms. Currently only one, Sha256, is implemented. 
-pub static HASHES: fn(topic: &'static str) -> Vec<(TopicHash, String)> = |topic| { +pub static HASHES: for<'a> fn(topic: &'a str) -> Vec<(TopicHash, String)> = |topic| { let sha256_topic = Topic::new(topic); vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] }; diff --git a/src/lib.rs b/src/lib.rs index 6e19b8e4d..aeb913ac0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -125,7 +125,7 @@ extern crate lazy_static; pub type Enr = enr::Enr; -pub use crate::discv5::{HASHES, Discv5, Discv5Event}; +pub use crate::discv5::{Discv5, Discv5Event, HASHES}; pub use config::{Discv5Config, Discv5ConfigBuilder}; pub use error::{Discv5Error, QueryError, RequestError, ResponseError}; pub use executor::{Executor, TokioExecutor}; From c5c158ef07220d2a2c9772c039aa452ea9745a6c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 25 Jul 2022 12:08:35 +0200 Subject: [PATCH 304/391] Correct poll behaviour for ticket stream --- src/advertisement/ticket.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 4c4ed395c..68630e5db 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -136,9 +136,15 @@ impl Stream for Tickets { Poll::Ready(Some(Ok((active_topic, ticket)))) } Poll::Ready(Some(Err(e))) => { - debug!("{}", e); - Poll::Pending + error!( + "Failed to fetch next ticket with expired wait time. Error {}", + e + ); + Poll::Ready(Some(Err(e))) } + // When the hashmap delay holding tickets is empty, as we poll this tickets stream in a + // select! statement, to avoid re-polling the stream till it fills up again with new + // tickets pending a re-attempt at registration we return Poll::Pending. 
Poll::Ready(None) => Poll::Pending, Poll::Pending => Poll::Pending, } From 47578168c042a94ccb5274fe90b6f0dfd2648ca5 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 27 Jul 2022 15:10:46 +1000 Subject: [PATCH 305/391] Attempt to simplify the handler --- src/handler/active_requests.rs | 8 +- src/handler/mod.rs | 569 ++++++--------------------------- src/handler/request_call.rs | 165 ++++++++++ src/handler/tests.rs | 2 +- 4 files changed, 266 insertions(+), 478 deletions(-) create mode 100644 src/handler/request_call.rs diff --git a/src/handler/active_requests.rs b/src/handler/active_requests.rs index 497c8e4a1..f488d02f8 100644 --- a/src/handler/active_requests.rs +++ b/src/handler/active_requests.rs @@ -21,7 +21,7 @@ impl ActiveRequests { } pub(crate) fn insert(&mut self, node_address: NodeAddress, request_call: RequestCall) { - let nonce = *request_call.packet.message_nonce(); + let nonce = *request_call.packet().message_nonce(); self.active_requests_mapping .insert(node_address.clone(), request_call); self.active_requests_nonce_mapping @@ -55,7 +55,7 @@ impl ActiveRequests { // Remove the associated nonce mapping. match self .active_requests_nonce_mapping - .remove(request_call.packet.message_nonce()) + .remove(request_call.packet().message_nonce()) { Some(_) => Some(request_call), None => { @@ -84,7 +84,7 @@ impl ActiveRequests { } for (address, request) in self.active_requests_mapping.iter() { - let nonce = request.packet.message_nonce(); + let nonce = request.packet().message_nonce(); if !self.active_requests_nonce_mapping.contains_key(nonce) { panic!("Address {} maps to request with nonce {:?}, which does not exist in `active_requests_nonce_mapping`", address, nonce); } @@ -99,7 +99,7 @@ impl Stream for ActiveRequests { Poll::Ready(Some(Ok((node_address, request_call)))) => { // Remove the associated nonce mapping. 
self.active_requests_nonce_mapping - .remove(request_call.packet.message_nonce()); + .remove(request_call.packet().message_nonce()); Poll::Ready(Some(Ok((node_address, request_call)))) } Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))), diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 8b43d9bc7..bea26cf08 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -42,7 +42,7 @@ use enr::{CombinedKey, NodeId}; use futures::prelude::*; use parking_lot::RwLock; use std::{ - collections::{hash_map::Entry, HashMap}, + collections::HashMap, convert::TryFrom, default::Default, net::SocketAddr, @@ -56,6 +56,7 @@ use tracing::{debug, error, trace, warn}; mod active_requests; mod crypto; +mod request_call; mod session; mod tests; @@ -65,6 +66,7 @@ use crate::metrics::METRICS; use crate::lru_time_cache::LruTimeCache; use active_requests::ActiveRequests; +use request_call::RequestCall; use session::Session; // The time interval to check banned peer timeouts and unban peers when the timeout has elapsed (in @@ -155,97 +157,10 @@ pub struct Challenge { remote_enr: Option, } -/// A request to a node that we are waiting for a response. -#[derive(Debug, Clone)] -pub(crate) struct RequestCall { - contact: NodeContact, - /// The raw discv5 packet sent. - packet: Packet, - /// The unencrypted message. Required if need to re-encrypt and re-send. - request: Request, - /// Handshakes attempted. - handshake_sent: bool, - /// The number of times this request has been re-sent. - retries: u8, - /// If we receive a Nodes Response with a total greater than 1. This keeps track of the - /// remaining responses expected. - remaining_responses: Option, - /// If we receive a AdNodes Response with a total greater than 1. This keeps track of the - /// remaining responses expected. - remaining_adnode_responses: Option, - /// Signifies if we are initiating the session with a random packet. This is only used to - /// determine the connection direction of the session. 
- initiating_session: bool, -} - -impl RequestCall { - fn new( - contact: NodeContact, - packet: Packet, - request: Request, - initiating_session: bool, - ) -> Self { - RequestCall { - contact, - packet, - request, - handshake_sent: false, - retries: 1, - remaining_responses: None, - remaining_adnode_responses: None, - initiating_session, - } - } - - fn id(&self) -> &RequestId { - &self.request.id - } -} - -/// TOPICQUERY requests receive 2 types of responses ADNODES and NODES, in an -/// order which cannot be guaranteed. If a peer sends the wrong combination of -/// responses the peer is blacklisted. -#[derive(Default)] -pub enum TopicQueryResponseState { - /// The Start state is intermediary upon receving the first response to the - /// TOPICQUERY request, either a NODES or ADNODES response. - #[default] - Start, - /// A NODES response has been completely received. - Nodes, - /// An ADNODES response has been completely received. - AdNodes, -} - -/// REGTOPIC requests receive 3 types of responses TICKET, NODES and possibly -/// a REGCONFIRMATION. The order of the ticket and nodes is non-determinsitic -/// but the regconf, if it comes, always come up to 10 seconds (depending on -/// when in the registration window the request comes) + latency later. If a -/// peer sends the wrong permutation of responses the peer is blacklisted. -#[derive(Default)] -pub enum RegTopicResponseState { - /// The Start state is intermediary upon receving the first response to the - /// REGTOPIC request, either a NODES or TICKET response. - #[default] - Start, - /// A NODES response has been completely received. - Nodes, - /// A TICKET response has been received. - Ticket, - /// A REGISTERCONFIRMATION response has been received. - RegisterConfirmation, -} - -/// The time out for awaiting REGCONFIRMATION responses is the registration window (10 seconds) -/// plus some seconds for processing. 
-const TIMEOUT_REGCONFIRMATION: Duration = Duration::from_secs(15); - /// Process to handle handshakes and sessions established from raw RPC communications between nodes. pub struct Handler { /// Configuration for the discv5 service. request_retries: u8, - /// Configuration for the discv5 service of duration for which nodes are banned. - ban_duration: Option, /// The local node id to save unnecessary read locks on the ENR. The NodeID should not change /// during the operation of the server. node_id: NodeId, @@ -255,16 +170,8 @@ pub struct Handler { key: Arc>, /// Pending raw requests. active_requests: ActiveRequests, - /// Pending raw REGTOPIC requests awaiting a REGCONFIRMATION response that may come. - active_requests_regconf: ActiveRequests, /// The expected responses by SocketAddr which allows packets to pass the underlying filter. filter_expected_responses: Arc>>, - /// Keeps track of the 2 expected responses, NODES and ADNODES that should be received from a - /// TOPICQUERY request. - topic_query_responses: HashMap, - /// Keeps track of the 3 expected responses, NODES and TICKET that should be received from a - /// REGTOPIC request, and REGCONFIRMATION that may be recieved. - reg_topic_responses: HashMap, /// Requests awaiting a handshake completion. pending_requests: HashMap>, /// Currently in-progress outbound handshakes (WHOAREYOU packets) with peers. 
@@ -345,18 +252,12 @@ impl Handler { let mut handler = Handler { request_retries: config.request_retries, - ban_duration: config.ban_duration, node_id, enr, key, active_requests: ActiveRequests::new(config.request_timeout), - active_requests_regconf: ActiveRequests::new( - TIMEOUT_REGCONFIRMATION + config.request_timeout, - ), pending_requests: HashMap::new(), filter_expected_responses, - topic_query_responses: HashMap::new(), - reg_topic_responses: HashMap::new(), sessions: LruTimeCache::new( config.session_timeout, Some(config.session_cache_capacity), @@ -400,7 +301,7 @@ impl Handler { self.process_inbound_packet(inbound_packet).await; } Some(Ok((node_address, pending_request))) = self.active_requests.next() => { - trace!("Discarding request {} with timeout", pending_request.request.body); + trace!("Discarding request {} with timeout", pending_request.kind()); self.handle_request_timeout(node_address, pending_request).await; } Some(Ok((node_address, _challenge))) = self.active_challenges.next() => { @@ -495,47 +396,10 @@ impl Handler { node_address: NodeAddress, mut request_call: RequestCall, ) { - if let RequestBody::RegisterTopic { .. } = request_call.request.body { - if let Entry::Occupied(entry) = self.reg_topic_responses.entry(node_address.clone()) { - let response_state = entry.get(); - if let RegTopicResponseState::RegisterConfirmation = response_state { - // There is no guarantee that a REGCONFIRMATION responses should come to a REGTOPIC - // request. A timeout while awaiting a REGCONFIRMATION is not a failure. 
- self.reg_topic_responses.remove(&node_address); - self.remove_expected_response(node_address.socket_addr); - self.send_next_request(node_address).await; - return; - } else if let RegTopicResponseState::Ticket | RegTopicResponseState::Nodes = - response_state - { - self.reg_topic_responses.remove(&node_address); - trace!("Request timed out with {}", node_address); - // Remove the request from the awaiting packet_filter - self.remove_expected_response(node_address.socket_addr); - // The request has timed out. We keep any established session for future use. - self.fail_request(request_call, RequestError::Timeout, false) - .await; - return; - } - } - } else if let RequestBody::TopicQuery { .. } = request_call.request.body { - if let Entry::Occupied(entry) = self.topic_query_responses.entry(node_address.clone()) { - let response_state = entry.get(); - if let TopicQueryResponseState::AdNodes | TopicQueryResponseState::Nodes = - response_state - { - self.topic_query_responses.remove(&node_address); - trace!("Request timed out with {}", node_address); - // Remove the request from the awaiting packet_filter - self.remove_expected_response(node_address.socket_addr); - // The request has timed out. We keep any established session for future use. - self.fail_request(request_call, RequestError::Timeout, false) - .await; - } - } - return; - } - if request_call.retries >= self.request_retries { + // NOTE: We consider it a node fault if we are waiting for a REGCONFIRMATION and we receive + // a timeout. We should only be waiting for a REGCONFIRMATION if we know one should be + // coming. 
+ if request_call.retries() >= self.request_retries { trace!("Request timed out with {}", node_address); // Remove the request from the awaiting packet_filter self.remove_expected_response(node_address.socket_addr); @@ -546,12 +410,12 @@ impl Handler { // increment the request retry count and restart the timeout trace!( "Resending message: {} to {}", - request_call.request, + request_call.raw_request(), node_address ); - self.send(node_address.clone(), request_call.packet.clone()) + self.send(node_address.clone(), request_call.packet().clone()) .await; - request_call.retries += 1; + request_call.retry(); self.active_requests.insert(node_address, request_call); } } @@ -702,26 +566,26 @@ impl Handler { }; // double check the message nonces match - if request_call.packet.message_nonce() != &request_nonce { + if request_call.packet().message_nonce() != &request_nonce { // This could theoretically happen if a peer uses the same node id across // different connections. - warn!("Received a WHOAREYOU from a non expected source. Source: {}, message_nonce {} , expected_nonce: {}", request_call.contact, hex::encode(request_call.packet.message_nonce()), hex::encode(request_nonce)); + warn!("Received a WHOAREYOU from a non expected source. Source: {}, message_nonce {} , expected_nonce: {}", request_call.contact(), hex::encode(request_call.packet().message_nonce()), hex::encode(request_nonce)); // NOTE: Both mappings are removed in this case. return; } trace!( "Received a WHOAREYOU packet response. Source: {}", - request_call.contact + request_call.contact() ); // We do not allow multiple WHOAREYOU packets for a single challenge request. If we have // already sent a WHOAREYOU ourselves, we drop sessions who send us a WHOAREYOU in // response. - if request_call.handshake_sent { + if request_call.handshake_sent() { warn!( "Authentication response already sent. Dropping session. 
Node: {}", - request_call.contact + request_call.contact() ); self.fail_request(request_call, RequestError::InvalidRemotePacket, true) .await; @@ -739,12 +603,12 @@ impl Handler { // Generate a new session and authentication packet let (auth_packet, mut session) = match Session::encrypt_with_header( - &request_call.contact, + request_call.contact(), self.key.clone(), updated_enr, &self.node_id, &challenge_data, - &(request_call.request.clone().encode()), + &(request_call.raw_request().clone().encode()), ) { Ok(v) => v, Err(e) => { @@ -768,45 +632,45 @@ impl Handler { // // All sent requests must have an associated node_id. Therefore the following // must not panic. - let node_address = request_call.contact.node_address(); - match request_call.contact.enr() { + let node_address = request_call.contact().node_address(); + match request_call.contact().enr() { Some(enr) => { // NOTE: Here we decide if the session is outgoing or ingoing. The condition for an // outgoing session is that we originally sent a RANDOM packet (signifying we did // not have a session for a request) and the packet is not a PING (we are not // trying to update an old session that may have expired. let connection_direction = { - match (&request_call.initiating_session, &request_call.request.body) { + match (request_call.initiating_session(), request_call.kind()) { (true, RequestBody::Ping { .. }) => ConnectionDirection::Incoming, (true, _) => ConnectionDirection::Outgoing, (false, _) => ConnectionDirection::Incoming, } }; - // We already know the ENR. Send the handshake response packet - trace!("Sending Authentication response to node: {}", node_address); - request_call.packet = auth_packet.clone(); - request_call.handshake_sent = true; - request_call.initiating_session = false; - // Reinsert the request_call - self.insert_active_request(request_call.clone()); - // Send the actual packet to the send task. 
- self.send(node_address.clone(), auth_packet).await; - // Notify the application that the session has been established - let event = match request_call.request.body { + let event = match request_call.kind() { RequestBody::RegisterTopic { topic, enr: _, ticket: _, } | RequestBody::TopicQuery { topic } => { - HandlerOut::EstablishedTopic(enr, connection_direction, topic) + HandlerOut::EstablishedTopic(enr, connection_direction, *topic) } _ => { HandlerOut::Established(enr, node_address.socket_addr, connection_direction) } }; + + // We already know the ENR. Send the handshake response packet + trace!("Sending Authentication response to node: {}", node_address); + request_call.upgrade_to_auth_packet(auth_packet.clone()); + request_call.set_initiating_session(false); + // Reinsert the request_call + self.insert_active_request(request_call); + // Send the actual packet to the send task. + self.send(node_address.clone(), auth_packet).await; + self.service_send .send(event) .await @@ -814,12 +678,10 @@ impl Handler { } None => { // Don't know the ENR. 
Establish the session, but request an ENR also - // Send the Auth response - let contact = request_call.contact.clone(); + let contact = request_call.contact().clone(); trace!("Sending Authentication response to node: {}", node_address); - request_call.packet = auth_packet.clone(); - request_call.handshake_sent = true; + request_call.upgrade_to_auth_packet(auth_packet.clone()); // Reinsert the request_call self.insert_active_request(request_call); self.send(node_address.clone(), auth_packet).await; @@ -1113,319 +975,56 @@ impl Handler { // Find a matching request, if any trace!("Received {} response", response.body); - let (request_call, is_regconf) = - if let Some(request_call) = self.active_requests_regconf.remove(&node_address) { - (Some(request_call), true) - } else { - (self.active_requests.remove(&node_address), false) - }; - - if let Some(mut request_call) = request_call { + if let Some(mut request_call) = self.active_requests.remove(&node_address) { if request_call.id() != &response.id { // add the request back and reset the timer - if is_regconf { - trace!( - "Received an RPC Response from a node we are also waiting for a REGISTERCONFIRMATION from. {}", - node_address - ); - self.active_requests_regconf - .insert(node_address, request_call); - } else { - trace!( - "Received an RPC Response to an unknown request. Likely late response. {}", - node_address - ); - self.active_requests.insert(node_address, request_call); - } + trace!( + "Received an RPC Response to an unknown request. Likely late response. 
{}", + node_address + ); + self.active_requests.insert(node_address, request_call); return; } - let blacklist_peer = |handler: &mut Handler| { - // Remove the expected response - handler.remove_expected_response(node_address.socket_addr); - let ban_timeout = handler.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST - .write() - .ban(node_address.clone(), ban_timeout); - }; - // The response matches a request - // Check to see if this is a Nodes response, in which case we may require to wait for - // extra responses - if let ResponseBody::Nodes { total, .. } = response.body { - if total > 1 { - // This is a multi-response Nodes response - if let Some(remaining_responses) = request_call.remaining_responses.as_mut() { - *remaining_responses -= 1; - if remaining_responses != &0 { - trace!("Reinserting active request"); - // more responses remaining, add back the request and send the response - // add back the request and send the response - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response(node_address, Box::new(response))) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - } else { - // This is the first instance - request_call.remaining_responses = Some(total - 1); - // add back the request and send the response - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response(node_address, Box::new(response))) - .await - { - warn!("Failed to inform of response {}", e) - } + // Check to see if the matching request requires us to wait for extra responses. + match response.body { + ResponseBody::Nodes { total, .. 
} => { + // Update the request call state and if there are no more messages expected, remove + // the request + if request_call.register_nodes_response(total) { + // This is a multi-response Nodes response + trace!("Reinserting active request"); + self.reinsert_request(node_address, request_call, response) + .await; return; } } - // If the total number of NODES responses arrived and it is for a REGTOPIC or a - // TOPICQUERY the active request might be waiting for more types of responses. - match request_call.request.body { - RequestBody::RegisterTopic { .. } => { - trace!("Received a NODES reponse for a REGTOPIC request"); - let response_state = self - .reg_topic_responses - .entry(node_address.clone()) - .or_default(); - - match response_state { - RegTopicResponseState::Start => { - *response_state = RegTopicResponseState::Nodes; - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response( - node_address.clone(), - Box::new(response), - )) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - RegTopicResponseState::Ticket => { - *response_state = RegTopicResponseState::RegisterConfirmation; - // Still a REGCONFIRMATION may come hence request call is reinserted, in a separate - // struct to avoid blocking further requests to the node address during the request timeout. - self.active_requests_regconf - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response( - node_address.clone(), - Box::new(response), - )) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - RegTopicResponseState::Nodes - | RegTopicResponseState::RegisterConfirmation => { - debug!("No more NODES responses should be received if REGTOPIC response is in Nodes or RegisterConfirmation state."); - warn!( - "Peer returned more than one set of NODES responses for REGTOPIC request. 
Blacklisting {}", - node_address - ); - self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a REGTOPIC request".into()), true).await; - blacklist_peer(self); - return; - } - } - } - RequestBody::TopicQuery { .. } => { - trace!("Received a NODES reponse for a TOPICQUERY request"); - let response_state = self - .topic_query_responses - .entry(node_address.clone()) - .or_default(); - - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::Nodes; - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response( - node_address.clone(), - Box::new(response), - )) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - TopicQueryResponseState::AdNodes => { - self.topic_query_responses.remove(&node_address); - } - TopicQueryResponseState::Nodes => { - debug!("No more NODES responses should be received if TOPICQUERY response is in Nodes state."); - warn!( - "Peer returned more than one set of NODES responses for TOPICQUERY request. Blacklisting {}", - node_address - ); - self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of NODES responses for a TOPICQUERY request".into()), true).await; - blacklist_peer(self); - return; - } - } - } - _ => {} - } - } else if let ResponseBody::AdNodes { total, .. 
} = response.body { - if total > 1 { - // This is a multi-response Nodes response - if let Some(ref mut remaining_adnode_responses) = - request_call.remaining_adnode_responses - { - *remaining_adnode_responses -= 1; - if remaining_adnode_responses != &0 { - trace!("Reinserting active TOPICQUERY request"); - // more responses remaining, add back the request and send the response - // add back the request and send the response - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response(node_address, Box::new(response))) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - } else { - // This is the first instance - request_call.remaining_responses = Some(total - 1); - // add back the request and send the response - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response(node_address, Box::new(response))) - .await - { - warn!("Failed to inform of response {}", e) - } + ResponseBody::Ticket { wait_time, .. } => { + // We may want to keep the request alive if further nodes responses are due, or if + // a REGCONFIRMATION is expected. 
+ if request_call.register_ticket(wait_time) { + trace!("Reinserting active request"); + // There are more responses remaining, add back the request and send the response + self.reinsert_request(node_address, request_call, response) + .await; return; } } - let response_state = self - .topic_query_responses - .entry(node_address.clone()) - .or_default(); - - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::AdNodes; - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response( - node_address.clone(), - Box::new(response), - )) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - TopicQueryResponseState::Nodes => { - self.topic_query_responses.remove(&node_address); - } - TopicQueryResponseState::AdNodes => { - debug!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state."); - warn!( - "Peer returned more than one set of ADNODES responses for TOPICQUERY request. Blacklisting {}", - node_address - ); - self.fail_request(request_call, RequestError::InvalidResponseCombo("Received more than one set of ADNODES responses for a TOPICQUERY request".into()), true).await; - blacklist_peer(self); + ResponseBody::RegisterConfirmation { .. } => { + if request_call.register_confirmation() { + trace!("Reinserting active request"); + self.reinsert_request(node_address, request_call, response) + .await; return; } } - } else if let ResponseBody::Ticket { .. } = response.body { - // The request is reinserted for either a NODES response or a potential REGCONFIRMATION - // response that may come. 
- let response_state = self - .reg_topic_responses - .entry(node_address.clone()) - .or_default(); - - match response_state { - RegTopicResponseState::Start => { - *response_state = RegTopicResponseState::Ticket; - self.active_requests - .insert(node_address.clone(), request_call.clone()); - if let Err(e) = self - .service_send - .send(HandlerOut::Response( - node_address.clone(), - Box::new(response), - )) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - RegTopicResponseState::Nodes => { - *response_state = RegTopicResponseState::RegisterConfirmation; - // Still a REGCONFIRMATION may come hence request call is reinserted, in a separate - // struct to avoid blocking further requests to the node address during the request timeout. - self.active_requests_regconf - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response( - node_address.clone(), - Box::new(response), - )) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - RegTopicResponseState::Ticket | RegTopicResponseState::RegisterConfirmation => { - debug!("No more TICKET responses should be received if REGTOPIC response is in Ticket or RegisterConfirmation state."); - warn!( - "Peer returned more than one TICKET responses for REGTOPIC request. Blacklisting {}", - node_address - ); - self.fail_request( - request_call, - RequestError::InvalidResponseCombo( - "Received more than one TICKET response for a REGTOPIC request" - .into(), - ), - true, - ) - .await; - blacklist_peer(self); - return; - } + ResponseBody::Pong { .. } + | ResponseBody::Talk { .. } + | ResponseBody::AdNodes { .. } => { + // These are all associated with a single response } - } else if let ResponseBody::RegisterConfirmation { .. 
} = response.body { - self.reg_topic_responses.remove(&node_address); } // Remove the expected response @@ -1450,9 +1049,33 @@ impl Handler { } } + /// A helper function used in `handle_response` to re-insert a request_call and await another + /// response, whilst sending the response back to the service. + async fn reinsert_request( + &mut self, + node_address: NodeAddress, + request_call: RequestCall, + response: Response, + ) { + // There are more messages to be received + trace!("Reinserting active request"); + // more responses remaining, add back the request and send the response + // add back the request and send the response + self.active_requests + .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response(node_address, Box::new(response))) + .await + { + warn!("Failed to inform of response {}", e) + } + return; + } + /// Inserts a request and associated auth_tag mapping. fn insert_active_request(&mut self, request_call: RequestCall) { - let node_address = request_call.contact.node_address(); + let node_address = request_call.contact().node_address(); // adds the mapping of message nonce to node address self.active_requests.insert(node_address, request_call); @@ -1478,16 +1101,16 @@ impl Handler { ) { // The Request has expired, remove the session. 
// Fail the current request - let request_id = request_call.request.id; + let request_id = request_call.id(); if let Err(e) = self .service_send - .send(HandlerOut::RequestFailed(request_id, error.clone())) + .send(HandlerOut::RequestFailed(request_id.clone(), error.clone())) .await { warn!("Failed to inform request failure {}", e) } - let node_address = request_call.contact.node_address(); + let node_address = request_call.contact().node_address(); self.fail_session(&node_address, error, remove_session) .await; } diff --git a/src/handler/request_call.rs b/src/handler/request_call.rs new file mode 100644 index 000000000..748a00894 --- /dev/null +++ b/src/handler/request_call.rs @@ -0,0 +1,165 @@ +use super::*; + +/// The maximum number of NODES responses we allow at the handler level. +const MAX_NODES_RESPONSES: u64 = 5; + +/// A request to a node that we are waiting for a response. +#[derive(Debug)] +pub(crate) struct RequestCall { + contact: NodeContact, + /// The raw discv5 packet sent. + packet: Packet, + /// The unencrypted message. Required if need to re-encrypt and re-send. + request: Request, + /// Handshakes attempted. + handshake_sent: bool, + /// The number of times this request has been re-sent. + retries: u8, + /// A NODES response can span multiple datagrams. If we are receiving multiple NODES responses, + /// this tracks the number of datagrams we are still expecting. + awaiting_nodes: Option, + /// For topic registrations we expect to receive a ticket. We keep the request alive until we + /// receive a ticket. + ticket_received: bool, + /// Signifies if we are initiating the session with a random packet. This is only used to + /// determine the connection direction of the session. 
+    initiating_session: bool,
+}
+
+impl RequestCall {
+    pub fn new(
+        contact: NodeContact,
+        packet: Packet,
+        request: Request,
+        initiating_session: bool,
+    ) -> Self {
+        RequestCall {
+            contact,
+            packet,
+            request,
+            handshake_sent: false,
+            retries: 1,
+            awaiting_nodes: None,
+            ticket_received: false,
+            initiating_session,
+        }
+    }
+
+    /// Increments the retry count.
+    pub fn retry(&mut self) {
+        self.retries = self.retries.saturating_add(1);
+    }
+
+    /// We are now sending an authentication response to the node. The packet is being upgraded to
+    /// an authentication packet.
+    pub fn upgrade_to_auth_packet(&mut self, packet: Packet) {
+        self.packet = packet;
+        self.handshake_sent = true;
+    }
+
+    /// Sets the initiating_session flag.
+    pub fn set_initiating_session(&mut self, initiating_session: bool) {
+        self.initiating_session = initiating_session;
+    }
+
+    /// We have received a NODES response, with a given total.
+    /// If we require further messages, update the state of the [`RequestCall`]. If this request
+    /// has more messages to be received, this function returns true.
+    pub fn register_nodes_response(&mut self, total: u64) -> bool {
+        if total > 1 && total <= MAX_NODES_RESPONSES {
+            if let Some(remaining) = self.awaiting_nodes.as_mut() {
+                // Decrement in place: binding by value would copy the `u64` and lose the update.
+                *remaining = remaining.saturating_sub(1);
+                if *remaining == 0 {
+                    // Change the state so that `register_ticket` can be informed we are no longer
+                    // waiting for messages
+                    self.awaiting_nodes = None;
+                } else {
+                    return true; // still waiting for more messages
+                }
+            } else {
+                // This is the first instance
+                self.awaiting_nodes = Some(total - 1);
+                return true; // still waiting for more messages
+            }
+        }
+
+        // This is either a single message, the node is faulty, or we have the final NODES response
+        // we were waiting for.
+        // We are not waiting for more messages, unless we are still waiting for a ticket.
+        if matches!(self.kind(), RequestBody::RegisterTopic { ..
}) {
+            !self.ticket_received // We are still waiting for a Ticket
+        } else {
+            false // This was a single NODES response and we have no interest in waiting for more messages.
+        }
+    }
+
+    /// A TICKET response has been received.
+    /// This updates the state of the request and returns true if we should wait for more
+    /// responses.
+    pub fn register_ticket(&mut self, wait_time: u64) -> bool {
+        if self.ticket_received {
+            // We have already received a ticket, do not wait for anything further.
+            return false;
+        }
+
+        self.ticket_received = true;
+
+        // A zero wait time means the registration window is open: expect an immediate confirmation.
+        if wait_time == 0 {
+            return true;
+        }
+
+        // If we are still expecting more NODES to be returned, wait for these also.
+        if self.awaiting_nodes.is_some() {
+            return true;
+        }
+        false
+    }
+
+    /// We have received a Register Confirmation. Returns true if we are still expecting more
+    /// responses.
+    pub fn register_confirmation(&mut self) -> bool {
+        // If there are more NODES responses to come, wait for these.
+        self.awaiting_nodes.is_some()
+    }
+
+    /// Returns the request ID associated with the [`RequestCall`].
+    pub fn id(&self) -> &RequestId {
+        &self.request.id
+    }
+
+    /// Returns the raw request.
+    pub fn raw_request(&self) -> &Request {
+        &self.request
+    }
+
+    /// Returns the raw packet of the request.
+    pub fn packet(&self) -> &Packet {
+        &self.packet
+    }
+
+    /// The destination contact for this request.
+    pub fn contact(&self) -> &NodeContact {
+        &self.contact
+    }
+
+    /// Returns the [`RequestBody`] associated with the [`RequestCall`].
+    pub fn kind(&self) -> &RequestBody {
+        &self.request.body
+    }
+
+    /// Returns the number of retries this request has undertaken.
+    pub fn retries(&self) -> u8 {
+        self.retries
+    }
+
+    /// Whether we have sent a handshake or not.
+    pub fn handshake_sent(&self) -> bool {
+        self.handshake_sent
+    }
+
+    /// Whether our node is the one that is initiating the session.
+ pub fn initiating_session(&self) -> bool { + self.initiating_session + } +} diff --git a/src/handler/tests.rs b/src/handler/tests.rs index 91fbac890..9b9114206 100644 --- a/src/handler/tests.rs +++ b/src/handler/tests.rs @@ -247,7 +247,7 @@ async fn test_active_requests_insert() { let request_call = RequestCall::new(contact, packet, request, initiating_session); // insert the pair and verify the mapping remains in sync - let nonce = *request_call.packet.message_nonce(); + let nonce = *request_call.packet().message_nonce(); active_requests.insert(node_address, request_call); active_requests.check_invariant(); active_requests.remove_by_nonce(&nonce); From 8617437e3a76e09c72d76d2048d6def05fd6cdd2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 27 Jul 2022 11:41:51 +0200 Subject: [PATCH 306/391] Remove ADNODES response --- src/handler/mod.rs | 7 +--- src/rpc.rs | 52 ++------------------------ src/service.rs | 93 +++------------------------------------------- 3 files changed, 10 insertions(+), 142 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index bea26cf08..e3ed23cf3 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1020,10 +1020,8 @@ impl Handler { return; } } - ResponseBody::Pong { .. } - | ResponseBody::Talk { .. } - | ResponseBody::AdNodes { .. } => { - // These are all associated with a single response + ResponseBody::Pong { .. } | ResponseBody::Talk { .. } => { + // These are both associated with a single response } } @@ -1070,7 +1068,6 @@ impl Handler { { warn!("Failed to inform of response {}", e) } - return; } /// Inserts a request and associated auth_tag mapping. 
diff --git a/src/rpc.rs b/src/rpc.rs index 5a2c7fe2d..b71f0f852 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,6 +1,5 @@ use crate::advertisement::topic::TopicHash; use enr::{CombinedKey, Enr, NodeId}; -use more_asserts::debug_unreachable; use rlp::{DecoderError, Rlp, RlpStream}; use std::net::{IpAddr, Ipv6Addr}; use tokio::time::Duration; @@ -134,14 +133,6 @@ pub enum ResponseBody { /// The topic of a successful REGTOPIC request. topic: TopicHash, }, - /// A NODES response to a TOPICQUERY which also receives a NODES response - /// with peers to add to topic kbuckets. - AdNodes { - /// The total number of responses that make up this response. - total: u64, - /// A list of ENR's returned by the responder. - nodes: Vec>, - }, } impl Request { @@ -220,7 +211,6 @@ impl Response { ResponseBody::Talk { .. } => 6, ResponseBody::Ticket { .. } => 8, ResponseBody::RegisterConfirmation { .. } => 9, - ResponseBody::AdNodes { .. } => 11, } } @@ -241,7 +231,6 @@ impl Response { ResponseBody::RegisterConfirmation { .. } => { matches!(req, RequestBody::RegisterTopic { .. }) } - ResponseBody::AdNodes { .. } => matches!(req, RequestBody::TopicQuery { .. }), } } @@ -265,7 +254,7 @@ impl Response { buf.extend_from_slice(&s.out()); buf } - ResponseBody::Nodes { total, nodes } | ResponseBody::AdNodes { total, nodes } => { + ResponseBody::Nodes { total, nodes } => { let mut s = RlpStream::new(); s.begin_list(3); s.append(&id.as_bytes()); @@ -345,16 +334,8 @@ impl std::fmt::Display for ResponseBody { "PONG: Enr-seq: {}, Ip: {:?}, Port: {}", enr_seq, ip, port ), - ResponseBody::Nodes { total, nodes } | ResponseBody::AdNodes { total, nodes } => { - let response_type = match self { - ResponseBody::Nodes { .. } => "NODES", - ResponseBody::AdNodes { .. 
} => "ADNODES", - _ => { - debug_unreachable!("Only NODES and ADNODES"); - "" - } - }; - write!(f, "{}: total: {}, Nodes: [", response_type, total)?; + ResponseBody::Nodes { total, nodes } => { + write!(f, "NODES: total: {}, Nodes: [", total)?; let mut first = true; for id in nodes { if !first { @@ -691,33 +672,6 @@ impl Message { body: RequestBody::TopicQuery { topic }, }) } - 11 => { - // AdNodesResponse - if list_len != 3 { - debug!( - "AdNodes Response has an invalid RLP list length. Expected 3, found {}", - list_len - ); - return Err(DecoderError::RlpIncorrectListLen); - } - - let nodes = { - let enr_list_rlp = rlp.at(2)?; - if enr_list_rlp.is_empty() { - // no records - vec![] - } else { - enr_list_rlp.as_list::>()? - } - }; - Message::Response(Response { - id, - body: ResponseBody::AdNodes { - total: rlp.val_at::(1)?, - nodes, - }, - }) - } _ => { return Err(DecoderError::Custom("Unknown RPC message type")); } diff --git a/src/service.rs b/src/service.rs index 4ec59b04e..264c5a67e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -231,7 +231,7 @@ pub struct Service { /// Keeps track of the number of responses received from a NODES response. active_nodes_responses: HashMap, - /// Keeps track of the number of responses received from a NODES response. + /// Keeps track of the number of responses received from a NODES response containing ads. 
active_adnodes_responses: HashMap, /// Keeps track of the 2 expected responses, NODES and ADNODES that should be received from a @@ -1620,83 +1620,6 @@ impl Service { self.discovered(&node_id, nodes, active_request.query_id, None) } } - ResponseBody::AdNodes { total, mut nodes } => { - // handle the case that there is more than one response - if total > 1 { - let mut current_response = self - .active_adnodes_responses - .remove(&node_id) - .unwrap_or_default(); - - debug!( - "ADNODES Response: {} of {} received", - current_response.count, total - ); - // if there are more responses coming, store the nodes and wait for - // another response - // We allow for implementations to send at a minimum 3 nodes per response. - // We allow for the number of nodes to be returned as the maximum we emit. - if current_response.count < self.config.max_nodes_response / 3 + 1 - && (current_response.count as u64) < total - { - current_response.count += 1; - - current_response.received_nodes.append(&mut nodes); - self.active_adnodes_responses - .insert(node_id, current_response); - self.active_requests.insert(id, active_request); - return; - } - - // have received all the Nodes responses we are willing to accept - // ignore duplicates here as they will be handled when adding - // to the DHT - current_response.received_nodes.append(&mut nodes); - nodes = current_response.received_nodes; - } - - debug!( - "Received a ADNODES response of len: {}, total: {}, from: {}", - nodes.len(), - total, - active_request.contact - ); - // note: If a peer sends an initial NODES response with a total > 1 then - // in a later response sends a response with a total of 1, all previous nodes - // will be ignored. 
- // ensure any mapping is removed in this rare case - self.active_adnodes_responses.remove(&node_id); - - if let RequestBody::TopicQuery { topic } = active_request.request_body { - nodes.retain(|enr| (self.config.table_filter)(enr)); - if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) { - nodes.into_iter().for_each(|enr| { - trace!( - "Inserting node {} into query for topic hash {}", - enr.node_id(), - topic - ); - query.results.insert(enr.node_id(), enr); - }); - *query.queried_peers.entry(node_id).or_default() = true; - } - let response_state = self.topic_query_responses.entry(node_id).or_default(); - - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::AdNodes; - self.active_requests.insert(id, active_request); - } - TopicQueryResponseState::Nodes => { - trace!("TOPICQUERY has received expected responses"); - self.topic_query_responses.remove(&node_id); - } - TopicQueryResponseState::AdNodes => { - debug_unreachable!("No more ADNODES responses should be received if TOPICQUERY response is in AdNodes state.") - } - } - } - } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); // perform ENR majority-based update if required. @@ -2082,7 +2005,7 @@ impl Service { node_address, rpc_id, "TOPICQUERY", - ResponseBody::AdNodes { + ResponseBody::Nodes { total: 1u64, nodes: Vec::new(), }, @@ -2267,15 +2190,9 @@ impl Service { let responses: Vec = to_send_nodes .into_iter() .map(|nodes| { - let body = match resp_body { - ResponseBody::AdNodes { .. 
} => ResponseBody::AdNodes { - total: (rpc_index + 1) as u64, - nodes, - }, - _ => ResponseBody::Nodes { - total: (rpc_index + 1) as u64, - nodes, - }, + let body = ResponseBody::Nodes { + total: (rpc_index + 1) as u64, + nodes, }; Response { id: rpc_id.clone(), From ba15a64f8473854b3b08830be1a0e0af7093ba78 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 27 Jul 2022 15:06:06 +0200 Subject: [PATCH 307/391] Rlp encode topics for enr --- src/discv5.rs | 10 +++-- src/service.rs | 101 +++++++++++++++++++++++++++++++------------------ 2 files changed, 71 insertions(+), 40 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 93e8eca93..5c97a44f4 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -622,12 +622,14 @@ impl Discv5 { let channel = channel .as_ref() .map_err(|_| RequestError::ServiceNotStarted)?; - let topic_hash = Topic::new(topic).hash(); - let event = ServiceRequest::RegisterTopic(topic_hash); + let topic = Topic::new(topic); debug!( - "Registering topic {} with Sha256 hash {}", - topic, topic_hash + "Registering topic {} with topic hash {} {}", + topic, + topic.hash(), + topic.hash_function_name(), ); + let event = ServiceRequest::RegisterTopic(topic); // send the request channel .send(event) diff --git a/src/service.rs b/src/service.rs index 264c5a67e..181bab7fe 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,7 @@ use crate::{ ActiveRegtopicRequests, TicketPools, Tickets, MAX_WAIT_TIME_TICKET, TICKET_LIMIT_DURATION, }, - topic::TopicHash, + topic::{Sha256Topic as Topic, TopicHash}, Ads, AD_LIFETIME, }, discv5::PERMIT_BAN_LIST, @@ -50,6 +50,7 @@ use fnv::FnvHashMap; use futures::{future::select_all, prelude::*}; use more_asserts::debug_unreachable; use parking_lot::RwLock; +use rlp::{Rlp, RlpStream}; use rpc::*; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, @@ -189,7 +190,7 @@ pub enum ServiceRequest { TopicQuery(TopicHash, oneshot::Sender, RequestError>>), /// RegisterTopic publishes this node as an advertiser for 
a topic in a discv5 network /// until removed. - RegisterTopic(TopicHash), + RegisterTopic(Topic), /// Stops publishing this node as an advetiser for a topic. RemoveTopic(TopicHash, oneshot::Sender>), /// Retrieves the ads currently published by this node on other nodes in a discv5 network. @@ -621,50 +622,78 @@ impl Service { } self.send_topic_queries(topic_hash, Some(callback)); } - ServiceRequest::RegisterTopic(topic_hash) => { + ServiceRequest::RegisterTopic(topic) => { + let topic_hash = topic.hash(); if self.registration_attempts.insert(topic_hash, BTreeMap::new()).is_some() { warn!("This topic is already being advertised"); } else { - // NOTE: Currently we don't expose custom filter support in the configuration. Users can - // optionally use the IP filter via the ip_limit configuration parameter. In the future, we - // may expose this functionality to the users if there is demand for it. - let (table_filter, bucket_filter) = if self.config.ip_limit { - ( - Some(Box::new(kbucket::IpTableFilter) as Box>), - Some(Box::new(kbucket::IpBucketFilter) as Box>), - ) + let topics_field = if let Some(topics) = self.local_enr.read().get("topics") { + let rlp = Rlp::new(topics); + let item_count = rlp.iter().count(); + let mut rlp_stream = RlpStream::new_list(item_count + 1); + for item in rlp.iter() { + if let Ok(data) = item.data().map_err(|e| debug_unreachable!("Topic item which was previously encoded in enr, cannot be decoded into data. 
Error {}", e)) { + rlp_stream.append(&data); + } + } + rlp_stream.append(&topic.topic().as_bytes()); + rlp_stream.out() } else { - (None, None) + let mut rlp_stream = RlpStream::new_list(1); + rlp_stream.append(&topic.topic().as_bytes()); + rlp_stream.out() }; - trace!("Initiating kbuckets for topic hash {}", topic_hash); - let mut kbuckets = KBucketsTable::new( - NodeId::new(&topic_hash.as_bytes()).into(), - Duration::from_secs(60), - self.config.incoming_bucket_limit, - table_filter, - bucket_filter, - ); + let enr_size = self.local_enr.read().size() + topics_field.len(); + if enr_size >= 300 { + error!("Failed to register topic {}. The ENR would be a total of {} bytes if this topic was registered, the maximum size is 300 bytes", topic.topic(), enr_size); + } - debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); + if self.local_enr + .write() + .insert("topics", &topics_field, &self.enr_key.write()) + .map_err(|e| error!("Failed to insert field 'topics' into local enr. Error {:?}", e)).is_ok() { + // NOTE: Currently we don't expose custom filter support in the configuration. Users can + // optionally use the IP filter via the ip_limit configuration parameter. In the future, we + // may expose this functionality to the users if there is demand for it. + let (table_filter, bucket_filter) = if self.config.ip_limit { + ( + Some(Box::new(kbucket::IpTableFilter) as Box>), + Some(Box::new(kbucket::IpBucketFilter) as Box>), + ) + } else { + (None, None) + }; + + trace!("Initiating kbuckets for topic hash {}", topic_hash); + let mut kbuckets = KBucketsTable::new( + NodeId::new(&topic_hash.as_bytes()).into(), + Duration::from_secs(60), + self.config.incoming_bucket_limit, + table_filter, + bucket_filter, + ); - for entry in self.kbuckets.write().iter() { - match kbuckets.insert_or_update(entry.node.key, entry.node.value.clone(), entry.status) { - InsertResult::Inserted - | InsertResult::Pending { .. 
} - | InsertResult::StatusUpdated { .. } - | InsertResult::ValueUpdated - | InsertResult::Updated { .. } - | InsertResult::UpdatedPending => trace!( - "Added node id {} to kbucket of topic hash {}", - entry.node.value.node_id(), - topic_hash - ), - InsertResult::Failed(f) => error!("Failed to insert ENR for topic hash {}. Failure reason: {:?}", topic_hash, f), + debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); + + for entry in self.kbuckets.write().iter() { + match kbuckets.insert_or_update(entry.node.key, entry.node.value.clone(), entry.status) { + InsertResult::Inserted + | InsertResult::Pending { .. } + | InsertResult::StatusUpdated { .. } + | InsertResult::ValueUpdated + | InsertResult::Updated { .. } + | InsertResult::UpdatedPending => trace!( + "Added node id {} to kbucket of topic hash {}", + entry.node.value.node_id(), + topic_hash + ), + InsertResult::Failed(f) => error!("Failed to insert ENR for topic hash {}. Failure reason: {:?}", topic_hash, f), + } } + self.topics_kbuckets.insert(topic_hash, kbuckets); + METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); } - self.topics_kbuckets.insert(topic_hash, kbuckets); - METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); } } ServiceRequest::ActiveTopics(callback) => { From f88d89fdc8a58046985bf93636870d8ced49bbdf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 27 Jul 2022 18:18:36 +0200 Subject: [PATCH 308/391] Use ticket wait time to communicate a successful registration instead of REGCONFIRMATION --- src/error.rs | 7 +- src/handler/mod.rs | 8 -- src/rpc.rs | 130 +++++++--------------- src/service.rs | 266 +++++++++++++++----------------------------- src/service/test.rs | 5 +- 5 files changed, 138 insertions(+), 278 deletions(-) diff --git a/src/error.rs b/src/error.rs index f0319f97d..d7377247b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -116,9 +116,14 @@ pub enum 
RequestError { /// A request that is responded with multiple respones /// gets the wrong combination of responses. InvalidResponseCombo(String), - /// A REGTOPIC requerst has sent a ticket that was not + /// A REGTOPIC request has sent a ticket that was not /// issued by us. InvalidTicket, + /// A REGTOPIC request is trying to register another node + /// than itself. + RegistrationOtherNode, + /// A REGTOPIC is not respecting the assigned wait time. + InvalidWaitTime, } #[derive(Debug, Clone, PartialEq)] diff --git a/src/handler/mod.rs b/src/handler/mod.rs index e3ed23cf3..8978b4283 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1012,14 +1012,6 @@ impl Handler { return; } } - ResponseBody::RegisterConfirmation { .. } => { - if request_call.register_confirmation() { - trace!("Reinserting active request"); - self.reinsert_request(node_address, request_call, response) - .await; - return; - } - } ResponseBody::Pong { .. } | ResponseBody::Talk { .. } => { // These are both associated with a single response } diff --git a/src/rpc.rs b/src/rpc.rs index b71f0f852..6f782b7b2 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,8 +1,11 @@ use crate::advertisement::topic::TopicHash; use enr::{CombinedKey, Enr, NodeId}; use rlp::{DecoderError, Rlp, RlpStream}; -use std::net::{IpAddr, Ipv6Addr}; -use tokio::time::Duration; +use std::{ + net::{IpAddr, Ipv6Addr}, + time::{SystemTime, UNIX_EPOCH}, +}; +use tokio::time::{Duration, Instant}; use tracing::{debug, error, warn}; /// Type to manage the request IDs. @@ -128,11 +131,6 @@ pub enum ResponseBody { /// The topic hash for which the opaque ticket is issued. topic: TopicHash, }, - /// The REGCONFIRMATION response. - RegisterConfirmation { - /// The topic of a successful REGTOPIC request. - topic: TopicHash, - }, } impl Request { @@ -142,7 +140,7 @@ impl Request { RequestBody::FindNode { .. } => 3, RequestBody::Talk { .. } => 5, RequestBody::RegisterTopic { .. } => 7, - RequestBody::TopicQuery { .. 
} => 10, + RequestBody::TopicQuery { .. } => 9, } } @@ -210,7 +208,6 @@ impl Response { ResponseBody::Nodes { .. } => 4, ResponseBody::Talk { .. } => 6, ResponseBody::Ticket { .. } => 8, - ResponseBody::RegisterConfirmation { .. } => 9, } } @@ -228,9 +225,6 @@ impl Response { } ResponseBody::Talk { .. } => matches!(req, RequestBody::Talk { .. }), ResponseBody::Ticket { .. } => matches!(req, RequestBody::RegisterTopic { .. }), - ResponseBody::RegisterConfirmation { .. } => { - matches!(req, RequestBody::RegisterTopic { .. }) - } } } @@ -293,14 +287,6 @@ impl Response { buf.extend_from_slice(&s.out()); buf } - ResponseBody::RegisterConfirmation { topic } => { - let mut s = RlpStream::new(); - s.begin_list(2); - s.append(&id.as_bytes()); - s.append(&topic); - buf.extend_from_slice(&s.out()); - buf - } } } } @@ -364,9 +350,6 @@ impl std::fmt::Display for ResponseBody { topic ) } - ResponseBody::RegisterConfirmation { topic } => { - write!(f, "REGCONFIRMATION: Registered: {}", topic) - } } } } @@ -625,30 +608,6 @@ impl Message { }) } 9 => { - // RegisterConfirmationResponse - if list_len != 2 { - debug!( - "RegisterConfirmation response has an invalid RLP list length. 
Expected 2, found {}", - list_len - ); - return Err(DecoderError::RlpIncorrectListLen); - } - let topic = { - let topic_bytes = rlp.val_at::>(1)?; - if topic_bytes.len() > 32 { - debug!("RegisterConfirmation Request has a topic greater than 32 bytes"); - return Err(DecoderError::RlpIsTooBig); - } - let mut topic = [0u8; 32]; - topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); - TopicHash::from_raw(topic) - }; - Message::Response(Response { - id, - body: ResponseBody::RegisterConfirmation { topic }, - }) - } - 10 => { // TopicQueryRequest if list_len != 2 { debug!( @@ -687,9 +646,9 @@ pub struct Ticket { src_node_id: NodeId, src_ip: IpAddr, topic: TopicHash, - //req_time: Instant, + req_time: Instant, wait_time: Duration, - cum_wait: Duration, + //cum_wait: Duration, } impl rlp::Encodable for Ticket { @@ -701,13 +660,13 @@ impl rlp::Encodable for Ticket { IpAddr::V6(addr) => s.append(&(addr.octets().to_vec())), }; s.append(&self.topic); - /*if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { + if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { let time_since_req = self.req_time.elapsed(); let time_stamp = time_since_unix - time_since_req; s.append(&time_stamp.as_secs().to_be_bytes().to_vec()); - }*/ - s.append(&self.wait_time.as_secs().to_be_bytes().to_vec()); + } s.append(&self.wait_time.as_secs().to_be_bytes().to_vec()); + //s.append(&self.cum_wait.as_secs().to_be_bytes().to_vec()); } } @@ -768,7 +727,7 @@ impl rlp::Decodable for Ticket { let topic = decoded_list.remove(0).as_val::()?; - /*let req_time = { + let req_time = { if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { let secs_data = decoded_list.remove(0).data()?; let mut secs_bytes = [0u8; 8]; @@ -786,7 +745,7 @@ impl rlp::Decodable for Ticket { } else { return Err(DecoderError::Custom("SystemTime before UNIX EPOCH!")); } - };*/ + }; let wait_time = { let secs_data = decoded_list.remove(0).data()?; @@ -796,21 +755,21 @@ 
impl rlp::Decodable for Ticket { Duration::from_secs(secs) }; - let cum_wait = { + /*let cum_wait = { let secs_data = decoded_list.remove(0).data()?; let mut secs_bytes = [0u8; 8]; secs_bytes.copy_from_slice(secs_data); let secs = u64::from_be_bytes(secs_bytes); Duration::from_secs(secs) - }; + };*/ Ok(Self { src_node_id, src_ip, topic, - //req_time, + req_time, wait_time, - cum_wait, + //cum_wait, }) } } @@ -830,17 +789,17 @@ impl Ticket { src_node_id: NodeId, src_ip: IpAddr, topic: TopicHash, - //req_time: Instant, + req_time: Instant, wait_time: Duration, - cum_wait: Duration, + //cum_wait: Duration, ) -> Self { Ticket { src_node_id, src_ip, topic, - //req_time, + req_time, wait_time, - cum_wait, + //cum_wait, } } @@ -848,22 +807,26 @@ impl Ticket { self.topic } - /*pub fn req_time(&self) -> Instant { + pub fn req_time(&self) -> Instant { self.req_time - }*/ + } pub fn wait_time(&self) -> Duration { self.wait_time } - pub fn cum_wait(&self) -> Duration { - self.cum_wait + pub fn set_wait_time(&mut self, wait_time: Duration) { + self.wait_time = wait_time; } - pub fn set_cum_wait(&mut self, prev_cum_wait: Duration) { - self.cum_wait = prev_cum_wait + self.wait_time; + /*pub fn cum_wait(&self) -> Duration { + self.cum_wait } + pub fn update_cum_wait(&mut self) { + self.cum_wait = self.cum_wait + self.wait_time; + }*/ + pub fn encode(&self) -> Vec { let mut buf = Vec::new(); let mut s = RlpStream::new(); @@ -1162,9 +1125,9 @@ mod tests { node_id, ip, TopicHash::from_raw([1u8; 32]), - //Instant::now(), + Instant::now(), Duration::from_secs(11), - Duration::from_secs(25), + //Duration::from_secs(25), ); let ticket = ticket.encode(); @@ -1198,9 +1161,9 @@ mod tests { node_id, ip, TopicHash::from_raw([1u8; 32]), - //Instant::now(), + Instant::now(), Duration::from_secs(11), - Duration::from_secs(25), + //Duration::from_secs(25), ); let encoded = ticket.encode(); @@ -1223,9 +1186,9 @@ mod tests { node_id, ip, TopicHash::from_raw([1u8; 32]), - //Instant::now(), + 
Instant::now(), Duration::from_secs(11), - Duration::from_secs(25), + //Duration::from_secs(25), ); let ticket_key: [u8; 16] = rand::random(); @@ -1273,9 +1236,9 @@ mod tests { node_id, ip, TopicHash::from_raw([1u8; 32]), - //Instant::now(), + Instant::now(), Duration::from_secs(11), - Duration::from_secs(25), + //Duration::from_secs(25), ); let ticket = ticket.encode(); @@ -1294,21 +1257,6 @@ mod tests { assert_eq!(response, decoded); } - #[test] - fn encode_decode_register_confirmation_response() { - let response = Message::Response(Response { - id: RequestId(vec![1]), - body: ResponseBody::RegisterConfirmation { - topic: TopicHash::from_raw([1u8; 32]), - }, - }); - - let encoded = response.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); - - assert_eq!(response, decoded); - } - #[test] fn encode_decode_topic_query_request() { let request = Message::Request(Request { diff --git a/src/service.rs b/src/service.rs index 181bab7fe..3ec91379e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,10 +18,7 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{ - ActiveRegtopicRequests, TicketPools, Tickets, MAX_WAIT_TIME_TICKET, - TICKET_LIMIT_DURATION, - }, + ticket::{ActiveRegtopicRequests, Tickets, MAX_WAIT_TIME_TICKET, TICKET_LIMIT_DURATION}, topic::{Sha256Topic as Topic, TopicHash}, Ads, AD_LIFETIME, }, @@ -289,9 +286,6 @@ pub struct Service { /// Tickets received by other nodes. tickets: Tickets, - /// Locally issued tickets returned by nodes pending registration for free local ad slots. - ticket_pools: TicketPools, - /// Locally initiated topic query requests in process. 
active_topic_queries: ActiveTopicQueries, } @@ -513,7 +507,6 @@ impl Service { discovered_peers_topic: HashMap::new(), ticket_key: rand::random(), tickets: Tickets::default(), - ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, config.max_nodes_response, @@ -860,38 +853,11 @@ impl Service { } Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { let enr = self.local_enr.read().clone(); - // When the ticket time expires a new regtopic request is automatically sent - // to the ticket issuer and the registration state for the given topic is - // updated. - if let Some(reg_attempts) = self.registration_attempts.get_mut(&active_topic.topic()) { - for kbucket_reg_attempts in reg_attempts.values_mut() { - let reg_state = kbucket_reg_attempts.reg_attempts.remove(active_topic.node_id()); - if reg_state.is_some() { - break; - } - } - } + // When the ticket time expires a new REGTOPIC request is automatically sent to the + // ticket issuer and the registration attempt stays in the [`RegistrationState::Ticket`] + // from sending the first REGTOPIC request to this contact for this topic. self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); } - Some(Ok((topic, mut ticket_pool))) = self.ticket_pools.next() => { - // Remove any tickets which don't have a current wait time of None. - ticket_pool.retain(|node_id, pool_ticket| self.ads.ticket_wait_time(topic, *node_id, *pool_ticket.ip()) == None); - // Select ticket with longest cummulative wait time. 
- if let Some(pool_ticket) = ticket_pool.values().max_by_key(|pool_ticket| pool_ticket.ticket().cum_wait()) { - let enr = pool_ticket.node_record(); - if let Ok(node_contact) = NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) { - let node_id = enr.node_id(); - if let Err((wait_time, e)) = self.ads.insert(enr.clone(), topic, node_contact.socket_addr().ip()) { - error!("Couldn't insert ad from node id {} into ads. Error {}", node_id, e); - let new_ticket = Ticket::new(node_id, *pool_ticket.ip(), topic, wait_time, pool_ticket.ticket().cum_wait() + wait_time); - self.send_ticket_response(node_contact.node_address(), pool_ticket.req_id().clone(), new_ticket, wait_time); - } else { - self.send_regconfirmation_response(node_contact.node_address(), pool_ticket.req_id().clone(), topic); - METRICS.hosted_ads.store(self.ads.len(), Ordering::Relaxed); - } - } - } - } Some(topic_query_progress) = self.active_topic_queries.next() => { match topic_query_progress { TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) => { @@ -1312,25 +1278,35 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { - // Drop if request tries to advertise another node than sender + // Blacklist if request tries to advertise another node than the sender if enr.node_id() != node_address.node_id { - debug!("The enr node id in REGTOPIC request body does not match sender's. Nodes can only register themselves."); + warn!("The enr node id in REGTOPIC request body does not match sender's. Nodes can only register themselves. 
Blacklisting peer {}.", node_address.node_id); + let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::RegistrationOtherNode); return; } match self.config.ip_mode { IpMode::Ip4 => { if enr.udp4_socket().map(SocketAddr::V4) != Some(node_address.socket_addr) { - debug!("The enr ip in REGTOPIC request body does not match sender's. Nodes can only register themselves."); + warn!("The enr ip in REGTOPIC request body does not match sender's. Nodes can only register themselves. Blacklisting peer {}.", node_address.node_id); + let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::RegistrationOtherNode); return; } } IpMode::Ip6 { .. } => { if enr.udp6_socket().map(SocketAddr::V6) != Some(node_address.socket_addr) { - debug!("The enr ip in REGTOPIC request body does not match sender's. Nodes can only register themselves."); + warn!("The enr ip in REGTOPIC request body does not match sender's. Nodes can only register themselves. Blacklisting peer {}.", node_address.node_id); + let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::RegistrationOtherNode); return; } } } + self.send_find_topic_nodes_response( topic, node_address.clone(), @@ -1338,21 +1314,6 @@ impl Service { "REGTOPIC", ); - // The current wait time for a given topic. 
- let wait_time = self - .ads - .ticket_wait_time(topic, node_address.node_id, node_address.socket_addr.ip()) - .unwrap_or(Duration::from_secs(0)); - - let mut new_ticket = Ticket::new( - node_address.node_id, - node_address.socket_addr.ip(), - topic, - //tokio::time::Instant::now(), - wait_time, - wait_time, - ); - if !ticket.is_empty() { let decrypted_ticket = { let aead = Aes128Gcm::new(GenericArray::from_slice(&self.ticket_key)); @@ -1366,65 +1327,54 @@ impl Service { }) }; if let Ok(decrypted_ticket) = decrypted_ticket { - Ticket::decode(&decrypted_ticket) - .map_err(|e| { - error!("Failed to decode ticket in REGTOPIC request. Error: {}", e) - }) - .map(|ticket| { - if let Some(ticket) = ticket { - // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no - // wait time for the ad slot. See discv5 spec. This node will not store tickets received - // with wait time 0. - new_ticket.set_cum_wait(ticket.cum_wait()); - self.send_ticket_response( - node_address.clone(), - id.clone(), - new_ticket.clone(), - wait_time, - ); - // If current wait time is 0, the ticket is added to the matching ticket pool. - if wait_time <= Duration::from_secs(0) { - // Drop if src_node_id, src_ip and topic derived from node_address and request - // don't match those in ticket. For example if a malicious node tries to use - // another ticket issued by us. - if ticket == new_ticket { - self.ticket_pools.insert( - enr, - id, - ticket, - node_address.socket_addr.ip(), - ); - } - } - } - }) - .ok(); + if let Ok(Some(ticket)) = Ticket::decode(&decrypted_ticket).map_err(|e| { + error!("Failed to decode ticket in REGTOPIC request. 
Error: {}", e) + }) { + // If the node has not respected the wait time and arrives before the wait time has + // expired or more than 5 seconds later than it has expired, the peer is blacklisted + let waited_time = ticket.req_time().elapsed(); + let wait_time = ticket.wait_time(); + if waited_time < wait_time + || waited_time >= wait_time + Duration::from_secs(5) + { + warn!("The REGTOPIC has not waited the time assigned in the ticket. Blacklisting peer {}.", node_address.node_id); + let ban_timeout = + self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::InvalidWaitTime); + return; + } + } } else { - warn!("Node sent a ticket that couldn't be decrypted with local ticket key. Blacklisting: {}", node_address.node_id); + warn!("Node sent a ticket that couldn't be decrypted with local ticket key. Blacklisting peer {}", node_address.node_id); let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); self.rpc_failure(id, RequestError::InvalidTicket); + return; } - } else { - // A ticket is always be issued upon receiving a REGTOPIC request, even if there is no - // wait time for the ad slot. See discv5 spec. This node will not store tickets received - // with wait time 0. - self.send_ticket_response( - node_address.clone(), - id.clone(), - new_ticket.clone(), - wait_time, - ); - // If current wait time is 0, the ticket is added to the matching ticket pool. 
- if wait_time == Duration::from_secs(0) { - self.ticket_pools.insert( - enr, - id, - new_ticket, - node_address.socket_addr.ip(), - ); - } } + + let mut new_ticket = Ticket::new( + node_address.node_id, + node_address.socket_addr.ip(), + topic, + tokio::time::Instant::now(), + Duration::default(), + ); + + // If there is no wait time and the ad is successfuly registered as an ad, the new ticket is sent + // with wait time set to zero indicating successful registration. + if let Err((wait_time, e)) = + self.ads + .insert(enr.clone(), topic, node_address.socket_addr.ip()) + { + // If there is wait time for the requesting node for this topic to register as an ad, due to the + // current state of the topic table, the wait time on the new ticket to send is updated. + new_ticket.set_wait_time(wait_time); + } + + let wait_time = new_ticket.wait_time(); + self.send_ticket_response(node_address.clone(), id.clone(), new_ticket, wait_time); } RequestBody::TopicQuery { topic } => { self.send_find_topic_nodes_response( @@ -1782,56 +1732,43 @@ impl Service { wait_time, topic, } => { - if wait_time <= MAX_WAIT_TIME_TICKET && wait_time > 0 { - if let Err(e) = self.tickets.insert( - active_request.contact, - ticket, - Duration::from_secs(wait_time), - topic, - ) { - error!( - "Failed storing ticket from node id {}. 
Error {}", - node_id, e - ); - if let Some(reg_attempts_by_distance) = - self.registration_attempts.get_mut(&topic) + if wait_time <= MAX_WAIT_TIME_TICKET { + let now = Instant::now(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let registration_attempts = + self.registration_attempts.entry(topic).or_default(); + if let Some(reg_state) = registration_attempts + .entry(distance) + .or_default() + .reg_attempts + .get_mut(&node_id) { - let now = Instant::now(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = - NodeId::new(&topic.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - reg_attempts_by_distance.get_mut(&distance).map(|bucket| { - bucket.reg_attempts.get_mut(&node_id).map(|reg_state| { - *reg_state = RegistrationState::TicketLimit(now) - }) - }); + if wait_time > 0 { + if let Err(e) = self.tickets.insert( + active_request.contact, + ticket, + Duration::from_secs(wait_time), + topic, + ) { + error!( + "Failed storing ticket from node id {}. 
Error {}", + node_id, e + ); + *reg_state = RegistrationState::TicketLimit(now); + } + } else { + *reg_state = RegistrationState::Confirmed(now); + METRICS.active_regtopic_req.store( + self.active_regtopic_requests.len(), + Ordering::Relaxed, + ); } } } } } - ResponseBody::RegisterConfirmation { topic } => { - let now = Instant::now(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - let registration_attempts = - self.registration_attempts.entry(topic).or_default(); - if let Some(reg_state) = registration_attempts - .entry(distance) - .or_default() - .reg_attempts - .get_mut(&node_id) - { - *reg_state = RegistrationState::Confirmed(now); - } - - METRICS - .active_regtopic_req - .store(self.active_regtopic_requests.len(), Ordering::Relaxed); - } - } } } else { warn!( @@ -1995,27 +1932,6 @@ impl Service { }); } - /// The response sent to a node which is selected out of a ticket pool of registrants - /// for a free ad slot. - fn send_regconfirmation_response( - &mut self, - node_address: NodeAddress, - rpc_id: RequestId, - topic: TopicHash, - ) { - let response = Response { - id: rpc_id, - body: ResponseBody::RegisterConfirmation { topic }, - }; - debug!( - "Sending REGCONFIRMATION response to: {}. Response: {} ", - node_address, response - ); - let _ = self - .handler_send - .send(HandlerIn::Response(node_address, Box::new(response))); - } - /// Response to a topic query containing the nodes currently advertised for the /// requested topic if any. 
fn send_topic_query_adnodes_response( diff --git a/src/service/test.rs b/src/service/test.rs index 2e42a6cf3..a7cfbd8de 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -102,7 +102,6 @@ async fn build_service( registration_attempts: HashMap::new(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), - ticket_pools: TicketPools::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, config.max_nodes_response, @@ -226,9 +225,9 @@ async fn encrypt_decrypt_ticket() { node_id, ip, TopicHash::from_raw([1u8; 32]), - //tokio::time::Instant::now(), + tokio::time::Instant::now(), tokio::time::Duration::from_secs(5), - tokio::time::Duration::from_secs(25), + //tokio::time::Duration::from_secs(25), ); let ticket_key = decoded_enr.get("ticket_key").unwrap(); From a5829afb3855b91c385a0b792ce85bfd0910ffaf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 27 Jul 2022 18:30:03 +0200 Subject: [PATCH 309/391] Remove unused TicketsPool due to replacing comparison of ad candidates' cummulative wait time against each other with first-to-come-in-correct-wait-time-wins --- src/advertisement/ticket.rs | 129 +----------------------------------- src/handler/request_call.rs | 7 -- src/service.rs | 17 +++-- 3 files changed, 13 insertions(+), 140 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 68630e5db..556c9cd90 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,11 +1,7 @@ use super::*; -use crate::{ - rpc::{RequestId, Ticket}, - service::ActiveRequest, -}; +use crate::{rpc::RequestId, service::ActiveRequest}; use delay_map::HashMapDelay; use enr::NodeId; -use more_asserts::debug_unreachable; use node_info::NodeContact; use std::{cmp::Eq, collections::hash_map::Entry}; @@ -19,12 +15,6 @@ pub const TICKET_LIMIT_DURATION: Duration = Duration::from_secs(60 * 15); /// time period). 
pub const MAX_TICKETS_NODE_TOPIC: u8 = 3; -/// The time window in which tickets are accepted for any given free ad slot. -const REGISTRATION_WINDOW_IN_SECS: u64 = 10; - -/// Max nodes that are considered in the selection process for an ad slot. -//const MAX_REGISTRANTS_AD_SLOT: usize = 50; - /// The duration for which requests are stored. const REQUEST_TIMEOUT_IN_SECS: u64 = 15; @@ -241,123 +231,6 @@ impl TicketHistory { } } -/// The RegistrationWindow is the time from when an ad slot becomes free until no more -/// registration attempts are accepted for the ad slot. -#[derive(Clone)] -struct RegistrationWindow { - /// The RegistrationWindow exists for a specific ad slot, so for a specific topic. - topic: TopicHash, - /// The open_time is used to make sure the RegistrationWindow closes after - /// REGISTRATION_WINDOW_IN_SECS. - open_time: Instant, -} - -/// The tickets that will be considered for an ad slot. -pub struct PoolTicket { - /// The node record of the node that returned the ticket. - enr: Enr, - /// The request id of the REGTOPIC that the ticket was returned in. - req_id: RequestId, - /// The returned ticket. - ticket: Ticket, - /// The ip address of the node that returned the ticket. - ip: IpAddr, -} - -impl PoolTicket { - pub fn new(enr: Enr, req_id: RequestId, ticket: Ticket, ip: IpAddr) -> Self { - PoolTicket { - enr, - req_id, - ticket, - ip, - } - } - - pub fn node_record(&self) -> &Enr { - &self.enr - } - - pub fn req_id(&self) -> &RequestId { - &self.req_id - } - - pub fn ticket(&self) -> &Ticket { - &self.ticket - } - - pub fn ip(&self) -> &IpAddr { - &self.ip - } -} - -/// The TicketPools collects all the registration attempts for a free ad slot. -#[derive(Default)] -pub struct TicketPools { - /// The ticket_pools keeps track of all the registrants and their Tickets. One - /// ticket pool per TopicHash can be open at a time. A ticket pool collects the - /// valid tickets received within the registration window for a topic. 
- ticket_pools: HashMap>, - /// The expirations keeps track of when to close a ticket pool so the next one - /// can be opened. - expirations: VecDeque, -} - -impl TicketPools { - pub fn insert(&mut self, _node_record: Enr, _req_id: RequestId, _ticket: Ticket, _ip: IpAddr) { - /*if let Some(open_time) = ticket.req_time().checked_add(ticket.wait_time()) { - if open_time.elapsed() <= Duration::from_secs(REGISTRATION_WINDOW_IN_SECS) { - let pool = self.ticket_pools.entry(ticket.topic()).or_default(); - // Drop request if pool contains 50 nodes, these nodes are out of luck and - // won't be automatically included in next registration window for this topic - if pool.len() < MAX_REGISTRANTS_AD_SLOT { - if pool.is_empty() { - self.expirations.push_back(RegistrationWindow { - topic: ticket.topic(), - open_time, - }); - } - pool.insert( - node_record.node_id(), - PoolTicket::new(node_record, req_id, ticket, ip), - ); - } - } - }*/ - } -} - -impl Stream for TicketPools { - type Item = Result<(TopicHash, HashMap), String>; - fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - let ticket_pool = self.expirations.front(); - if let Some(reg_window) = ticket_pool { - if reg_window.open_time.elapsed() < Duration::from_secs(REGISTRATION_WINDOW_IN_SECS) { - return Poll::Pending; - } - } else { - return Poll::Pending; - } - self.expirations - .pop_front() - .map(|reg_window| { - self.ticket_pools - .remove_entry(®_window.topic) - .map(|(topic, ticket_pool)| { - self.expirations.pop_front(); - Poll::Ready(Some(Ok((topic, ticket_pool)))) - }) - .unwrap_or_else(|| { - debug_unreachable!( - "Mismatched mapping between ticket_pools and expirations invariant" - ); - Poll::Pending - }) - }) - .unwrap_or(Poll::Pending) - } -} - /// Since according to spec, a REGTOPIC request can receive both a TICKET and /// then REGISTRATION_WINDOW_IN_SECS seconds later optionally also a /// REGCONFIRMATION response, ActiveRegtopicRequests need to be handled separate diff --git 
a/src/handler/request_call.rs b/src/handler/request_call.rs index 748a00894..4c325942b 100644 --- a/src/handler/request_call.rs +++ b/src/handler/request_call.rs @@ -116,13 +116,6 @@ impl RequestCall { false } - // We have received a Register Confirmation. Returns true if we are still expecting more - // responses. - pub fn register_confirmation(&mut self) -> bool { - // If there are more NODES responses to come, wait for these. - self.awaiting_nodes.is_some() - } - /// Returns the request ID associated with the [`RequestCall`]. pub fn id(&self) -> &RequestId { &self.request.id diff --git a/src/service.rs b/src/service.rs index 3ec91379e..7e05e73e1 100644 --- a/src/service.rs +++ b/src/service.rs @@ -82,6 +82,9 @@ const MAX_REGTOPICS_REGISTER_INTERVAL: usize = 16; /// The max number of uncontacted peers to store before the kbuckets per topic. const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; +/// The duration in seconds which a node can come late to an assigned wait time. +const WAIT_TIME_MARGINAL: Duration = Duration::from_secs(5); + /// Request type for Protocols using `TalkReq` message. /// /// Automatically responds with an empty body on drop if @@ -1335,7 +1338,7 @@ impl Service { let waited_time = ticket.req_time().elapsed(); let wait_time = ticket.wait_time(); if waited_time < wait_time - || waited_time >= wait_time + Duration::from_secs(5) + || waited_time >= wait_time + WAIT_TIME_MARGINAL { warn!("The REGTOPIC has not waited the time assigned in the ticket. Blacklisting peer {}.", node_address.node_id); let ban_timeout = @@ -1366,15 +1369,19 @@ impl Service { // with wait time set to zero indicating successful registration. 
if let Err((wait_time, e)) = self.ads - .insert(enr.clone(), topic, node_address.socket_addr.ip()) + .insert(enr, topic, node_address.socket_addr.ip()) { - // If there is wait time for the requesting node for this topic to register as an ad, due to the - // current state of the topic table, the wait time on the new ticket to send is updated. + // The wait time on the new ticket to send is updated if there is wait time for the requesting + // node for this topic to register as an ad due to the current state of the topic table. + error!( + "Registration attempt from peer {} for topic hash {} failed. Error: {}", + node_address.node_id, topic, e + ); new_ticket.set_wait_time(wait_time); } let wait_time = new_ticket.wait_time(); - self.send_ticket_response(node_address.clone(), id.clone(), new_ticket, wait_time); + self.send_ticket_response(node_address, id, new_ticket, wait_time); } RequestBody::TopicQuery { topic } => { self.send_find_topic_nodes_response( From 43376e2441abb7a83f2275852e7d86fddbd80fbf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 28 Jul 2022 11:02:09 +0200 Subject: [PATCH 310/391] Remove multiple responses to topic requests in Handler --- src/handler/mod.rs | 17 ++++------------- src/handler/request_call.rs | 37 +------------------------------------ 2 files changed, 5 insertions(+), 49 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 8978b4283..8e92c5ec0 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -1001,19 +1001,10 @@ impl Handler { return; } } - ResponseBody::Ticket { wait_time, .. } => { - // We may want to keep the request alive if further nodes responses are due, or if - // a REGCONFIRMATION is expected. - if request_call.register_ticket(wait_time) { - trace!("Reinserting active request"); - // There are more responses remaining, add back the request and send the response - self.reinsert_request(node_address, request_call, response) - .await; - return; - } - } - ResponseBody::Pong { .. 
} | ResponseBody::Talk { .. } => { - // These are both associated with a single response + ResponseBody::Pong { .. } + | ResponseBody::Talk { .. } + | ResponseBody::Ticket { .. } => { + // These are all associated with a single response } } diff --git a/src/handler/request_call.rs b/src/handler/request_call.rs index 4c325942b..e4f224b52 100644 --- a/src/handler/request_call.rs +++ b/src/handler/request_call.rs @@ -18,9 +18,6 @@ pub(crate) struct RequestCall { /// A NODES response can span multiple datagrams. If we are receiving multiple NODES responses, /// this tracks the number of datagrams we are still expecting. awaiting_nodes: Option, - /// For topic registrations we expect to receive a ticket. We keep the request alive until we - /// receive a ticket. - ticket_received: bool, /// Signifies if we are initiating the session with a random packet. This is only used to /// determine the connection direction of the session. initiating_session: bool, @@ -40,7 +37,6 @@ impl RequestCall { handshake_sent: false, retries: 1, awaiting_nodes: None, - ticket_received: false, initiating_session, } } @@ -82,38 +78,7 @@ impl RequestCall { return true; // still waiting for more messages } } - - // This is either a single message, the node is faulty, or we have the final NODES response - // we were waiting for. - // We are not waiting for more messages, unless we are still waiting for a ticket. - if matches!(self.kind(), RequestBody::RegisterTopic { .. }) { - !self.ticket_received // We are still waiting for a Ticket - } else { - false // This was a single NODES response and we have no interest in waiting for more messages. - } - } - - /// A TICKET response has been received. - /// This updates the state of the request and returns true if we should wait for more - /// responses. - pub fn register_ticket(&mut self, wait_time: u64) -> bool { - if self.ticket_received { - // We have already received a ticket, do not wait for anything further. 
- return false; - } - - self.ticket_received = true; - - // If the ticket is confirmed, we expect an immediate confirmation - if wait_time == 0 { - return true; - } - - // If we are still expecting more NODES to be returned, wait for these also. - if self.awaiting_nodes.is_some() { - return true; - } - false + false // This was a single NODES response and we have no interest in waiting for more messages. } /// Returns the request ID associated with the [`RequestCall`]. From 7cd0fdf30a58762df2d38f2b14d7db642adda16f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 28 Jul 2022 14:16:04 +0200 Subject: [PATCH 311/391] Mix regtopic requests with normal active requests at service level as regtopics will only recieve one response (ticket) --- src/advertisement/ticket.rs | 114 +----------------------------------- src/service.rs | 102 +++----------------------------- src/service/test.rs | 3 - 3 files changed, 8 insertions(+), 211 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 556c9cd90..75801533e 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,9 +1,8 @@ use super::*; -use crate::{rpc::RequestId, service::ActiveRequest}; use delay_map::HashMapDelay; use enr::NodeId; use node_info::NodeContact; -use std::{cmp::Eq, collections::hash_map::Entry}; +use std::cmp::Eq; /// The max wait time accpeted for tickets. pub const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; @@ -15,13 +14,6 @@ pub const TICKET_LIMIT_DURATION: Duration = Duration::from_secs(60 * 15); /// time period). pub const MAX_TICKETS_NODE_TOPIC: u8 = 3; -/// The duration for which requests are stored. -const REQUEST_TIMEOUT_IN_SECS: u64 = 15; - -/// Each REGTOPIC request gets a TICKET response, NODES response and can get -/// a REGCONFIRMATION response. -const MAX_RESPONSES_REGTOPIC: u8 = 3; - /// A topic is active when it's associated with the NodeId from a node it is /// published on. 
#[derive(PartialEq, Eq, Hash, Clone)] @@ -230,107 +222,3 @@ impl TicketHistory { } } } - -/// Since according to spec, a REGTOPIC request can receive both a TICKET and -/// then REGISTRATION_WINDOW_IN_SECS seconds later optionally also a -/// REGCONFIRMATION response, ActiveRegtopicRequests need to be handled separate -/// from ActiveRequests in Service. -#[derive(Clone)] -pub struct ActiveRegtopicRequest { - /// The RequestId identifies an ActiveRequest. - req_id: RequestId, - /// The insert_time is used to make sure an ActiveRegtopicRequest persists - /// no longer than REQUEST_TIMEOUT_IN_SECS. - insert_time: Instant, -} - -impl ActiveRegtopicRequest { - fn new(req_id: RequestId, insert_time: Instant) -> Self { - ActiveRegtopicRequest { - insert_time, - req_id, - } - } -} - -/// The ActiveRegtopicRequests keeps ActiveRequests until they have matched -/// with MAX_RESPONSES_PER_REGTOPIC responses. -#[derive(Default)] -pub struct ActiveRegtopicRequests { - requests: HashMap, - request_history: HashMap, - expirations: VecDeque, -} - -impl ActiveRegtopicRequests { - /// Checks if there are currently any active REGTOPIC requests. - pub fn is_empty(&self) -> bool { - self.expirations.is_empty() - } - - /// Returns the total amount of REGTOPIC requests currently active. - pub fn len(&self) -> usize { - self.expirations.len() - } - - /// Removes a specific REGTOPIC request if it exists. - pub fn remove(&mut self, req_id: &RequestId) -> Option { - if let Some(seen_count) = self.request_history.get_mut(req_id) { - *seen_count += 1; - if *seen_count == 0 { - self.request_history.remove(req_id); - self.requests.remove(req_id) - } else { - self.requests.get(req_id).map(|req| ActiveRequest { - contact: req.contact.clone(), - request_body: req.request_body.clone(), - query_id: req.query_id, - callback: None, - }) - } - } else { - None - } - } - - /// Caution! 
Reinsert should only be called if a NODES response to a REGTOPIC needs to be divided - /// into multiple NODES responses, the request must be reinserted. - pub fn reinsert(&mut self, req_id: RequestId) { - self.remove_expired(); - if let Entry::Occupied(ref mut entry) = self.request_history.entry(req_id) { - *entry.get_mut() += 1; - } - } - - /// Inserts a REGTOPIC request into [`ActiveRegtopicRequests`] after removing timed out [`ActiveRegtopicRequest`]s. - pub fn insert(&mut self, req_id: RequestId, req: ActiveRequest) { - self.remove_expired(); - let now = Instant::now(); - - self.requests.insert(req_id.clone(), req); - self.request_history - .insert(req_id.clone(), MAX_RESPONSES_REGTOPIC); - self.expirations - .push_back(ActiveRegtopicRequest::new(req_id, now)); - } - - /// If a REGTOPIC request doesn't receive the expected responses it times out, and calling this - /// function will remove timed out entries. - fn remove_expired(&mut self) { - let mut expired = Vec::new(); - self.expirations - .iter() - .take_while(|req| { - req.insert_time.elapsed() >= Duration::from_secs(REQUEST_TIMEOUT_IN_SECS) - }) - .for_each(|req| { - expired.push(req.clone()); - }); - - expired.into_iter().for_each(|req| { - self.requests.remove(&req.req_id); - self.request_history.remove(&req.req_id); - self.expirations.pop_front(); - }); - } -} diff --git a/src/service.rs b/src/service.rs index 7e05e73e1..c36105281 100644 --- a/src/service.rs +++ b/src/service.rs @@ -18,7 +18,7 @@ use self::{ }; use crate::{ advertisement::{ - ticket::{ActiveRegtopicRequests, Tickets, MAX_WAIT_TIME_TICKET, TICKET_LIMIT_DURATION}, + ticket::{Tickets, MAX_WAIT_TIME_TICKET, TICKET_LIMIT_DURATION}, topic::{Sha256Topic as Topic, TopicHash}, Ads, AD_LIFETIME, }, @@ -232,18 +232,6 @@ pub struct Service { /// Keeps track of the number of responses received from a NODES response. active_nodes_responses: HashMap, - /// Keeps track of the number of responses received from a NODES response containing ads. 
- active_adnodes_responses: HashMap, - - /// Keeps track of the 2 expected responses, NODES and ADNODES that should be received from a - /// TOPICQUERY request. - topic_query_responses: HashMap, - - /// Keeps track of the 3 expected responses, TICKET and NODES that should be received from a - /// REGTOPIC request and REGCONFIRMATION that may be received if there is a free ad slot and - /// the node is selected by the remote node for the free ad slot. - active_regtopic_requests: ActiveRegtopicRequests, - /// A map of votes nodes have made about our external IP address. We accept the majority. ip_votes: Option, @@ -307,20 +295,6 @@ pub enum TopicQueryState { Unsatisfied(TopicHash), } -/// The state of a response to a single TOPICQUERY request. A topic lookup/query is -/// made up of several TOPICQUERYs each being sent to a different peer. -#[derive(Default)] -pub enum TopicQueryResponseState { - /// The Start state is intermediary upon receving the first response to the - /// TOPICQUERY request, either a NODES or ADNODES response. - #[default] - Start, - /// A NODES response has been completely received. - Nodes, - /// An ADNODES response has been completely received. - AdNodes, -} - /// At any given time, a set number of registrations should be active per topic hash to /// set to be registered. 
A registration is active when either a ticket for an adslot is /// held and the ticket wait time has not yet expired, or a REGCONFIRMATION has been @@ -494,9 +468,6 @@ impl Service { queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), active_nodes_responses: HashMap::new(), - active_adnodes_responses: HashMap::new(), - topic_query_responses: HashMap::new(), - active_regtopic_requests: ActiveRegtopicRequests::default(), ip_votes, handler_send, handler_recv, @@ -1368,8 +1339,7 @@ impl Service { // If there is no wait time and the ad is successfuly registered as an ad, the new ticket is sent // with wait time set to zero indicating successful registration. if let Err((wait_time, e)) = - self.ads - .insert(enr, topic, node_address.socket_addr.ip()) + self.ads.insert(enr, topic, node_address.socket_addr.ip()) { // The wait time on the new ticket to send is updated if there is wait time for the requesting // node for this topic to register as an ad due to the current state of the topic table. @@ -1401,12 +1371,7 @@ impl Service { // verify we know of the rpc_id let id = response.id.clone(); - let active_request = self - .active_requests - .remove(&id) - .or_else(|| self.active_regtopic_requests.remove(&id)); - - if let Some(mut active_request) = active_request { + if let Some(mut active_request) = self.active_requests.remove(&id) { debug!( "Received RPC response: {} to request: {} from: {}", response.body, active_request.request_body, active_request.contact @@ -1551,14 +1516,7 @@ impl Service { current_response.received_nodes.append(&mut nodes); self.active_nodes_responses .insert(node_id, current_response); - match active_request.request_body { - RequestBody::RegisterTopic { .. 
} => { - self.active_regtopic_requests.reinsert(id); - } - _ => { - self.active_requests.insert(id, active_request); - } - } + self.active_requests.insert(id, active_request); return; } @@ -1581,30 +1539,7 @@ impl Service { // ensure any mapping is removed in this rare case self.active_nodes_responses.remove(&node_id); - if let RequestBody::TopicQuery { topic } = active_request.request_body { - self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); - - let response_state = self.topic_query_responses.entry(node_id).or_default(); - - match response_state { - TopicQueryResponseState::Start => { - *response_state = TopicQueryResponseState::Nodes; - self.active_requests.insert(id, active_request); - } - TopicQueryResponseState::AdNodes => { - self.topic_query_responses.remove(&node_id); - } - TopicQueryResponseState::Nodes => { - debug_unreachable!("No more NODES responses should be received if TOPICQUERY response is in Nodes state.") - } - } - } else if let RequestBody::RegisterTopic { topic, .. } = - active_request.request_body - { - self.discovered(&node_id, nodes, active_request.query_id, Some(topic)); - } else if let RequestBody::FindNode { .. } = active_request.request_body { - self.discovered(&node_id, nodes, active_request.query_id, None) - } + self.discovered(&node_id, nodes, active_request.query_id, None); } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); @@ -1767,10 +1702,6 @@ impl Service { } } else { *reg_state = RegistrationState::Confirmed(now); - METRICS.active_regtopic_req.store( - self.active_regtopic_requests.len(), - Ordering::Relaxed, - ); } } } @@ -2225,18 +2156,7 @@ impl Service { .send(HandlerIn::Request(contact, Box::new(request))) .is_ok() { - match request_body { - RequestBody::RegisterTopic { .. 
} => { - self.active_regtopic_requests - .insert(id.clone(), active_request); - METRICS - .active_regtopic_req - .store(self.active_regtopic_requests.len(), Ordering::Relaxed); - } - _ => { - self.active_requests.insert(id.clone(), active_request); - } - } + self.active_requests.insert(id.clone(), active_request); } id } @@ -2610,11 +2530,7 @@ impl Service { /// specified). fn rpc_failure(&mut self, id: RequestId, error: RequestError) { trace!("RPC Error removing request. Reason: {:?}, id {}", error, id); - if let Some(active_request) = self - .active_requests - .remove(&id) - .or_else(|| self.active_regtopic_requests.remove(&id)) - { + if let Some(active_request) = self.active_requests.remove(&id) { // If this is initiated by the user, return an error on the callback. All callbacks // support a request error. match active_request.callback { @@ -2698,10 +2614,6 @@ impl Service { if let Some(bucket) = registration_attempts.get_mut(&distance) { bucket.reg_attempts.remove(&node_id); } - - METRICS - .active_regtopic_req - .store(self.active_regtopic_requests.len(), Ordering::Relaxed); } self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)); return; diff --git a/src/service/test.rs b/src/service/test.rs index a7cfbd8de..2eb6bfa73 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -86,9 +86,6 @@ async fn build_service( queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), active_nodes_responses: HashMap::new(), - active_adnodes_responses: HashMap::new(), - topic_query_responses: HashMap::new(), - active_regtopic_requests: ActiveRegtopicRequests::default(), ip_votes: None, handler_send, handler_recv, From e2c45510313c934ce1cea92c67fc2db7157dd6f5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 28 Jul 2022 14:19:17 +0200 Subject: [PATCH 312/391] Remove multiple responses to topic requests in service --- src/rpc.rs | 4 +--- src/service.rs | 23 +++++------------------ 2 files changed, 6 insertions(+), 21 
deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 6f782b7b2..1f3288c4f 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -218,9 +218,7 @@ impl Response { ResponseBody::Nodes { .. } => { matches!( req, - RequestBody::FindNode { .. } - | RequestBody::TopicQuery { .. } - | RequestBody::RegisterTopic { .. } + RequestBody::FindNode { .. } | RequestBody::TopicQuery { .. } ) } ResponseBody::Talk { .. } => matches!(req, RequestBody::Talk { .. }), diff --git a/src/service.rs b/src/service.rs index c36105281..7c990abae 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1411,24 +1411,11 @@ impl Service { // These are sanitized and ordered let distances_requested: Vec = match &active_request.request_body { RequestBody::FindNode { distances } => distances.clone(), - RequestBody::TopicQuery { topic } - | RequestBody::RegisterTopic { topic, .. } => { - let peer_key: kbucket::Key = node_address.node_id.into(); - let topic_key: kbucket::Key = - NodeId::new(&topic.as_bytes()).into(); - let distance_to_topic = peer_key.log2_distance(&topic_key); - if let Some(distance) = distance_to_topic { - [distance - 1, distance, distance + 1].into() - } else { - warn!("The node id of this peer is the requested topic hash. Blacklisting peer with node id {}", node_id); - let ban_timeout = - self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::InvalidTicket); - return; - } + RequestBody::TopicQuery { .. } => + _ => { + debug_unreachable!("Only FINDNODE and TOPICQUERY requests get NODES responses"); + vec![] } - _ => unreachable!(), }; // This could be an ENR request from the outer service. 
If so respond to the @@ -2146,7 +2133,7 @@ impl Service { let request_body = active_request.request_body.clone(); let request: Request = Request { id: id.clone(), - body: request_body.clone(), + body: request_body, }; let contact = active_request.contact.clone(); From 9d02c6757114a084d00414ab697a2ebe21dd017a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 28 Jul 2022 15:00:18 +0200 Subject: [PATCH 313/391] Add ads returned in NODES responses to topic lookup query --- src/service.rs | 98 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 63 insertions(+), 35 deletions(-) diff --git a/src/service.rs b/src/service.rs index 7c990abae..aea18cf8e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1411,9 +1411,11 @@ impl Service { // These are sanitized and ordered let distances_requested: Vec = match &active_request.request_body { RequestBody::FindNode { distances } => distances.clone(), - RequestBody::TopicQuery { .. } => + RequestBody::TopicQuery { .. } => vec![], // Any distance is allowed for ads _ => { - debug_unreachable!("Only FINDNODE and TOPICQUERY requests get NODES responses"); + debug_unreachable!( + "Only FINDNODE and TOPICQUERY requests get NODES responses" + ); vec![] } }; @@ -1440,43 +1442,47 @@ impl Service { warn!("Failed to send response in callback {:?}", e) } return; - } + } else if !distances_requested.is_empty() { + // This is a repsonse to a FINDNODE request with specifically request distances + // Filter out any nodes that are not of the correct distance - // Filter out any nodes that are not of the correct distance - let peer_key: kbucket::Key = node_id.into(); + let peer_key: kbucket::Key = node_id.into(); - // The distances we send are sanitized an ordered. - // We never send an ENR request in combination of other requests. - if distances_requested.len() == 1 && distances_requested[0] == 0 { - // we requested an ENR update - if nodes.len() > 1 { - warn!( - "Peer returned more than one ENR for itself. 
Blacklisting {}", - node_address - ); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + // The distances we send are sanitized an ordered. + // We never send an ENR request in combination of other requests. + if distances_requested.len() == 1 && distances_requested[0] == 0 { + // we requested an ENR update + if nodes.len() > 1 { + warn!( + "Peer returned more than one ENR for itself. Blacklisting {}", + node_address + ); + let ban_timeout = + self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + nodes.retain(|enr| { + peer_key.log2_distance(&enr.node_id().into()).is_none() + }); + } + } else { + let before_len = nodes.len(); nodes.retain(|enr| { - peer_key.log2_distance(&enr.node_id().into()).is_none() + peer_key + .log2_distance(&enr.node_id().into()) + .map(|distance| distances_requested.contains(&distance)) + .unwrap_or_else(|| false) }); - } - } else { - let before_len = nodes.len(); - nodes.retain(|enr| { - peer_key - .log2_distance(&enr.node_id().into()) - .map(|distance| distances_requested.contains(&distance)) - .unwrap_or_else(|| false) - }); - if nodes.len() < before_len { - // Peer sent invalid ENRs. Blacklist the Node - warn!( - "Peer sent invalid ENR. Blacklisting {}", - active_request.contact - ); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + if nodes.len() < before_len { + // Peer sent invalid ENRs. Blacklist the Node + warn!( + "Peer sent invalid ENR. 
Blacklisting {}", + active_request.contact + ); + let ban_timeout = + self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + } } } @@ -1526,7 +1532,29 @@ impl Service { // ensure any mapping is removed in this rare case self.active_nodes_responses.remove(&node_id); - self.discovered(&node_id, nodes, active_request.query_id, None); + if let RequestBody::FindNode { .. } = &active_request.request_body { + self.discovered(&node_id, nodes, active_request.query_id, None); + } else if let RequestBody::TopicQuery { topic } = &active_request.request_body { + nodes.retain(|enr| { + if enr.node_id() == self.local_enr.read().node_id() { + // Don't add this node as a result to the query if it is currently advertising + // the topic and was returned as an ad in the NODES response. + return false; + } + (self.config.table_filter)(enr) + }); + if let Some(query) = self.active_topic_queries.queries.get_mut(topic) { + nodes.into_iter().for_each(|enr| { + trace!( + "Inserting node {} into query for topic hash {}", + enr.node_id(), + topic + ); + query.results.insert(enr.node_id(), enr); + }); + *query.queried_peers.entry(node_id).or_default() = true; + } + } } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); From c90b338c35e6247079f14d88d5f720c566150a4a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 28 Jul 2022 15:11:52 +0200 Subject: [PATCH 314/391] fixup! 
Remove multiple responses to topic requests in service --- src/service.rs | 82 +++++--------------------------------------------- 1 file changed, 8 insertions(+), 74 deletions(-) diff --git a/src/service.rs b/src/service.rs index aea18cf8e..52d9f6cdb 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1281,13 +1281,6 @@ impl Service { } } - self.send_find_topic_nodes_response( - topic, - node_address.clone(), - id.clone(), - "REGTOPIC", - ); - if !ticket.is_empty() { let decrypted_ticket = { let aead = Aes128Gcm::new(GenericArray::from_slice(&self.ticket_key)); @@ -1354,13 +1347,6 @@ impl Service { self.send_ticket_response(node_address, id, new_ticket, wait_time); } RequestBody::TopicQuery { topic } => { - self.send_find_topic_nodes_response( - topic, - node_address.clone(), - id.clone(), - "TOPICQUERY", - ); - trace!("Sending ADNODES response"); self.send_topic_query_adnodes_response(node_address, id, topic); } } @@ -1897,7 +1883,12 @@ impl Service { .ads .get_ad_nodes(topic) .map(|ad| ad.node_record().clone()) - .collect(); + .collect::>(); + trace!( + "Sending NODES response(s) containing all together {} ads for topic hash {}", + nodes_to_send.len(), + topic + ); self.send_nodes_response( nodes_to_send, node_address, @@ -1905,64 +1896,7 @@ impl Service { "TOPICQUERY", ResponseBody::Nodes { total: 1u64, - nodes: Vec::new(), - }, - ); - } - - /// Finds a list of ENRs in the local routing table's kbucktets at the distance ±1 that - /// the topic hash would be placed in, to send in a NODES response to a TOPICQUERY or - /// REGTOPIC request. 
- fn send_find_topic_nodes_response( - &mut self, - topic: TopicHash, - node_address: NodeAddress, - id: RequestId, - req_type: &str, - ) { - let local_key: kbucket::Key = self.local_enr.read().node_id().into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); - let distance_to_topic = local_key.log2_distance(&topic_key); - - let mut closest_peers: Vec = Vec::new(); - if let Some(distance) = distance_to_topic { - self.kbuckets - .write() - .nodes_by_distances(&[distance], self.config.max_nodes_response) - .iter() - .for_each(|entry| { - if entry.node.key.preimage() != &node_address.node_id { - closest_peers.push(entry.node.value.clone()) - } - }); - - if closest_peers.len() < self.config.max_nodes_response { - for entry in self - .kbuckets - .write() - .nodes_by_distances( - &[distance - 1, distance + 1], - self.config.max_nodes_response - closest_peers.len(), - ) - .iter() - { - if closest_peers.len() > self.config.max_nodes_response { - break; - } - if entry.node.key.preimage() != &node_address.node_id { - closest_peers.push(entry.node.value.clone()) - } - } - } - } - self.send_nodes_response( - closest_peers, - node_address, - id, - req_type, - ResponseBody::Nodes { - total: 1u64, - nodes: Vec::new(), + nodes: Vec::new(), // `send_nodes_response` handles dividing `nodes_to_send` into multiple NODES responses }, ); } @@ -2012,7 +1946,7 @@ impl Service { "FINDNODE", ResponseBody::Nodes { total: 1u64, - nodes: Vec::new(), + nodes: Vec::new(), // `send_nodes_response` handles dividing `nodes_to_send` into multiple NODES responses }, ); } From 9d1a376bc52c24f0d7276661443d986f0eb85165 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 29 Jul 2022 14:51:17 +0200 Subject: [PATCH 315/391] Add nodes to topic kbuckets by FINDNODE iterative query --- examples/find_nodes.rs | 2 +- src/discv5.rs | 5 +- src/service.rs | 242 +++++++++++++++++++++-------------------- src/service/test.rs | 1 + 4 files changed, 131 insertions(+), 119 deletions(-) diff 
--git a/examples/find_nodes.rs b/examples/find_nodes.rs index 5a0d61630..f4bfd5798 100644 --- a/examples/find_nodes.rs +++ b/examples/find_nodes.rs @@ -191,7 +191,7 @@ async fn main() { } match discv5_ev { Discv5Event::Discovered(enr) => info!("Enr discovered {}", enr), - Discv5Event::DiscoveredNewPeerTopic(enr, topic_hash) => info!("Enr discovered {} for topic {}", enr, topic_hash), + Discv5Event::DiscoveredPeerTopic(enr, topic_hash) => info!("Enr discovered {} for topic {}", enr, topic_hash), Discv5Event::EnrAdded { enr, replaced: _ } => info!("Enr added {}", enr), Discv5Event::NodeInserted { node_id, replaced: _ } => info!("Node inserted {}", node_id), Discv5Event::NodeInsertedTopic { node_id, replaced: _, topic_hash } => info!("Node inserted {} in topic hash {} kbucket", node_id, topic_hash), diff --git a/src/discv5.rs b/src/discv5.rs index 5c97a44f4..31375c159 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -63,8 +63,9 @@ pub enum Discv5Event { /// This happen spontaneously through queries as nodes return ENR's. These ENR's are not /// guaranteed to be live or contactable. Discovered(Enr), - /// A node has been discovered from either a REGTOPIC or a TOPICQUERY request. See [`Discv5Event::Discovered`]. - DiscoveredNewPeerTopic(Enr, TopicHash), + /// A node has been discovered from a FINDNODE request using the given TopiHash as key. + /// See [`Discv5Event::Discovered`]. + DiscoveredPeerTopic(Enr, TopicHash), /// A new ENR was added to the routing table. EnrAdded { enr: Enr, replaced: Option }, /// A new node has been added to the routing table. diff --git a/src/service.rs b/src/service.rs index 52d9f6cdb..f76add92c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -229,6 +229,11 @@ pub struct Service { /// query. active_requests: FnvHashMap, + /// To fill a topic's kbuckets, FINDNODE requests are sent using the topic hash as a node id (key). + /// With XOR metrics the buckets closest to topic hash will be filled this way. 
The request will + /// always time out. + find_node_topic_requests: HashMap, + /// Keeps track of the number of responses received from a NODES response. active_nodes_responses: HashMap, @@ -467,6 +472,7 @@ impl Service { kbuckets, queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), + find_node_topic_requests: Default::default(), active_nodes_responses: HashMap::new(), ip_votes, handler_send, @@ -588,6 +594,11 @@ impl Service { self.topics_kbuckets.insert(topic_hash, kbuckets); } self.send_topic_queries(topic_hash, Some(callback)); + + // To fill the kbuckets closest to the topic hash, start a find node query searching + // for the topic hash's bytes wrapped in a NodeId. + let topic_key = NodeId::new(&topic_hash.as_bytes()); + self.start_findnode_query(topic_key, None); } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); @@ -660,6 +671,11 @@ impl Service { } self.topics_kbuckets.insert(topic_hash, kbuckets); METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); + + // To fill the kbuckets closest to the topic hash, start a find node query searching + // for the topic hash's bytes wrapped in a NodeId. + let topic_key = NodeId::new(&topic_hash.as_bytes()); + self.start_findnode_query(topic_key, None); } } } @@ -1519,7 +1535,9 @@ impl Service { self.active_nodes_responses.remove(&node_id); if let RequestBody::FindNode { .. } = &active_request.request_body { - self.discovered(&node_id, nodes, active_request.query_id, None); + // In the case that it is a FINDNODE request using a topic hash as key, remove the mapping. 
+ let topic = self.find_node_topic_requests.remove(&id); + self.discovered(&node_id, nodes, active_request.query_id, topic); } else if let RequestBody::TopicQuery { topic } = &active_request.request_body { nodes.retain(|enr| { if enr.node_id() == self.local_enr.read().node_id() { @@ -2125,106 +2143,113 @@ impl Service { source: &NodeId, mut enrs: Vec, query_id: Option, - topic_hash: Option, + topic: Option, ) { let local_id = self.local_enr.read().node_id(); - enrs.retain(|enr| { - if enr.node_id() == local_id { - return false; - } - - let mut new_or_updated_peer = false; - - // If any of the discovered nodes are in the routing table, and there contains an older ENR, update it. - // If there is an event stream send the Discovered event - if self.config.report_discovered_peers && topic_hash.is_none() { - self.send_event(Discv5Event::Discovered(enr.clone())); + if let Some(topic_hash) = topic { + enrs.retain(|enr| enr.node_id() != local_id); + for enr in enrs.iter() { + // If there is an event stream send the DiscoveredPeerTopic event. 
+ if self.config.report_discovered_peers { + self.send_event(Discv5Event::DiscoveredPeerTopic(enr.clone(), topic_hash)); + } } + let mut discovered_new_peer = false; + if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { + for enr in enrs { + // If the node sends its own id we don't count it + if source != &enr.node_id() { + continue; + } + // ignore peers that don't pass the table filter + if (self.config.table_filter)(&enr) { + let key = kbucket::Key::from(enr.node_id()); - // ignore peers that don't pass the table filter - if (self.config.table_filter)(enr) { - let kbuckets_topic = topic_hash.and_then(|topic_hash| { - self.topics_kbuckets - .get_mut(&topic_hash) - .and_then(|kbuckets| { - Some(kbuckets).or_else(|| { - debug_unreachable!("A kbuckets table should exist for topic hash"); - None - }) - }) - }); + // If the ENR exists in the routing table and the discovered ENR has a greater + // sequence number, perform some filter checks before updating the enr. - let key = kbucket::Key::from(enr.node_id()); - - // If the ENR exists in the routing table and the discovered ENR has a greater - // sequence number, perform some filter checks before updating the enr. - - if let Some(kbuckets_topic) = kbuckets_topic { - match kbuckets_topic.entry(&key) { - kbucket::Entry::Present(entry, _) => { - if entry.value().seq() < enr.seq() { - if let UpdateResult::Failed(reason) = - kbuckets_topic.update_node(&key, enr.clone(), None) { - self.peers_to_ping.remove(&enr.node_id()); - debug!( - "Failed to update discovered ENR for kbucket of topic hash {:?}. 
Node: {}, Reason: {:?}", - topic_hash, source, reason - ); - return false; // Remove this peer from the discovered list if the update failed - } - new_or_updated_peer = true; - } - }, - kbucket::Entry::Pending(mut entry, _) => { - if entry.value().seq() < enr.seq() { - if let UpdateResult::Failed(reason) = - kbuckets_topic.update_node(&key, enr.clone(), None) { - self.peers_to_ping.remove(&enr.node_id()); - debug!( - "Failed to update discovered ENR for kbucket of topic hash {:?}. Node: {}, Reason: {:?}", - topic_hash, source, reason - ); - return false; // Remove this peer from the discovered list if the update failed - } - new_or_updated_peer = true; + let must_update_enr = match kbuckets_topic.entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => { + entry.value().seq() < enr.seq() } - } - kbucket::Entry::Absent(_) => { - if let Some(topic_hash) = topic_hash { - if self.config.report_discovered_peers { - self.send_event(Discv5Event::DiscoveredNewPeerTopic(enr.clone(), topic_hash)); - } - trace!("Discovered new peer {} for topic hash {}", enr.node_id(), topic_hash); + kbucket::Entry::Absent(_) => { + trace!( + "Discovered new peer {} for topic hash {}", + enr.node_id(), + topic_hash + ); let discovered_peers = self.discovered_peers_topic.entry(topic_hash).or_default(); - // If the intermediary storage before the topic's kbucktes is at bounds, discard the - // uncontacted peers. let node_id = enr.node_id(); let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = NodeId::new(&topic_hash.as_bytes()).into(); + let topic_key: kbucket::Key = + NodeId::new(&topic_hash.as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { let bucket = discovered_peers.entry(distance).or_default(); + // If the intermediary storage before the topic's kbucktes is at bounds, discard the + // uncontacted peers. 
if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { bucket.insert(node_id, enr.clone()); + discovered_new_peer = true; } else { warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); } } + false + } + _ => false, + }; + if must_update_enr { + if let UpdateResult::Failed(reason) = + kbuckets_topic.update_node(&key, enr.clone(), None) + { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR for kbucket of topic hash {:?}. Node: {}, Reason: {:?}", + topic_hash, source, reason + ); } - new_or_updated_peer = true; } - _ => {} } - } else { + } + if discovered_new_peer { + // If a topic lookup has dried up (no more peers to query), and we now have found new peers or updated enrs for + // known peers to that topic, the query can now proceed as long as it hasn't timed out already. + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { + query.dry = false; + } + } + } + } else { + enrs.retain(|enr| { + if enr.node_id() == local_id { + return false; + } + + // If any of the discovered nodes are in the routing table, and there contains an older ENR, update it. + // If there is an event stream send the Discovered event + if self.config.report_discovered_peers { + self.send_event(Discv5Event::Discovered(enr.clone())); + } + + // ignore peers that don't pass the table filter + if (self.config.table_filter)(enr) { + let key = kbucket::Key::from(enr.node_id()); + + // If the ENR exists in the routing table and the discovered ENR has a greater + // sequence number, perform some filter checks before updating the enr. 
+ let must_update_enr = match self.kbuckets.write().entry(&key) { kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), _ => false, }; if must_update_enr { - if let UpdateResult::Failed(reason) = - self.kbuckets.write().update_node(&key, enr.clone(), None) { + if let UpdateResult::Failed(reason) = + self.kbuckets.write().update_node(&key, enr.clone(), None) + { self.peers_to_ping.remove(&enr.node_id()); debug!( "Failed to update discovered ENR. Node: {}, Reason: {:?}", @@ -2233,54 +2258,37 @@ impl Service { return false; // Remove this peer from the discovered list if the update failed } } + } else { + return false; // Didn't pass the table filter remove the peer } - } else { - return false; // Didn't pass the table filter remove the peer - } - // The remaining ENRs are used if this request was part of a query. If we are - // requesting the target of the query, this ENR could be the result of requesting the - // target-nodes own id. We don't want to add this as a "new" discovered peer in the - // query, so we remove it from the discovered list here. - if topic_hash.is_some() { - // For a topic lookup or registration only new or updated peers are retained. - new_or_updated_peer && source != &enr.node_id() - } else { + // The remaining ENRs are used if this request was part of a query. If we are + // requesting the target of the query, this ENR could be the result of requesting the + // target-nodes own id. We don't want to add this as a "new" discovered peer in the + // query, so we remove it from the discovered list here. source != &enr.node_id() - } - }); - - if let Some(topic_hash) = topic_hash { - if enrs.is_empty() { - return; - } - // If a topic lookup has dried up (no more peers to query), the query can now proceed as long as - // it hasn't timed out already. 
- if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { - query.dry = false; - } - return; - } + }); - // if this is part of a query, update the query - if let Some(query_id) = query_id { - if let Some(query) = self.queries.get_mut(query_id) { - let mut peer_count = 0; - for enr_ref in enrs.iter() { - if !query - .target_mut() - .untrusted_enrs - .iter() - .any(|e| e.node_id() == enr_ref.node_id()) - { - query.target_mut().untrusted_enrs.push(enr_ref.clone()); + // if this is part of a query, update the query + if let Some(query_id) = query_id { + if let Some(query) = self.queries.get_mut(query_id) { + let mut peer_count = 0; + for enr_ref in enrs.iter() { + if !query + .target_mut() + .untrusted_enrs + .iter() + .any(|e| e.node_id() == enr_ref.node_id()) + { + query.target_mut().untrusted_enrs.push(enr_ref.clone()); + } + peer_count += 1; } - peer_count += 1; + debug!("{} peers found for query id {:?}", peer_count, query_id); + query.on_success(source, &enrs) + } else { + debug!("Response returned for ended query {:?}", query_id) } - debug!("{} peers found for query id {:?}", peer_count, query_id); - query.on_success(source, &enrs) - } else { - debug!("Response returned for ended query {:?}", query_id) } } } @@ -2512,13 +2520,15 @@ impl Service { "NODES Response failed, but was partially processed from: {}", active_request.contact ); + // In the case that it is a FINDNODE request using a topic hash as key, remove the mapping. 
+ let topic = self.find_node_topic_requests.remove(&id); // if it's a query mark it as success, to process the partial // collection of peers self.discovered( &node_id, nodes_response.received_nodes, active_request.query_id, - None, + topic, ); } } else { diff --git a/src/service/test.rs b/src/service/test.rs index 2eb6bfa73..13c9ce502 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -85,6 +85,7 @@ async fn build_service( kbuckets, queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), + find_node_topic_requests: Default::default(), active_nodes_responses: HashMap::new(), ip_votes: None, handler_send, From db89688f0bc2c456fc021f197fb99d337490a693 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 29 Jul 2022 15:27:06 +0200 Subject: [PATCH 316/391] Query for more peers upon dry topic lookup --- src/service.rs | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/src/service.rs b/src/service.rs index f76add92c..443a9494c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -298,6 +298,9 @@ pub enum TopicQueryState { /// Not enough ads have been returned from the first round of sending TOPICQUERY /// requests, new peers in the topic's kbucktes should be queried. Unsatisfied(TopicHash), + /// Not enough results were found and not enough new peers where found to send + /// TOPICQUERYs to. 
+ Dry(TopicHash), } /// At any given time, a set number of registrations should be active per topic hash to @@ -360,9 +363,7 @@ impl Stream for ActiveTopicQueries { type Item = TopicQueryState; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { for (topic_hash, query) in self.queries.iter() { - if query.dry { - return Poll::Pending; - } else if query.results.len() >= self.num_results { + if query.results.len() >= self.num_results { return Poll::Ready(Some(TopicQueryState::Finished(*topic_hash))); } else if query.start.elapsed() >= self.time_out { warn!( @@ -370,6 +371,8 @@ impl Stream for ActiveTopicQueries { query.results.len() ); return Poll::Ready(Some(TopicQueryState::TimedOut(*topic_hash))); + } else if query.dry { + return Poll::Ready(Some(TopicQueryState::Dry(*topic_hash))); } else { let exhausted_peers = query .queried_peers @@ -593,12 +596,14 @@ impl Service { } self.topics_kbuckets.insert(topic_hash, kbuckets); } - self.send_topic_queries(topic_hash, Some(callback)); - // To fill the kbuckets closest to the topic hash, start a find node query searching - // for the topic hash's bytes wrapped in a NodeId. + // To fill the kbuckets closest to the topic hash as well as those further away + // (itertively getting closer to node ids to the topic hash) start a find node + // query searching for the topic hash's bytes wrapped in a NodeId. let topic_key = NodeId::new(&topic_hash.as_bytes()); self.start_findnode_query(topic_key, None); + + self.send_topic_queries(topic_hash, Some(callback)); } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); @@ -672,8 +677,9 @@ impl Service { self.topics_kbuckets.insert(topic_hash, kbuckets); METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); - // To fill the kbuckets closest to the topic hash, start a find node query searching - // for the topic hash's bytes wrapped in a NodeId. 
+ // To fill the kbuckets closest to the topic hash as well as those further away + // (itertively getting closer to node ids to the topic hash) start a find node + // query searching for the topic hash's bytes wrapped in a NodeId. let topic_key = NodeId::new(&topic_hash.as_bytes()); self.start_findnode_query(topic_key, None); } @@ -858,10 +864,15 @@ impl Service { } } } - }, - TopicQueryState::Unsatisfied(topic_hash) => { - self.send_topic_queries(topic_hash, None); - }, + } + TopicQueryState::Dry(topic_hash) => { + // To fill the kbuckets closest to the topic hash as well as those further away + // (itertively getting closer to node ids to the topic hash) start a find node + // query searching for the topic hash's bytes wrapped in a NodeId. + let topic_key = NodeId::new(&topic_hash.as_bytes()); + self.start_findnode_query(topic_key, None); + } + TopicQueryState::Unsatisfied(topic_hash) => self.send_topic_queries(topic_hash, None), } } _ = registration_interval.tick() => { @@ -2218,6 +2229,7 @@ impl Service { // If a topic lookup has dried up (no more peers to query), and we now have found new peers or updated enrs for // known peers to that topic, the query can now proceed as long as it hasn't timed out already. 
if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { + debug!("Found new peers to send TOPICQUERY to, unsetting query status dry"); query.dry = false; } } From d2b5339e7f4bf1714628ad6cf3aaa483e359732c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 29 Jul 2022 15:44:48 +0200 Subject: [PATCH 317/391] Fix bug in stream of topic lookups --- src/service.rs | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/service.rs b/src/service.rs index 443a9494c..6c093cced 100644 --- a/src/service.rs +++ b/src/service.rs @@ -298,9 +298,6 @@ pub enum TopicQueryState { /// Not enough ads have been returned from the first round of sending TOPICQUERY /// requests, new peers in the topic's kbucktes should be queried. Unsatisfied(TopicHash), - /// Not enough results were found and not enough new peers where found to send - /// TOPICQUERYs to. - Dry(TopicHash), } /// At any given time, a set number of registrations should be active per topic hash to @@ -372,7 +369,7 @@ impl Stream for ActiveTopicQueries { ); return Poll::Ready(Some(TopicQueryState::TimedOut(*topic_hash))); } else if query.dry { - return Poll::Ready(Some(TopicQueryState::Dry(*topic_hash))); + return Poll::Pending; } else { let exhausted_peers = query .queried_peers @@ -865,13 +862,6 @@ impl Service { } } } - TopicQueryState::Dry(topic_hash) => { - // To fill the kbuckets closest to the topic hash as well as those further away - // (itertively getting closer to node ids to the topic hash) start a find node - // query searching for the topic hash's bytes wrapped in a NodeId. 
- let topic_key = NodeId::new(&topic_hash.as_bytes()); - self.start_findnode_query(topic_key, None); - } TopicQueryState::Unsatisfied(topic_hash) => self.send_topic_queries(topic_hash, None), } } @@ -2231,6 +2221,11 @@ impl Service { if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { debug!("Found new peers to send TOPICQUERY to, unsetting query status dry"); query.dry = false; + // To fill the kbuckets closest to the topic hash as well as those further away + // (itertively getting closer to node ids to the topic hash) start a find node + // query searching for the topic hash's bytes wrapped in a NodeId. + let topic_key = NodeId::new(&topic_hash.as_bytes()); + self.start_findnode_query(topic_key, None); } } } From 1d7ef49795035126f7b12c7577a20eeecc197754 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 29 Jul 2022 16:58:10 +0200 Subject: [PATCH 318/391] Verify enrs of ads returned by TOPICQUERY --- src/service.rs | 146 ++++++++++++++++++++++++++----------------------- 1 file changed, 78 insertions(+), 68 deletions(-) diff --git a/src/service.rs b/src/service.rs index 6c093cced..2b4775663 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1411,80 +1411,73 @@ impl Service { ); } - // These are sanitized and ordered - let distances_requested: Vec = match &active_request.request_body { - RequestBody::FindNode { distances } => distances.clone(), - RequestBody::TopicQuery { .. } => vec![], // Any distance is allowed for ads - _ => { - debug_unreachable!( - "Only FINDNODE and TOPICQUERY requests get NODES responses" - ); - vec![] - } - }; - - // This could be an ENR request from the outer service. If so respond to the - // callback and End. - if let Some(CallbackResponse::Enr(callback)) = active_request.callback.take() { - // Currently only support requesting for ENR's. Verify this is the case. 
- if !distances_requested.is_empty() && distances_requested[0] != 0 { - error!("Retrieved a callback request that wasn't for a peer's ENR"); - return; - } - // This must be for asking for an ENR - if nodes.len() > 1 { - warn!( - "Peer returned more than one ENR for itself. {}", - active_request.contact - ); - } - let response = nodes - .pop() - .ok_or(RequestError::InvalidEnr("Peer did not return an ENR")); - if let Err(e) = callback.send(response) { - warn!("Failed to send response in callback {:?}", e) - } - return; - } else if !distances_requested.is_empty() { - // This is a repsonse to a FINDNODE request with specifically request distances - // Filter out any nodes that are not of the correct distance - - let peer_key: kbucket::Key = node_id.into(); - - // The distances we send are sanitized an ordered. - // We never send an ENR request in combination of other requests. - if distances_requested.len() == 1 && distances_requested[0] == 0 { - // we requested an ENR update + // Distances are sanitized and ordered + if let RequestBody::FindNode { distances } = &active_request.request_body { + // This could be an ENR request from the outer service. If so respond to the + // callback and End. + if let Some(CallbackResponse::Enr(callback)) = + active_request.callback.take() + { + // Currently only support requesting for ENR's. Verify this is the case. + if !distances.is_empty() && distances[0] != 0 { + error!("Retrieved a callback request that wasn't for a peer's ENR"); + return; + } + // This must be for asking for an ENR if nodes.len() > 1 { warn!( + "Peer returned more than one ENR for itself. 
{}", + active_request.contact + ); + } + let response = nodes + .pop() + .ok_or(RequestError::InvalidEnr("Peer did not return an ENR")); + if let Err(e) = callback.send(response) { + warn!("Failed to send response in callback {:?}", e) + } + return; + } else if !distances.is_empty() { + // This is a repsonse to a FINDNODE request with specifically request distances + // Filter out any nodes that are not of the correct distance + + let peer_key: kbucket::Key = node_id.into(); + + // The distances we send are sanitized an ordered. + // We never send an ENR request in combination of other requests. + if distances.len() == 1 && distances[0] == 0 { + // we requested an ENR update + if nodes.len() > 1 { + warn!( "Peer returned more than one ENR for itself. Blacklisting {}", node_address ); - let ban_timeout = - self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + let ban_timeout = + self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + nodes.retain(|enr| { + peer_key.log2_distance(&enr.node_id().into()).is_none() + }); + } + } else { + let before_len = nodes.len(); nodes.retain(|enr| { - peer_key.log2_distance(&enr.node_id().into()).is_none() + peer_key + .log2_distance(&enr.node_id().into()) + .map(|distance| distances.contains(&distance)) + .unwrap_or_else(|| false) }); - } - } else { - let before_len = nodes.len(); - nodes.retain(|enr| { - peer_key - .log2_distance(&enr.node_id().into()) - .map(|distance| distances_requested.contains(&distance)) - .unwrap_or_else(|| false) - }); - if nodes.len() < before_len { - // Peer sent invalid ENRs. Blacklist the Node - warn!( - "Peer sent invalid ENR. Blacklisting {}", - active_request.contact - ); - let ban_timeout = - self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + if nodes.len() < before_len { + // Peer sent invalid ENRs. 
Blacklist the Node + warn!( + "Peer sent invalid ENR. Blacklisting {}", + active_request.contact + ); + let ban_timeout = + self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + } } } } @@ -1524,7 +1517,7 @@ impl Service { } debug!( - "Received a nodes response of len: {}, total: {}, from: {}", + "Received a NODES response of len: {}, total: {}, from: {}", nodes.len(), total, active_request.contact @@ -1546,7 +1539,24 @@ impl Service { // the topic and was returned as an ad in the NODES response. return false; } - (self.config.table_filter)(enr) + if !(self.config.table_filter)(enr) { + return false; + } + // Ads are checked for validity, if they do not contain the topic in their enr, they are discarded + if let Some(topics) = enr.get("topics") { + let rlp = Rlp::new(topics); + for item in rlp.iter() { + if let Ok(data) = item.data().map_err(|e| error!("Could not decode a topic in topics field in enr of peer {}. Error {}", enr.node_id(), e)) { + if let Ok(topic_string) = std::str::from_utf8(data).map_err(|e| error!("Could not decode topic in topics field into utf8, in enr of peer {}. 
Error {}", enr.node_id(), e)) { + let topic_hash = Topic::new(topic_string).hash(); + if &topic_hash == topic { + return true; + } + } + } + } + } + false }); if let Some(query) = self.active_topic_queries.queries.get_mut(topic) { nodes.into_iter().for_each(|enr| { From 813af2643f85f84e7e7e5df9fad1eb71a59a5697 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 29 Jul 2022 17:13:22 +0200 Subject: [PATCH 319/391] Verify topics field in ENR upon incoming registration attempt --- src/error.rs | 3 +++ src/service.rs | 50 +++++++++++++++++++++++++++++++------------------- 2 files changed, 34 insertions(+), 19 deletions(-) diff --git a/src/error.rs b/src/error.rs index d7377247b..046e03580 100644 --- a/src/error.rs +++ b/src/error.rs @@ -124,6 +124,9 @@ pub enum RequestError { RegistrationOtherNode, /// A REGTOPIC is not respecting the assigned wait time. InvalidWaitTime, + /// A REGTOPIC tries to advertise a topic it does not + /// list in its enr. + InvalidTopicsEnr, } #[derive(Debug, Clone, PartialEq)] diff --git a/src/service.rs b/src/service.rs index 2b4775663..21b9054e0 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1270,33 +1270,45 @@ impl Service { } RequestBody::RegisterTopic { topic, enr, ticket } => { // Blacklist if request tries to advertise another node than the sender - if enr.node_id() != node_address.node_id { - warn!("The enr node id in REGTOPIC request body does not match sender's. Nodes can only register themselves. Blacklisting peer {}.", node_address.node_id); + let registration_of_other_node = enr.node_id() != node_address.node_id + || match self.config.ip_mode { + IpMode::Ip4 => { + enr.udp4_socket().map(SocketAddr::V4) != Some(node_address.socket_addr) + } + IpMode::Ip6 { .. } => { + enr.udp6_socket().map(SocketAddr::V6) != Some(node_address.socket_addr) + } + }; + if registration_of_other_node { + warn!("The enr in the REGTOPIC request body does not match sender's. Nodes can only register themselves. 
Blacklisting peer {}.", node_address.node_id); let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); self.rpc_failure(id, RequestError::RegistrationOtherNode); return; } - match self.config.ip_mode { - IpMode::Ip4 => { - if enr.udp4_socket().map(SocketAddr::V4) != Some(node_address.socket_addr) { - warn!("The enr ip in REGTOPIC request body does not match sender's. Nodes can only register themselves. Blacklisting peer {}.", node_address.node_id); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::RegistrationOtherNode); - return; - } - } - IpMode::Ip6 { .. } => { - if enr.udp6_socket().map(SocketAddr::V6) != Some(node_address.socket_addr) { - warn!("The enr ip in REGTOPIC request body does not match sender's. Nodes can only register themselves. Blacklisting peer {}.", node_address.node_id); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::RegistrationOtherNode); - return; + + // Blacklist if node doesn't contain the given topic in its enr 'topics' field + let mut topic_in_enr = false; + if let Some(topics) = enr.get("topics") { + let rlp = Rlp::new(topics); + for item in rlp.iter() { + if let Ok(data) = item.data().map_err(|e| error!("Could not decode a topic in topics field in enr of peer {}. Error {}", enr.node_id(), e)) { + if let Ok(topic_string) = std::str::from_utf8(data).map_err(|e| error!("Could not decode topic in topics field into utf8, in enr of peer {}. Error {}", enr.node_id(), e)) { + let topic_hash = Topic::new(topic_string).hash(); + if topic_hash == topic { + topic_in_enr = true; + } + } } } } + if !topic_in_enr { + warn!("The topic given in the REGTOPIC request body cannot be found in sender's 'topics' enr field. 
Blacklisting peer {}.", node_address.node_id); + let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::InvalidTopicsEnr); + return; + } if !ticket.is_empty() { let decrypted_ticket = { From fe8a335cfad1bc5cfa2bd519221ac9cd25188413 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 30 Jul 2022 10:53:47 +0200 Subject: [PATCH 320/391] Simplify code --- src/service.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/service.rs b/src/service.rs index 21b9054e0..a80cb8aa2 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2338,12 +2338,6 @@ impl Service { let kbuckets_topic = topic_hash.and_then(|topic_hash| { self.topics_kbuckets .get_mut(&topic_hash) - .and_then(|kbuckets| { - Some(kbuckets).or_else(|| { - debug_unreachable!("A kbuckets table should exist for topic hash"); - None - }) - }) }); let key = kbucket::Key::from(node_id); From 0a266686010e28d7eeda2cfe485d0d5db28ebb86 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 30 Jul 2022 16:10:31 +0200 Subject: [PATCH 321/391] Minimize LOC --- src/service.rs | 185 ++++++++++++++++++++++++------------------------- 1 file changed, 89 insertions(+), 96 deletions(-) diff --git a/src/service.rs b/src/service.rs index a80cb8aa2..416a28f77 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2170,70 +2170,85 @@ impl Service { ) { let local_id = self.local_enr.read().node_id(); - if let Some(topic_hash) = topic { - enrs.retain(|enr| enr.node_id() != local_id); - for enr in enrs.iter() { - // If there is an event stream send the DiscoveredPeerTopic event. - if self.config.report_discovered_peers { - self.send_event(Discv5Event::DiscoveredPeerTopic(enr.clone(), topic_hash)); + enrs.retain(|enr| { + let node_id = enr.node_id(); + // If we are requesting the target of the query, this ENR could be the result of requesting the + // target-nodes own id. 
We don't want to add this as a "new" discovered peer in the query, so we + // remove it from the discovered list here. + if local_id == node_id { + return false; + } + // If there is an event stream send the DiscoveredPeerTopic event. + if self.config.report_discovered_peers { + match topic { + Some(topic_hash) => { + self.send_event(Discv5Event::DiscoveredPeerTopic(enr.clone(), topic_hash)) + } + None => self.send_event(Discv5Event::Discovered(enr.clone())), } } + // The remaining ENRs are used if this request was part of a query. If we are + // requesting the target of the query, this ENR could be the result of requesting the + // target-nodes own id. We don't want to add this as a "new" discovered peer in the + // query, so we remove it from the discovered list here. + if source == &node_id { + return false; + } + // Ignore peers that don't pass the table filter + (self.config.table_filter)(enr) + }); + + if let Some(topic_hash) = topic { let mut discovered_new_peer = false; if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { for enr in enrs { - // If the node sends its own id we don't count it - if source != &enr.node_id() { - continue; - } - // ignore peers that don't pass the table filter - if (self.config.table_filter)(&enr) { - let key = kbucket::Key::from(enr.node_id()); + let key = kbucket::Key::from(enr.node_id()); - // If the ENR exists in the routing table and the discovered ENR has a greater - // sequence number, perform some filter checks before updating the enr. + // If the ENR exists in the routing table and the discovered ENR has a greater + // sequence number, perform some filter checks before updating the enr. 
- let must_update_enr = match kbuckets_topic.entry(&key) { - kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Pending(mut entry, _) => { - entry.value().seq() < enr.seq() - } - kbucket::Entry::Absent(_) => { - trace!( - "Discovered new peer {} for topic hash {}", - enr.node_id(), - topic_hash - ); - let discovered_peers = - self.discovered_peers_topic.entry(topic_hash).or_default(); - let node_id = enr.node_id(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = - NodeId::new(&topic_hash.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - let bucket = discovered_peers.entry(distance).or_default(); - // If the intermediary storage before the topic's kbucktes is at bounds, discard the - // uncontacted peers. - if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { - bucket.insert(node_id, enr.clone()); - discovered_new_peer = true; - } else { - warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); - } + let must_update_enr = match kbuckets_topic.entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Absent(_) => { + trace!( + "Discovered new peer {} for topic hash {}", + enr.node_id(), + topic_hash + ); + let discovered_peers = + self.discovered_peers_topic.entry(topic_hash).or_default(); + let node_id = enr.node_id(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = + NodeId::new(&topic_hash.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let bucket = discovered_peers.entry(distance).or_default(); + // If the intermediary storage before the topic's kbucktes is at bounds, discard the + // uncontacted peers. 
+ if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { + bucket.insert(node_id, enr.clone()); + discovered_new_peer = true; + } else { + warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); } - false } - _ => false, - }; - if must_update_enr { - if let UpdateResult::Failed(reason) = - kbuckets_topic.update_node(&key, enr.clone(), None) - { - self.peers_to_ping.remove(&enr.node_id()); - debug!( + false + } + _ => false, + }; + if must_update_enr { + if let UpdateResult::Failed(reason) = + kbuckets_topic.update_node(&key, enr.clone(), None) + { + self.peers_to_ping.remove(&enr.node_id()); + debug!( "Failed to update discovered ENR for kbucket of topic hash {:?}. Node: {}, Reason: {:?}", topic_hash, source, reason ); - } + } else { + // If the enr was successfully updated, progress might be made in a topic lookup + discovered_new_peer = true; } } } @@ -2253,52 +2268,32 @@ impl Service { } } else { enrs.retain(|enr| { - if enr.node_id() == local_id { - return false; - } + let key = kbucket::Key::from(enr.node_id()); - // If any of the discovered nodes are in the routing table, and there contains an older ENR, update it. - // If there is an event stream send the Discovered event - if self.config.report_discovered_peers { - self.send_event(Discv5Event::Discovered(enr.clone())); - } + // If the ENR exists in the routing table and the discovered ENR has a greater + // sequence number, perform some filter checks before updating the enr. - // ignore peers that don't pass the table filter - if (self.config.table_filter)(enr) { - let key = kbucket::Key::from(enr.node_id()); - - // If the ENR exists in the routing table and the discovered ENR has a greater - // sequence number, perform some filter checks before updating the enr. 
- - let must_update_enr = match self.kbuckets.write().entry(&key) { - kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), - _ => false, - }; - if must_update_enr { - if let UpdateResult::Failed(reason) = - self.kbuckets.write().update_node(&key, enr.clone(), None) - { - self.peers_to_ping.remove(&enr.node_id()); - debug!( - "Failed to update discovered ENR. Node: {}, Reason: {:?}", - source, reason - ); - return false; // Remove this peer from the discovered list if the update failed - } + let must_update_enr = match self.kbuckets.write().entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + _ => false, + }; + if must_update_enr { + if let UpdateResult::Failed(reason) = + self.kbuckets.write().update_node(&key, enr.clone(), None) + { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR. Node: {}, Reason: {:?}", + source, reason + ); + return false; // Remove this peer from the discovered list if the update failed } - } else { - return false; // Didn't pass the table filter remove the peer } - - // The remaining ENRs are used if this request was part of a query. If we are - // requesting the target of the query, this ENR could be the result of requesting the - // target-nodes own id. We don't want to add this as a "new" discovered peer in the - // query, so we remove it from the discovered list here. - source != &enr.node_id() + true }); - // if this is part of a query, update the query + // The remaining ENRs are used if this request was part of a query. 
Update the query if let Some(query_id) = query_id { if let Some(query) = self.queries.get_mut(query_id) { let mut peer_count = 0; @@ -2335,10 +2330,8 @@ impl Service { let mut ping_peer = None; let mut event_to_send = None; - let kbuckets_topic = topic_hash.and_then(|topic_hash| { - self.topics_kbuckets - .get_mut(&topic_hash) - }); + let kbuckets_topic = + topic_hash.and_then(|topic_hash| self.topics_kbuckets.get_mut(&topic_hash)); let key = kbucket::Key::from(node_id); match new_status { From deb42fdb06965ea6d0a59fb71589f0a6643ae58a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 1 Aug 2022 19:00:16 +0200 Subject: [PATCH 322/391] Remove function relevant for potential future implementation of mutliple hash algortihms support --- src/advertisement/topic.rs | 13 +------------ src/discv5.rs | 27 ++++++++++++--------------- src/lib.rs | 2 +- 3 files changed, 14 insertions(+), 28 deletions(-) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 642f68456..d7dda1878 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -31,7 +31,6 @@ pub type Sha256Topic = Topic; pub trait Hasher { /// The function that takes a topic string and creates a topic hash. fn hash(topic_string: String) -> TopicHash; - fn hash_function_name() -> String; } /// A type for representing topics who use the identity hash. @@ -56,11 +55,6 @@ impl Hasher for Sha256Hash { hash.copy_from_slice(&sha256); TopicHash { hash } } - - /// Returns the name of the hashing algorithm this [`Hasher`] implements. - fn hash_function_name() -> String { - "Sha256".to_owned() - } } /// The 32-bytes that are sent in the body of a topic request are interpreted @@ -140,12 +134,7 @@ impl Topic { pub fn hash(&self) -> TopicHash { H::hash(self.topic.clone()) } - - /// Returns the name of the [`Hasher`] configured for the topic. - pub fn hash_function_name(&self) -> String { - H::hash_function_name() - } - + /// Returns the string passed to the topic upon instantiation. 
pub fn topic(&self) -> String { self.topic.clone() diff --git a/src/discv5.rs b/src/discv5.rs index 31375c159..35be7b65c 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -45,11 +45,11 @@ lazy_static! { RwLock::new(crate::PermitBanList::default()); } -/// Helper function that returns a labeled list of hashes of the given topic string according to -/// all implemented hashing algorithms. Currently only one, Sha256, is implemented. -pub static HASHES: for<'a> fn(topic: &'a str) -> Vec<(TopicHash, String)> = |topic| { +/// Helper function that returns the hash of the given topic string according to the +/// implemented hashing algorithm. +pub static HASH: for<'a> fn(topic: &'a str) -> TopicHash = |topic| { let sha256_topic = Topic::new(topic); - vec![(sha256_topic.hash(), sha256_topic.hash_function_name())] + sha256_topic.hash() }; mod test; @@ -535,7 +535,7 @@ impl Discv5 { .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; - callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive table entries' ids for topic {} with topic hash {} {}. Error {}", topic, topic_hash, topic.hash_function_name(), e)))? + callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive table entries' ids for topic {} with topic hash {}. Error {}", topic, topic_hash, e)))? } } @@ -567,16 +567,15 @@ impl Discv5 { // await the response let ad_nodes = callback_recv.await.map_err(|e| { RequestError::ChannelFailed(format!( - "Failed to receive ad nodes from lookup of topic {} with topic hash {} {}. Error {}", - topic, topic_hash, topic.hash_function_name(), e + "Failed to receive ad nodes from lookup of topic {} with topic hash {}. 
Error {}", + topic, topic_hash, e )) })?; if ad_nodes.is_ok() { debug!( - "Received ad nodes for topic {} with topic hash {} {}", + "Received ad nodes for topic {} with topic hash {}", topic, - topic_hash, - topic.hash_function_name() + topic_hash ); } ad_nodes @@ -625,10 +624,9 @@ impl Discv5 { .map_err(|_| RequestError::ServiceNotStarted)?; let topic = Topic::new(topic); debug!( - "Registering topic {} with topic hash {} {}", + "Registering topic {} with topic hash {}", topic, topic.hash(), - topic.hash_function_name(), ); let event = ServiceRequest::RegisterTopic(topic); // send the request @@ -663,7 +661,7 @@ impl Discv5 { .send(event) .await .map_err(|_| RequestError::ServiceNotStarted)?; - callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive regsitration attempts for topic {} with topic hash {} {}. Error {}", topic, topic_hash, topic.hash_function_name(), e)))? + callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive regsitration attempts for topic {} with topic hash {}. Error {}", topic, topic_hash, e)))? } } /// Retrieves the topics that we have published on other nodes. @@ -716,10 +714,9 @@ impl Discv5 { // await the response callback_recv.await.map_err(|e| { RequestError::ChannelFailed(format!( - "Failed to receive ads for topic {} with topic hash {} {}. Error {}", + "Failed to receive ads for topic {} with topic hash {}. Error {}", topic, topic_hash, - topic.hash_function_name(), e )) })? 
diff --git a/src/lib.rs b/src/lib.rs index aeb913ac0..a79ae5eb5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -125,7 +125,7 @@ extern crate lazy_static; pub type Enr = enr::Enr; -pub use crate::discv5::{Discv5, Discv5Event, HASHES}; +pub use crate::discv5::{Discv5, Discv5Event, HASH}; pub use config::{Discv5Config, Discv5ConfigBuilder}; pub use error::{Discv5Error, QueryError, RequestError, ResponseError}; pub use executor::{Executor, TokioExecutor}; From 50c462da84d4b67410079e19776977cc9102b96c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 1 Aug 2022 21:18:23 +0200 Subject: [PATCH 323/391] Use strings as topics in REGTOPICS --- src/advertisement/ticket.rs | 15 ++++--- src/advertisement/topic.rs | 8 +++- src/discv5.rs | 28 ++++++------ src/handler/mod.rs | 7 ++- src/lib.rs | 1 + src/rpc.rs | 25 ++++------- src/service.rs | 89 ++++++++++++++++++++++--------------- 7 files changed, 96 insertions(+), 77 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 75801533e..06dd43755 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -1,8 +1,9 @@ use super::*; +use crate::Topic; use delay_map::HashMapDelay; use enr::NodeId; use node_info::NodeContact; -use std::cmp::Eq; +use std::{cmp::Eq, hash::Hash}; /// The max wait time accpeted for tickets. pub const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; @@ -16,24 +17,24 @@ pub const MAX_TICKETS_NODE_TOPIC: u8 = 3; /// A topic is active when it's associated with the NodeId from a node it is /// published on. -#[derive(PartialEq, Eq, Hash, Clone)] +#[derive(PartialEq, Eq, Clone, Hash)] pub struct ActiveTopic { /// NodeId of the sender of the TICKET response. node_id: NodeId, /// The topic hash as it is sent in the TICKET response. - topic: TopicHash, + topic: Topic, } impl ActiveTopic { /// Makes a topic active (currently associated with an ad slot or a ticket) by /// associating it with a node id. 
- pub fn new(node_id: NodeId, topic: TopicHash) -> Self { + pub fn new(node_id: NodeId, topic: Topic) -> Self { ActiveTopic { node_id, topic } } /// Returns the topic of a topic that is active. - pub fn topic(&self) -> TopicHash { - self.topic + pub fn topic(&self) -> &Topic { + &self.topic } /// Returns the node id of a topic that is active. @@ -98,7 +99,7 @@ impl Tickets { contact: NodeContact, ticket: Vec, wait_time: Duration, - topic: TopicHash, + topic: Topic, ) -> Result<(), &str> { let active_topic = ActiveTopic::new(contact.node_id(), topic); diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index d7dda1878..7285eb332 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -134,13 +134,19 @@ impl Topic { pub fn hash(&self) -> TopicHash { H::hash(self.topic.clone()) } - + /// Returns the string passed to the topic upon instantiation. pub fn topic(&self) -> String { self.topic.clone() } } +impl Hash for Topic { + fn hash(&self, state: &mut T) { + self.hash().hash(state) + } +} + impl PartialEq for Topic { /// Each hash algortihm used to publish a hashed topic (as XOR metric key) is in /// discv5 seen as its own [`Topic`] upon comparison. That means a topic string diff --git a/src/discv5.rs b/src/discv5.rs index 35be7b65c..c322aa066 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -13,7 +13,7 @@ //! The server can be shutdown using the [`Discv5::shutdown`] function. 
use crate::{ - advertisement::topic::{Sha256Topic as Topic, TopicHash}, + advertisement::topic::TopicHash, error::{Discv5Error, QueryError, RequestError}, kbucket::{ self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable, @@ -21,7 +21,7 @@ use crate::{ }, node_info::NodeContact, service::{QueryKind, RegAttempts, Service, ServiceRequest, TalkRequest}, - Discv5Config, Enr, + Discv5Config, Enr, Topic, }; use enr::{CombinedKey, EnrError, EnrKey, NodeId}; use parking_lot::RwLock; @@ -574,8 +574,7 @@ impl Discv5 { if ad_nodes.is_ok() { debug!( "Received ad nodes for topic {} with topic hash {}", - topic, - topic_hash + topic, topic_hash ); } ad_nodes @@ -587,22 +586,23 @@ impl Discv5 { /// interval no registration attempts will be made for the topic. pub fn remove_topic( &self, - topic_hash: TopicHash, + topic_str: &'static str, ) -> impl Future> + 'static { let channel = self.clone_channel(); async move { let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; let (callback_send, callback_recv) = oneshot::channel(); - let event = ServiceRequest::RemoveTopic(topic_hash, callback_send); + let topic = Topic::new(topic_str); + let event = ServiceRequest::RemoveTopic(topic, callback_send); channel .send(event) .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; callback_recv.await.map_err(|e| { RequestError::ChannelFailed(format!( - "Failed to receive removed topic hash {}. Error {}", - topic_hash, e + "Failed to receive removed topic {}. Error {}", + topic_str, e )) })? } @@ -644,7 +644,7 @@ impl Discv5 { /// same node. Caution! 
The returned map will also contain pub fn reg_attempts( &self, - topic: &'static str, + topic_str: &'static str, ) -> impl Future, RequestError>> + 'static { let channel = self.clone_channel(); let (callback_send, callback_recv) = oneshot::channel(); @@ -653,15 +653,15 @@ impl Discv5 { let channel = channel .as_ref() .map_err(|_| RequestError::ServiceNotStarted)?; - let topic = Topic::new(topic); + let topic = Topic::new(topic_str); let topic_hash = topic.hash(); - let event = ServiceRequest::RegistrationAttempts(topic_hash, callback_send); + let event = ServiceRequest::RegistrationAttempts(topic, callback_send); channel .send(event) .await .map_err(|_| RequestError::ServiceNotStarted)?; - callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive regsitration attempts for topic {} with topic hash {}. Error {}", topic, topic_hash, e)))? + callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive regsitration attempts for topic {} with topic hash {}. Error {}", topic_str, topic_hash, e)))? } } /// Retrieves the topics that we have published on other nodes. @@ -715,9 +715,7 @@ impl Discv5 { callback_recv.await.map_err(|e| { RequestError::ChannelFailed(format!( "Failed to receive ads for topic {} with topic hash {}. Error {}", - topic, - topic_hash, - e + topic, topic_hash, e )) })? 
} diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 8e92c5ec0..c42b6197c 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -35,7 +35,7 @@ use crate::{ rpc::{Message, Request, RequestBody, RequestId, Response, ResponseBody}, socket, socket::{FilterConfig, Socket}, - Enr, + Enr, Topic, }; use delay_map::HashMapDelay; use enr::{CombinedKey, NodeId}; @@ -653,8 +653,11 @@ impl Handler { topic, enr: _, ticket: _, + } => { + let topic_hash = Topic::new(topic).hash(); + HandlerOut::EstablishedTopic(enr, connection_direction, topic_hash) } - | RequestBody::TopicQuery { topic } => { + RequestBody::TopicQuery { topic } => { HandlerOut::EstablishedTopic(enr, connection_direction, *topic) } _ => { diff --git a/src/lib.rs b/src/lib.rs index a79ae5eb5..288bed950 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -124,6 +124,7 @@ pub mod socket; extern crate lazy_static; pub type Enr = enr::Enr; +pub type Topic = crate::advertisement::topic::Sha256Topic; pub use crate::discv5::{Discv5, Discv5Event, HASH}; pub use config::{Discv5Config, Discv5ConfigBuilder}; diff --git a/src/rpc.rs b/src/rpc.rs index 1f3288c4f..1dd9d1444 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -85,8 +85,8 @@ pub enum RequestBody { }, /// A REGTOPIC request. RegisterTopic { - /// The hashed topic we want to advertise at the node receiving this request. - topic: TopicHash, + /// The topic string we want to advertise at the node receiving this request. + topic: String, // Current node record of sender. enr: crate::Enr, // Ticket content of ticket from a previous registration attempt or empty. @@ -129,7 +129,7 @@ pub enum ResponseBody { /// The time in seconds to wait before attempting to register again. wait_time: u64, /// The topic hash for which the opaque ticket is issued. - topic: TopicHash, + topic: String, }, } @@ -566,16 +566,7 @@ impl Message { debug!("RegisterTopic request has an invalid RLP list length. 
Expected 4, found {}", list_len); return Err(DecoderError::RlpIncorrectListLen); } - let topic = { - let topic_bytes = rlp.val_at::>(1)?; - if topic_bytes.len() > 32 { - debug!("RegisterTopic request has a topic greater than 32 bytes"); - return Err(DecoderError::RlpIsTooBig); - } - let mut topic = [0u8; 32]; - topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); - TopicHash::from_raw(topic) - }; + let topic = rlp.val_at::(1)?; let enr_rlp = rlp.at(2)?; let enr = enr_rlp.as_val::>()?; let ticket = rlp.val_at::>(3)?; @@ -595,7 +586,7 @@ impl Message { } let ticket = rlp.val_at::>(1)?; let wait_time = rlp.val_at::(2)?; - let topic = rlp.val_at::(3)?; + let topic = rlp.val_at::(3)?; Message::Response(Response { id, body: ResponseBody::Ticket { @@ -1099,7 +1090,7 @@ mod tests { let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::RegisterTopic { - topic: TopicHash::from_raw([1u8; 32]), + topic: "lighthouse".to_string(), enr, ticket: Vec::new(), }, @@ -1133,7 +1124,7 @@ mod tests { let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::RegisterTopic { - topic: TopicHash::from_raw([1u8; 32]), + topic: "lighthouse".to_string(), enr, ticket, }, @@ -1245,7 +1236,7 @@ mod tests { body: ResponseBody::Ticket { ticket, wait_time: 1u64, - topic: TopicHash::from_raw([1u8; 32]), + topic: "lighthouse".to_string(), }, }); diff --git a/src/service.rs b/src/service.rs index 416a28f77..a6f815d9b 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ use crate::{ advertisement::{ ticket::{Tickets, MAX_WAIT_TIME_TICKET, TICKET_LIMIT_DURATION}, - topic::{Sha256Topic as Topic, TopicHash}, + topic::TopicHash, Ads, AD_LIFETIME, }, discv5::PERMIT_BAN_LIST, @@ -35,7 +35,7 @@ use crate::{ query_pool::{ FindNodeQueryConfig, PredicateQueryConfig, QueryId, QueryPool, QueryPoolState, TargetKey, }, - rpc, Discv5Config, Discv5Event, Enr, IpMode, + rpc, Discv5Config, Discv5Event, Enr, IpMode, Topic, }; use 
aes_gcm::{ aead::{generic_array::GenericArray, Aead, NewAead, Payload}, @@ -192,14 +192,14 @@ pub enum ServiceRequest { /// until removed. RegisterTopic(Topic), /// Stops publishing this node as an advetiser for a topic. - RemoveTopic(TopicHash, oneshot::Sender>), + RemoveTopic(Topic, oneshot::Sender>), /// Retrieves the ads currently published by this node on other nodes in a discv5 network. ActiveTopics(oneshot::Sender>, RequestError>>), /// Retrieves the ads adveritsed for other nodes for a given topic. Ads(TopicHash, oneshot::Sender, RequestError>>), /// Retrieves the registration attempts acitve for a given topic. RegistrationAttempts( - TopicHash, + Topic, oneshot::Sender, RequestError>>, ), /// Retrieves the node id of entries in a given topic's kbuckets by distance. @@ -266,7 +266,7 @@ pub struct Service { /// Topics tracks registration attempts of the topic hashes to advertise on /// other nodes. - registration_attempts: HashMap>, + registration_attempts: HashMap>, /// KBuckets per topic hash. 
topics_kbuckets: HashMap>, @@ -509,8 +509,8 @@ impl Service { let mut topics_to_reg_iter = self .registration_attempts .keys() - .copied() - .collect::>() + .cloned() + .collect::>() .into_iter(); loop { @@ -604,7 +604,7 @@ impl Service { } ServiceRequest::RegisterTopic(topic) => { let topic_hash = topic.hash(); - if self.registration_attempts.insert(topic_hash, BTreeMap::new()).is_some() { + if self.registration_attempts.insert(topic.clone(), BTreeMap::new()).is_some() { warn!("This topic is already being advertised"); } else { let topics_field = if let Some(topics) = self.local_enr.read().get("topics") { @@ -684,13 +684,13 @@ impl Service { } ServiceRequest::ActiveTopics(callback) => { let mut active_topics = HashMap::>::new(); - self.registration_attempts.iter_mut().for_each(|(topic_hash, reg_attempts_by_distance)| { + self.registration_attempts.iter_mut().for_each(|(topic, reg_attempts_by_distance)| { for reg_attempts in reg_attempts_by_distance.values_mut() { reg_attempts.reg_attempts.retain(|node_id, reg_state| { match reg_state { RegistrationState::Confirmed(insert_time) => { if insert_time.elapsed() < AD_LIFETIME { - active_topics.entry(*topic_hash).or_default().push(*node_id); + active_topics.entry(topic.hash()).or_default().push(*node_id); true } else { false @@ -707,11 +707,11 @@ impl Service { error!("Failed to return active topics"); } } - ServiceRequest::RemoveTopic(topic_hash, callback) => { - if self.registration_attempts.remove(&topic_hash).is_some() { + ServiceRequest::RemoveTopic(topic, callback) => { + if self.registration_attempts.remove(&topic).is_some() { METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); - if callback.send(Ok(base64::encode(topic_hash.as_bytes()))).is_err() { - error!("Failed to return the removed topic {}", topic_hash); + if callback.send(Ok(topic.topic())).is_err() { + error!("Failed to return the removed topic {}", topic.topic()); } } } @@ -849,7 +849,7 @@ impl Service { // When the 
ticket time expires a new REGTOPIC request is automatically sent to the // ticket issuer and the registration attempt stays in the [`RegistrationState::Ticket`] // from sending the first REGTOPIC request to this contact for this topic. - self.reg_topic_request(active_ticket.contact(), active_topic.topic(), enr, Some(active_ticket.ticket())); + self.reg_topic_request(active_ticket.contact(), active_topic.topic().clone(), enr, Some(active_ticket.ticket())); } Some(topic_query_progress) = self.active_topic_queries.next() => { match topic_query_progress { @@ -868,16 +868,22 @@ impl Service { _ = registration_interval.tick() => { let mut sent_regtopics = 0; let mut topic_item = topics_to_reg_iter.next(); - while let Some(topic_hash) = topic_item { - trace!("Republishing topic hash {}", topic_hash); - sent_regtopics += self.send_register_topics(topic_hash); + let mut restart_iteration = false; + while let Some(topic) = topic_item { + trace!("Republishing topic {} with hash {}", topic.topic(), topic.hash()); + sent_regtopics += self.send_register_topics(topic); if sent_regtopics >= MAX_REGTOPICS_REGISTER_INTERVAL { break } - topic_item = topics_to_reg_iter.next(); + topic_item = if let Some(item) = topics_to_reg_iter.next() { + Some(item) + } else { + restart_iteration = true; + None + } } - if topic_item.is_none() { - topics_to_reg_iter = self.registration_attempts.keys().copied().collect::>().into_iter(); + if restart_iteration { + topics_to_reg_iter = self.registration_attempts.keys().cloned().collect::>().into_iter(); } } } @@ -885,15 +891,16 @@ impl Service { } /// Internal function that starts a topic registration. This function should not be called outside of [`REGISTER_INTERVAL`]. 
- fn send_register_topics(&mut self, topic_hash: TopicHash) -> usize { + fn send_register_topics(&mut self, topic: Topic) -> usize { trace!("Sending REGTOPICS"); + let topic_hash = topic.hash(); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { trace!( "Found {} entries in kbuckets of topic hash {}", kbuckets.get_mut().iter().count(), topic_hash ); - let reg_attempts = self.registration_attempts.entry(topic_hash).or_default(); + let reg_attempts = self.registration_attempts.entry(topic.clone()).or_default(); let mut new_peers = Vec::new(); // Ensure that max_reg_attempts_bucket registration attempts are alive per bucket if that many peers are @@ -984,7 +991,7 @@ impl Service { if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) .map_err(|e| error!("Failed to send REGTOPIC to peer. Error: {:?}", e)) { - self.reg_topic_request(node_contact, topic_hash, local_enr.clone(), None); + self.reg_topic_request(node_contact, topic.clone(), local_enr.clone(), None); // If an uncontacted peer has a faulty enr, don't count the registration attempt. sent_regtopics += 1; } @@ -1269,6 +1276,7 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } RequestBody::RegisterTopic { topic, enr, ticket } => { + let topic = Topic::new(topic); // Blacklist if request tries to advertise another node than the sender let registration_of_other_node = enr.node_id() != node_address.node_id || match self.config.ip_mode { @@ -1295,7 +1303,7 @@ impl Service { if let Ok(data) = item.data().map_err(|e| error!("Could not decode a topic in topics field in enr of peer {}. Error {}", enr.node_id(), e)) { if let Ok(topic_string) = std::str::from_utf8(data).map_err(|e| error!("Could not decode topic in topics field into utf8, in enr of peer {}. 
Error {}", enr.node_id(), e)) { let topic_hash = Topic::new(topic_string).hash(); - if topic_hash == topic { + if topic_hash == topic.hash() { topic_in_enr = true; } } @@ -1353,7 +1361,7 @@ impl Service { let mut new_ticket = Ticket::new( node_address.node_id, node_address.socket_addr.ip(), - topic, + topic.hash(), tokio::time::Instant::now(), Duration::default(), ); @@ -1361,7 +1369,8 @@ impl Service { // If there is no wait time and the ad is successfuly registered as an ad, the new ticket is sent // with wait time set to zero indicating successful registration. if let Err((wait_time, e)) = - self.ads.insert(enr, topic, node_address.socket_addr.ip()) + self.ads + .insert(enr, topic.hash(), node_address.socket_addr.ip()) { // The wait time on the new ticket to send is updated if there is wait time for the requesting // node for this topic to register as an ad due to the current state of the topic table. @@ -1373,7 +1382,7 @@ impl Service { } let wait_time = new_ticket.wait_time(); - self.send_ticket_response(node_address, id, new_ticket, wait_time); + self.send_ticket_response(node_address, id, topic, new_ticket, wait_time); } RequestBody::TopicQuery { topic } => { self.send_topic_query_adnodes_response(node_address, id, topic); @@ -1719,10 +1728,12 @@ impl Service { if wait_time <= MAX_WAIT_TIME_TICKET { let now = Instant::now(); let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + let topic = Topic::new(topic); + let topic_key: kbucket::Key = + NodeId::new(&topic.hash().as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { let registration_attempts = - self.registration_attempts.entry(topic).or_default(); + self.registration_attempts.entry(topic.clone()).or_default(); if let Some(reg_state) = registration_attempts .entry(distance) .or_default() @@ -1840,7 +1851,7 @@ impl Service { fn reg_topic_request( &mut self, contact: NodeContact, - topic: TopicHash, + topic: Topic, 
enr: Enr, ticket: Option>, ) { @@ -1850,7 +1861,7 @@ impl Service { Vec::new() }; let request_body = RequestBody::RegisterTopic { - topic, + topic: topic.topic(), enr, ticket: ticket_bytes, }; @@ -1881,6 +1892,7 @@ impl Service { &mut self, node_address: NodeAddress, rpc_id: RequestId, + topic: Topic, ticket: Ticket, wait_time: Duration, ) { @@ -1898,7 +1910,7 @@ impl Service { body: ResponseBody::Ticket { ticket: encrypted_ticket, wait_time: wait_time.as_secs(), - topic: ticket.topic(), + topic: topic.topic(), }, }; trace!( @@ -2581,7 +2593,10 @@ impl Service { ticket: _, } => { let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = NodeId::new(&topic.as_bytes()).into(); + let topic = Topic::new(topic); + let topic_hash = topic.hash(); + let topic_key: kbucket::Key = + NodeId::new(&topic_hash.as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { // Remove the registration attempt before disconnecting the peer. let registration_attempts = @@ -2590,7 +2605,11 @@ impl Service { bucket.reg_attempts.remove(&node_id); } } - self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)); + self.connection_updated( + node_id, + ConnectionStatus::Disconnected, + Some(topic_hash), + ); return; } // for all other requests, if any are queries, mark them as failures. 
From 9359d194df1586f95fd543619f7e74510fd505e9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 2 Aug 2022 12:09:33 +0200 Subject: [PATCH 324/391] Add trace message --- src/service.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index a6f815d9b..28cb42bea 100644 --- a/src/service.rs +++ b/src/service.rs @@ -866,11 +866,12 @@ impl Service { } } _ = registration_interval.tick() => { + trace!("New registration interval"); let mut sent_regtopics = 0; let mut topic_item = topics_to_reg_iter.next(); let mut restart_iteration = false; while let Some(topic) = topic_item { - trace!("Republishing topic {} with hash {}", topic.topic(), topic.hash()); + trace!("Publishing topic {} with hash {}", topic.topic(), topic.hash()); sent_regtopics += self.send_register_topics(topic); if sent_regtopics >= MAX_REGTOPICS_REGISTER_INTERVAL { break From 8eed39f26f5680df8aceb74da2ff1ba9f0595dd1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 2 Aug 2022 12:13:04 +0200 Subject: [PATCH 325/391] Improve trace message --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 28cb42bea..5ee46968e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -866,7 +866,7 @@ impl Service { } } _ = registration_interval.tick() => { - trace!("New registration interval"); + trace!("New registration interval, {}/{} topics to publish", topics_to_reg_iter.clone().count(), self.registration_attempts.len()); let mut sent_regtopics = 0; let mut topic_item = topics_to_reg_iter.next(); let mut restart_iteration = false; From f7221df597d81e4ce5d882145545d50b7c60528c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 2 Aug 2022 12:36:41 +0200 Subject: [PATCH 326/391] Fix bogus while loop logic --- src/service.rs | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/src/service.rs b/src/service.rs index 5ee46968e..6e1018c0d 100644 --- a/src/service.rs +++ 
b/src/service.rs @@ -509,8 +509,8 @@ impl Service { let mut topics_to_reg_iter = self .registration_attempts .keys() - .cloned() - .collect::>() + .map(|topic| (topic.clone(), topic.hash())) + .collect::>() .into_iter(); loop { @@ -869,22 +869,16 @@ impl Service { trace!("New registration interval, {}/{} topics to publish", topics_to_reg_iter.clone().count(), self.registration_attempts.len()); let mut sent_regtopics = 0; let mut topic_item = topics_to_reg_iter.next(); - let mut restart_iteration = false; - while let Some(topic) = topic_item { + while let Some((topic, _topic_hash)) = topic_item { trace!("Publishing topic {} with hash {}", topic.topic(), topic.hash()); - sent_regtopics += self.send_register_topics(topic); + sent_regtopics += self.send_register_topics(topic.clone()); if sent_regtopics >= MAX_REGTOPICS_REGISTER_INTERVAL { break } - topic_item = if let Some(item) = topics_to_reg_iter.next() { - Some(item) - } else { - restart_iteration = true; - None - } + topic_item = topics_to_reg_iter.next(); } - if restart_iteration { - topics_to_reg_iter = self.registration_attempts.keys().cloned().collect::>().into_iter(); + if topics_to_reg_iter.next().is_none() { + topics_to_reg_iter = self.registration_attempts.keys().map(|topic| (topic.clone(), topic.hash())).collect::>().into_iter(); } } } From 7fb23a1d030066af8eb147657c58b274e66e5407 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 2 Aug 2022 14:06:43 +0200 Subject: [PATCH 327/391] Add peers to topic's kbuckets when query is done not during --- src/service.rs | 258 +++++++++++++++++--------------------- src/service/query_info.rs | 7 +- src/service/test.rs | 1 - 3 files changed, 123 insertions(+), 143 deletions(-) diff --git a/src/service.rs b/src/service.rs index 6e1018c0d..aed0da494 100644 --- a/src/service.rs +++ b/src/service.rs @@ -229,11 +229,6 @@ pub struct Service { /// query. 
active_requests: FnvHashMap, - /// To fill a topic's kbuckets, FINDNODE requests are sent using the topic hash as a node id (key). - /// With XOR metrics the buckets closest to topic hash will be filled this way. The request will - /// always time out. - find_node_topic_requests: HashMap, - /// Keeps track of the number of responses received from a NODES response. active_nodes_responses: HashMap, @@ -472,7 +467,6 @@ impl Service { kbuckets, queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), - find_node_topic_requests: Default::default(), active_nodes_responses: HashMap::new(), ip_votes, handler_send, @@ -527,7 +521,8 @@ impl Service { ServiceRequest::StartQuery(query, callback) => { match query { QueryKind::FindNode { target_node } => { - self.start_findnode_query(target_node, Some(callback)); + let query_type = QueryType::FindNode(target_node); + self.start_findnode_query(query_type, Some(callback)); } QueryKind::Predicate { target_node, target_peer_no, predicate } => { self.start_predicate_query(target_node, target_peer_no, predicate, Some(callback)); @@ -598,7 +593,8 @@ impl Service { // (itertively getting closer to node ids to the topic hash) start a find node // query searching for the topic hash's bytes wrapped in a NodeId. let topic_key = NodeId::new(&topic_hash.as_bytes()); - self.start_findnode_query(topic_key, None); + let query_type = QueryType::FindTopic(topic_key); + self.start_findnode_query(query_type, None); self.send_topic_queries(topic_hash, Some(callback)); } @@ -678,7 +674,8 @@ impl Service { // (itertively getting closer to node ids to the topic hash) start a find node // query searching for the topic hash's bytes wrapped in a NodeId. 
let topic_key = NodeId::new(&topic_hash.as_bytes()); - self.start_findnode_query(topic_key, None); + let query_type = QueryType::FindTopic(topic_key); + self.start_findnode_query(query_type, None); } } } @@ -805,6 +802,7 @@ impl Service { // query is superfluous, however it may be useful in future versions. QueryEvent::Finished(query) | QueryEvent::TimedOut(query) => { let id = query.id(); + let query_type = query.target().query_type.clone(); let mut result = query.into_result(); // obtain the ENR's for the resulting nodes let mut found_enrs = Vec::new(); @@ -825,6 +823,71 @@ impl Service { if callback.send(found_enrs).is_err() { warn!("Callback dropped for query {}. Results dropped", *id); } + } else if let QueryType::FindTopic(topic_key) = query_type { + let topic_hash = TopicHash::from_raw(topic_key.raw()); + let mut discovered_new_peer = false; + if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { + for enr in found_enrs { + trace!("Found new peer {} for topic {}", enr, topic_hash); + let key = kbucket::Key::from(enr.node_id()); + + // If the ENR exists in the routing table and the discovered ENR has a greater + // sequence number, perform some filter checks before updating the enr. 
+ + let must_update_enr = match kbuckets_topic.entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Absent(_) => { + trace!( + "Discovered new peer {} for topic hash {}", + enr.node_id(), + topic_hash + ); + let discovered_peers = + self.discovered_peers_topic.entry(topic_hash).or_default(); + let node_id = enr.node_id(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = + NodeId::new(&topic_hash.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let bucket = discovered_peers.entry(distance).or_default(); + // If the intermediary storage before the topic's kbucktes is at bounds, discard the + // uncontacted peers. + if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { + bucket.insert(node_id, enr.clone()); + discovered_new_peer = true; + } else { + warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); + } + } + false + } + _ => false, + }; + if must_update_enr { + if let UpdateResult::Failed(reason) = + kbuckets_topic.update_node(&key, enr.clone(), None) + { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR of peer {} for kbucket of topic hash {:?}. Reason: {:?}", + topic_hash, enr.node_id(), reason + ); + } else { + // If the enr was successfully updated, progress might be made in a topic lookup + discovered_new_peer = true; + } + } + } + if discovered_new_peer { + // If a topic lookup has dried up (no more peers to query), and we now have found new peers or updated enrs for + // known peers to that topic, the query can now proceed as long as it hasn't timed out already. 
+ if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { + debug!("Found new peers to send TOPICQUERY to, unsetting query status dry"); + query.dry = false; + } + } + } } } } @@ -1090,11 +1153,11 @@ impl Service { /// Internal function that starts a query. fn start_findnode_query( &mut self, - target_node: NodeId, + query_type: QueryType, callback: Option>>, ) { let mut target = QueryInfo { - query_type: QueryType::FindNode(target_node), + query_type, untrusted_enrs: Default::default(), distances_to_request: DISTANCES_TO_REQUEST_PER_PEER, callback, @@ -1546,8 +1609,7 @@ impl Service { if let RequestBody::FindNode { .. } = &active_request.request_body { // In the case that it is a FINDNODE request using a topic hash as key, remove the mapping. - let topic = self.find_node_topic_requests.remove(&id); - self.discovered(&node_id, nodes, active_request.query_id, topic); + self.discovered(&node_id, nodes, active_request.query_id); } else if let RequestBody::TopicQuery { topic } = &active_request.request_body { nodes.retain(|enr| { if enr.node_id() == self.local_enr.read().node_id() { @@ -2168,13 +2230,7 @@ impl Service { } /// Processes discovered peers from a query or a TOPICQUERY or REGTOPIC request. - fn discovered( - &mut self, - source: &NodeId, - mut enrs: Vec, - query_id: Option, - topic: Option, - ) { + fn discovered(&mut self, source: &NodeId, mut enrs: Vec, query_id: Option) { let local_id = self.local_enr.read().node_id(); enrs.retain(|enr| { @@ -2187,12 +2243,7 @@ impl Service { } // If there is an event stream send the DiscoveredPeerTopic event. if self.config.report_discovered_peers { - match topic { - Some(topic_hash) => { - self.send_event(Discv5Event::DiscoveredPeerTopic(enr.clone(), topic_hash)) - } - None => self.send_event(Discv5Event::Discovered(enr.clone())), - } + self.send_event(Discv5Event::Discovered(enr.clone())); } // The remaining ENRs are used if this request was part of a query. 
If we are // requesting the target of the query, this ENR could be the result of requesting the @@ -2202,124 +2253,54 @@ impl Service { return false; } // Ignore peers that don't pass the table filter - (self.config.table_filter)(enr) - }); - - if let Some(topic_hash) = topic { - let mut discovered_new_peer = false; - if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { - for enr in enrs { - let key = kbucket::Key::from(enr.node_id()); - - // If the ENR exists in the routing table and the discovered ENR has a greater - // sequence number, perform some filter checks before updating the enr. - - let must_update_enr = match kbuckets_topic.entry(&key) { - kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Absent(_) => { - trace!( - "Discovered new peer {} for topic hash {}", - enr.node_id(), - topic_hash - ); - let discovered_peers = - self.discovered_peers_topic.entry(topic_hash).or_default(); - let node_id = enr.node_id(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = - NodeId::new(&topic_hash.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - let bucket = discovered_peers.entry(distance).or_default(); - // If the intermediary storage before the topic's kbucktes is at bounds, discard the - // uncontacted peers. - if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { - bucket.insert(node_id, enr.clone()); - discovered_new_peer = true; - } else { - warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); - } - } - false - } - _ => false, - }; - if must_update_enr { - if let UpdateResult::Failed(reason) = - kbuckets_topic.update_node(&key, enr.clone(), None) - { - self.peers_to_ping.remove(&enr.node_id()); - debug!( - "Failed to update discovered ENR for kbucket of topic hash {:?}. 
Node: {}, Reason: {:?}", - topic_hash, source, reason - ); - } else { - // If the enr was successfully updated, progress might be made in a topic lookup - discovered_new_peer = true; - } - } - } - if discovered_new_peer { - // If a topic lookup has dried up (no more peers to query), and we now have found new peers or updated enrs for - // known peers to that topic, the query can now proceed as long as it hasn't timed out already. - if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { - debug!("Found new peers to send TOPICQUERY to, unsetting query status dry"); - query.dry = false; - // To fill the kbuckets closest to the topic hash as well as those further away - // (itertively getting closer to node ids to the topic hash) start a find node - // query searching for the topic hash's bytes wrapped in a NodeId. - let topic_key = NodeId::new(&topic_hash.as_bytes()); - self.start_findnode_query(topic_key, None); - } - } + if !(self.config.table_filter)(enr) { + return false; } - } else { - enrs.retain(|enr| { - let key = kbucket::Key::from(enr.node_id()); - // If the ENR exists in the routing table and the discovered ENR has a greater - // sequence number, perform some filter checks before updating the enr. + let key = kbucket::Key::from(enr.node_id()); - let must_update_enr = match self.kbuckets.write().entry(&key) { - kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), - _ => false, - }; - if must_update_enr { - if let UpdateResult::Failed(reason) = - self.kbuckets.write().update_node(&key, enr.clone(), None) - { - self.peers_to_ping.remove(&enr.node_id()); - debug!( - "Failed to update discovered ENR. 
Node: {}, Reason: {:?}", - source, reason - ); - return false; // Remove this peer from the discovered list if the update failed - } + // If the ENR exists in the routing table and the discovered ENR has a greater + // sequence number, perform some filter checks before updating the enr. + + let must_update_enr = match self.kbuckets.write().entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + _ => false, + }; + if must_update_enr { + if let UpdateResult::Failed(reason) = + self.kbuckets.write().update_node(&key, enr.clone(), None) + { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR. Node: {}, Reason: {:?}", + source, reason + ); + return false; // Remove this peer from the discovered list if the update failed } - true - }); + } + true + }); - // The remaining ENRs are used if this request was part of a query. Update the query - if let Some(query_id) = query_id { - if let Some(query) = self.queries.get_mut(query_id) { - let mut peer_count = 0; - for enr_ref in enrs.iter() { - if !query - .target_mut() - .untrusted_enrs - .iter() - .any(|e| e.node_id() == enr_ref.node_id()) - { - query.target_mut().untrusted_enrs.push(enr_ref.clone()); - } - peer_count += 1; + // The remaining ENRs are used if this request was part of a query. 
Update the query + if let Some(query_id) = query_id { + if let Some(query) = self.queries.get_mut(query_id) { + let mut peer_count = 0; + for enr_ref in enrs.iter() { + if !query + .target_mut() + .untrusted_enrs + .iter() + .any(|e| e.node_id() == enr_ref.node_id()) + { + query.target_mut().untrusted_enrs.push(enr_ref.clone()); } - debug!("{} peers found for query id {:?}", peer_count, query_id); - query.on_success(source, &enrs) - } else { - debug!("Response returned for ended query {:?}", query_id) + peer_count += 1; } + debug!("{} peers found for query id {:?}", peer_count, query_id); + query.on_success(source, &enrs) + } else { + debug!("Response returned for ended query {:?}", query_id) } } } @@ -2543,15 +2524,12 @@ impl Service { "NODES Response failed, but was partially processed from: {}", active_request.contact ); - // In the case that it is a FINDNODE request using a topic hash as key, remove the mapping. - let topic = self.find_node_topic_requests.remove(&id); // if it's a query mark it as success, to process the partial // collection of peers self.discovered( &node_id, nodes_response.received_nodes, active_request.query_id, - topic, ); } } else { diff --git a/src/service/query_info.rs b/src/service/query_info.rs index 4efe46d32..21967911f 100644 --- a/src/service/query_info.rs +++ b/src/service/query_info.rs @@ -26,13 +26,16 @@ pub struct QueryInfo { pub enum QueryType { /// The user requested a `FIND_NODE` query to be performed. It should be reported when finished. FindNode(NodeId), + /// The user requested a `FIND_NODE` query to be performed to find the nodes closest to a topic + /// key. It should be reported when finished. 
+ FindTopic(NodeId), } impl QueryInfo { /// Builds an RPC Request, given the QueryInfo pub(crate) fn rpc_request(&self, peer: NodeId) -> RequestBody { match self.query_type { - QueryType::FindNode(node_id) => { + QueryType::FindNode(node_id) | QueryType::FindTopic(node_id) => { let distances = findnode_log2distance(node_id, peer, self.distances_to_request) .unwrap_or_else(|| vec![0]); RequestBody::FindNode { distances } @@ -44,7 +47,7 @@ impl QueryInfo { impl crate::query_pool::TargetKey for QueryInfo { fn key(&self) -> Key { match self.query_type { - QueryType::FindNode(ref node_id) => { + QueryType::FindNode(ref node_id) | QueryType::FindTopic(ref node_id) => { Key::new_raw(*node_id, *GenericArray::from_slice(&node_id.raw())) } } diff --git a/src/service/test.rs b/src/service/test.rs index 13c9ce502..2eb6bfa73 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -85,7 +85,6 @@ async fn build_service( kbuckets, queries: QueryPool::new(config.query_timeout), active_requests: Default::default(), - find_node_topic_requests: Default::default(), active_nodes_responses: HashMap::new(), ip_votes: None, handler_send, From bd9604bda7e94af0a0417b6195a305bb611da50c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Aug 2022 11:46:29 +0200 Subject: [PATCH 328/391] Only ad nodes which support topics version to topic kbuckets --- Cargo.toml | 1 + src/service.rs | 168 +++++++++++++++++++++++--------------------- src/service/test.rs | 18 ++++- 3 files changed, 103 insertions(+), 84 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index fe2c47a4f..b0a65a06c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,7 @@ hashlink = "0.7.0" delay_map = "0.1.1" more-asserts = "0.2.2" base64 = "0.13.0" +iota = "0.2.2" [dev-dependencies] rand_07 = { package = "rand", version = "0.7" } diff --git a/src/service.rs b/src/service.rs index aed0da494..b849fd2ef 100644 --- a/src/service.rs +++ b/src/service.rs @@ -45,6 +45,7 @@ use delay_map::HashSetDelay; use enr::{CombinedKey, 
NodeId}; use fnv::FnvHashMap; use futures::{future::select_all, prelude::*}; +use iota::iota; use more_asserts::debug_unreachable; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; @@ -85,6 +86,31 @@ const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; /// The duration in seconds which a node can come late to an assigned wait time. const WAIT_TIME_MARGINAL: Duration = Duration::from_secs(5); +// Discv5 versions. +iota! { + const TOPICS: u8 = 1 << iota; + , NAT +} + +/// Check if a given peer supports one or more versions of the Discv5 protocol. +const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = + |peer, supported_versions| { + if let Some(version) = peer.get("version") { + if let Some(v) = version.get(0) { + // Only add nodes which support the topics version + return supported_versions.contains(v); + } else { + error!("Version field in enr of peer {} is empty", peer.node_id()); + return false; + } + } + error!( + "Enr of peer {} doesn't contain filed 'version'", + peer.node_id() + ); + false + }; + /// Request type for Protocols using `TalkReq` message. /// /// Automatically responds with an empty body on drop if @@ -548,53 +574,13 @@ impl Service { ServiceRequest::TopicQuery(topic_hash, callback) => { // If we look up the topic hash for the first time we initialise its kbuckets. if let Entry::Vacant(_) = self.topics_kbuckets.entry(topic_hash) { - // NOTE: Currently we don't expose custom filter support in the configuration. Users can - // optionally use the IP filter via the ip_limit configuration parameter. In the future, we - // may expose this functionality to the users if there is demand for it. 
- let (table_filter, bucket_filter) = if self.config.ip_limit { - ( - Some(Box::new(kbucket::IpTableFilter) as Box>), - Some(Box::new(kbucket::IpBucketFilter) as Box>), - ) - } else { - (None, None) - }; - - trace!("Initiating kbuckets for topic hash {}", topic_hash); - let mut kbuckets = KBucketsTable::new( - NodeId::new(&topic_hash.as_bytes()).into(), - Duration::from_secs(60), - self.config.incoming_bucket_limit, - table_filter, - bucket_filter, - ); - - debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); - - for entry in self.kbuckets.write().iter() { - match kbuckets.insert_or_update(entry.node.key, entry.node.value.clone(), entry.status) { - InsertResult::Inserted - | InsertResult::Pending { .. } - | InsertResult::StatusUpdated { .. } - | InsertResult::ValueUpdated - | InsertResult::Updated { .. } - | InsertResult::UpdatedPending => trace!( - "Added node id {} to kbucket of topic hash {}", - entry.node.value.node_id(), - topic_hash - ), - InsertResult::Failed(f) => error!("Failed to insert ENR for topic hash {}. Failure reason: {:?}", topic_hash, f), - } - } - self.topics_kbuckets.insert(topic_hash, kbuckets); + self.init_topic_kbuckets(topic_hash); } - // To fill the kbuckets closest to the topic hash as well as those further away // (itertively getting closer to node ids to the topic hash) start a find node // query searching for the topic hash's bytes wrapped in a NodeId. let topic_key = NodeId::new(&topic_hash.as_bytes()); - let query_type = QueryType::FindTopic(topic_key); - self.start_findnode_query(query_type, None); + self.start_findnode_query(QueryType::FindTopic(topic_key), None); self.send_topic_queries(topic_hash, Some(callback)); } @@ -629,53 +615,15 @@ impl Service { .write() .insert("topics", &topics_field, &self.enr_key.write()) .map_err(|e| error!("Failed to insert field 'topics' into local enr. 
Error {:?}", e)).is_ok() { - // NOTE: Currently we don't expose custom filter support in the configuration. Users can - // optionally use the IP filter via the ip_limit configuration parameter. In the future, we - // may expose this functionality to the users if there is demand for it. - let (table_filter, bucket_filter) = if self.config.ip_limit { - ( - Some(Box::new(kbucket::IpTableFilter) as Box>), - Some(Box::new(kbucket::IpBucketFilter) as Box>), - ) - } else { - (None, None) - }; - - trace!("Initiating kbuckets for topic hash {}", topic_hash); - let mut kbuckets = KBucketsTable::new( - NodeId::new(&topic_hash.as_bytes()).into(), - Duration::from_secs(60), - self.config.incoming_bucket_limit, - table_filter, - bucket_filter, - ); - debug!("Adding {} entries from local routing table to topic's kbuckets", self.kbuckets.write().iter().count()); - - for entry in self.kbuckets.write().iter() { - match kbuckets.insert_or_update(entry.node.key, entry.node.value.clone(), entry.status) { - InsertResult::Inserted - | InsertResult::Pending { .. } - | InsertResult::StatusUpdated { .. } - | InsertResult::ValueUpdated - | InsertResult::Updated { .. } - | InsertResult::UpdatedPending => trace!( - "Added node id {} to kbucket of topic hash {}", - entry.node.value.node_id(), - topic_hash - ), - InsertResult::Failed(f) => error!("Failed to insert ENR for topic hash {}. Failure reason: {:?}", topic_hash, f), - } - } - self.topics_kbuckets.insert(topic_hash, kbuckets); + self.init_topic_kbuckets(topic_hash); METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); // To fill the kbuckets closest to the topic hash as well as those further away // (itertively getting closer to node ids to the topic hash) start a find node // query searching for the topic hash's bytes wrapped in a NodeId. 
let topic_key = NodeId::new(&topic_hash.as_bytes()); - let query_type = QueryType::FindTopic(topic_key); - self.start_findnode_query(query_type, None); + self.start_findnode_query(QueryType::FindTopic(topic_key), None); } } } @@ -828,6 +776,9 @@ impl Service { let mut discovered_new_peer = false; if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { for enr in found_enrs { + if !CHECK_VERSION(&enr, vec![TOPICS, TOPICS|NAT]) { + continue; + } trace!("Found new peer {} for topic {}", enr, topic_hash); let key = kbucket::Key::from(enr.node_id()); @@ -948,6 +899,59 @@ impl Service { } } + fn init_topic_kbuckets(&mut self, topic_hash: TopicHash) { + trace!("Initiating kbuckets for topic hash {}", topic_hash); + + // NOTE: Currently we don't expose custom filter support in the configuration. Users can + // optionally use the IP filter via the ip_limit configuration parameter. In the future, we + // may expose this functionality to the users if there is demand for it. + let (table_filter, bucket_filter) = if self.config.ip_limit { + ( + Some(Box::new(kbucket::IpTableFilter) as Box>), + Some(Box::new(kbucket::IpBucketFilter) as Box>), + ) + } else { + (None, None) + }; + + let mut kbuckets = KBucketsTable::new( + NodeId::new(&topic_hash.as_bytes()).into(), + Duration::from_secs(60), + self.config.incoming_bucket_limit, + table_filter, + bucket_filter, + ); + + debug!( + "Adding {} entries from local routing table to topic's kbuckets", + self.kbuckets.write().iter().count() + ); + + for entry in self.kbuckets.write().iter() { + let enr = entry.node.value.clone(); + if !CHECK_VERSION(&enr, vec![TOPICS, TOPICS | NAT]) { + continue; + } + match kbuckets.insert_or_update(entry.node.key, enr, entry.status) { + InsertResult::Inserted + | InsertResult::Pending { .. } + | InsertResult::StatusUpdated { .. } + | InsertResult::ValueUpdated + | InsertResult::Updated { .. 
} + | InsertResult::UpdatedPending => trace!( + "Added node id {} to kbucket of topic hash {}", + entry.node.value.node_id(), + topic_hash + ), + InsertResult::Failed(f) => error!( + "Failed to insert ENR for topic hash {}. Failure reason: {:?}", + topic_hash, f + ), + } + } + self.topics_kbuckets.insert(topic_hash, kbuckets); + } + /// Internal function that starts a topic registration. This function should not be called outside of [`REGISTER_INTERVAL`]. fn send_register_topics(&mut self, topic: Topic) -> usize { trace!("Sending REGTOPICS"); diff --git a/src/service/test.rs b/src/service/test.rs index 2eb6bfa73..465d18513 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -252,5 +252,19 @@ async fn encrypt_decrypt_ticket() { assert_eq!(decoded_ticket, ticket); } -#[tokio::test] -async fn test_ticketing() {} +#[test] +fn test_version_check() { + // Create the test values needed + let port = 6666; + let ip: std::net::IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let mut enr = crate::enr::EnrBuilder::new("v4") + .ip(ip) + .udp4(port) + .build(&key) + .unwrap(); + let supported_versions = TOPICS | NAT; + enr.insert("version", &[supported_versions], &key).unwrap(); + + assert!(CHECK_VERSION(&enr, vec!(supported_versions))); +} From e11c6dd8d6f00b30d08d771841d20d51ff0a1931 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Aug 2022 14:28:47 +0200 Subject: [PATCH 329/391] Advertise this node as supporting topics --- src/service.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index b849fd2ef..2414c9d5c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -52,7 +52,7 @@ use rlp::{Rlp, RlpStream}; use rpc::*; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, - io::Error, + io::{Error, ErrorKind}, net::SocketAddr, pin::Pin, sync::{atomic::Ordering, Arc}, @@ -469,6 +469,9 @@ impl Service { None }; + // This node supports topic requests REGTOPIC and 
TOPICQUERY, and their responses. + local_enr.write().insert("version", &[TOPICS], &enr_key.write()).map_err(|e| Error::new(ErrorKind::Other, format!("Failed to insert field 'version' into local enr. Error {:?}", e)))?; + // build the session service let (handler_exit, handler_send, handler_recv) = Handler::spawn( local_enr.clone(), From c0dca81892cd96706149f92340ce4fd8ef486456 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Aug 2022 14:33:00 +0200 Subject: [PATCH 330/391] Run cargo fmt --- src/service.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 2414c9d5c..f44a4b6ae 100644 --- a/src/service.rs +++ b/src/service.rs @@ -470,7 +470,18 @@ impl Service { }; // This node supports topic requests REGTOPIC and TOPICQUERY, and their responses. - local_enr.write().insert("version", &[TOPICS], &enr_key.write()).map_err(|e| Error::new(ErrorKind::Other, format!("Failed to insert field 'version' into local enr. Error {:?}", e)))?; + local_enr + .write() + .insert("version", &[TOPICS], &enr_key.write()) + .map_err(|e| { + Error::new( + ErrorKind::Other, + format!( + "Failed to insert field 'version' into local enr. 
Error {:?}", + e + ), + ) + })?; // build the session service let (handler_exit, handler_send, handler_recv) = Handler::spawn( From 10d73be0705cc4127f808c513867b81a4f511edb Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Aug 2022 14:49:49 +0200 Subject: [PATCH 331/391] Move enr version insertion to up a layer to discv5 --- src/discv5.rs | 37 ++++++++++++++++++++++++++++++++++++- src/discv5/test.rs | 22 +++++++++++++++++++++- src/service.rs | 44 ++------------------------------------------ src/service/test.rs | 17 ----------------- 4 files changed, 59 insertions(+), 61 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index c322aa066..2e86c3109 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -24,6 +24,7 @@ use crate::{ Discv5Config, Enr, Topic, }; use enr::{CombinedKey, EnrError, EnrKey, NodeId}; +use iota::iota; use parking_lot::RwLock; use std::{ collections::{BTreeMap, HashMap}, @@ -33,7 +34,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{mpsc, oneshot}; -use tracing::{debug, warn}; +use tracing::{debug, error, warn}; #[cfg(feature = "libp2p")] use libp2p_core::Multiaddr; @@ -52,6 +53,31 @@ pub static HASH: for<'a> fn(topic: &'a str) -> TopicHash = |topic| { sha256_topic.hash() }; +// Discv5 versions. +iota! { + pub const TOPICS: u8 = 1 << iota; + , NAT +} + +/// Check if a given peer supports one or more versions of the Discv5 protocol. +pub const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = + |peer, supported_versions| { + if let Some(version) = peer.get("version") { + if let Some(v) = version.get(0) { + // Only add nodes which support the topics version + return supported_versions.contains(v); + } else { + error!("Version field in enr of peer {} is empty", peer.node_id()); + return false; + } + } + error!( + "Enr of peer {} doesn't contain field 'version'", + peer.node_id() + ); + false + }; + mod test; /// Events that can be produced by the `Discv5` event stream. 
@@ -141,6 +167,15 @@ impl Discv5 { bucket_filter, ))); + // This node supports topic requests REGTOPIC and TOPICQUERY, and their responses. + if let Err(e) = local_enr + .write() + .insert("version", &[TOPICS], &enr_key.write()) + { + error!("Failed writing to enr. Error {:?}", e); + return Err("Failed to insert field 'version' into local enr"); + } + // Update the PermitBan list based on initial configuration *PERMIT_BAN_LIST.write() = config.permit_ban_list.clone(); diff --git a/src/discv5/test.rs b/src/discv5/test.rs index e64628597..e94505515 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -1,6 +1,9 @@ #![cfg(test)] -use crate::{kbucket, Discv5, *}; +use crate::{ + discv5::{CHECK_VERSION, NAT, TOPICS}, + kbucket, Discv5, *, +}; use enr::{k256, CombinedKey, Enr, EnrBuilder, EnrKey, NodeId}; use rand_core::{RngCore, SeedableRng}; use std::{collections::HashMap, net::Ipv4Addr}; @@ -624,3 +627,20 @@ async fn test_bucket_limits() { // Number of entries should be equal to `bucket_limit`. 
assert_eq!(discv5.kbuckets.read().iter_ref().count(), bucket_limit); } + +#[test] +fn test_version_check() { + // Create the test values needed + let port = 6666; + let ip: std::net::IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let mut enr = crate::enr::EnrBuilder::new("v4") + .ip(ip) + .udp4(port) + .build(&key) + .unwrap(); + let supported_versions = TOPICS | NAT; + enr.insert("version", &[supported_versions], &key).unwrap(); + + assert!(CHECK_VERSION(&enr, vec!(supported_versions))); +} diff --git a/src/service.rs b/src/service.rs index f44a4b6ae..9de2dbbbb 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,7 @@ use crate::{ topic::TopicHash, Ads, AD_LIFETIME, }, - discv5::PERMIT_BAN_LIST, + discv5::{CHECK_VERSION, NAT, PERMIT_BAN_LIST, TOPICS}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -45,14 +45,13 @@ use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; use futures::{future::select_all, prelude::*}; -use iota::iota; use more_asserts::debug_unreachable; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; use rpc::*; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, - io::{Error, ErrorKind}, + io::Error, net::SocketAddr, pin::Pin, sync::{atomic::Ordering, Arc}, @@ -86,31 +85,6 @@ const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; /// The duration in seconds which a node can come late to an assigned wait time. const WAIT_TIME_MARGINAL: Duration = Duration::from_secs(5); -// Discv5 versions. -iota! { - const TOPICS: u8 = 1 << iota; - , NAT -} - -/// Check if a given peer supports one or more versions of the Discv5 protocol. 
-const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = - |peer, supported_versions| { - if let Some(version) = peer.get("version") { - if let Some(v) = version.get(0) { - // Only add nodes which support the topics version - return supported_versions.contains(v); - } else { - error!("Version field in enr of peer {} is empty", peer.node_id()); - return false; - } - } - error!( - "Enr of peer {} doesn't contain filed 'version'", - peer.node_id() - ); - false - }; - /// Request type for Protocols using `TalkReq` message. /// /// Automatically responds with an empty body on drop if @@ -469,20 +443,6 @@ impl Service { None }; - // This node supports topic requests REGTOPIC and TOPICQUERY, and their responses. - local_enr - .write() - .insert("version", &[TOPICS], &enr_key.write()) - .map_err(|e| { - Error::new( - ErrorKind::Other, - format!( - "Failed to insert field 'version' into local enr. Error {:?}", - e - ), - ) - })?; - // build the session service let (handler_exit, handler_send, handler_recv) = Handler::spawn( local_enr.clone(), diff --git a/src/service/test.rs b/src/service/test.rs index 465d18513..51277af89 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -251,20 +251,3 @@ async fn encrypt_decrypt_ticket() { assert_eq!(decoded_ticket, ticket); } - -#[test] -fn test_version_check() { - // Create the test values needed - let port = 6666; - let ip: std::net::IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); - let mut enr = crate::enr::EnrBuilder::new("v4") - .ip(ip) - .udp4(port) - .build(&key) - .unwrap(); - let supported_versions = TOPICS | NAT; - enr.insert("version", &[supported_versions], &key).unwrap(); - - assert!(CHECK_VERSION(&enr, vec!(supported_versions))); -} From a6d57f2485e72337cfb6f585a1aedc23b8e8a8f0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Aug 2022 15:01:30 +0200 Subject: [PATCH 332/391] Add debug --- src/discv5.rs | 21 ++++++++++++--------- 1 file changed, 12 
insertions(+), 9 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 2e86c3109..ef0a8efe5 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -55,8 +55,8 @@ pub static HASH: for<'a> fn(topic: &'a str) -> TopicHash = |topic| { // Discv5 versions. iota! { - pub const TOPICS: u8 = 1 << iota; - , NAT + pub const NAT: u8 = 1 << iota; + , TOPICS } /// Check if a given peer supports one or more versions of the Discv5 protocol. @@ -65,17 +65,18 @@ pub const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = if let Some(version) = peer.get("version") { if let Some(v) = version.get(0) { // Only add nodes which support the topics version - return supported_versions.contains(v); + supported_versions.contains(v) } else { error!("Version field in enr of peer {} is empty", peer.node_id()); - return false; + false } + } else { + error!( + "Enr of peer {} doesn't contain field 'version'", + peer.node_id() + ); + false } - error!( - "Enr of peer {} doesn't contain field 'version'", - peer.node_id() - ); - false }; mod test; @@ -176,6 +177,8 @@ impl Discv5 { return Err("Failed to insert field 'version' into local enr"); } + println!("{:?}", local_enr.read().get("version").unwrap()); + // Update the PermitBan list based on initial configuration *PERMIT_BAN_LIST.write() = config.permit_ban_list.clone(); From 247041074abfe7193f7fa8bce5fd0b2caadcfadf Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Aug 2022 15:26:22 +0200 Subject: [PATCH 333/391] If topic lookup runs dry, find more peers --- src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service.rs b/src/service.rs index 9de2dbbbb..784f19914 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1114,6 +1114,7 @@ impl Service { debug!("Found no new peers to send TOPICQUERY to, setting query status to dry"); if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { query.dry = true; + self.start_findnode_query(QueryType::FindTopic(topic_key), None); } return; } From 
5a7f0e8b3c8aee1392e2b6712b7d0426947df139 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 3 Aug 2022 15:27:57 +0200 Subject: [PATCH 334/391] fixup! If topic lookup runs dry, find more peers --- src/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service.rs b/src/service.rs index 784f19914..80044e4cb 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1114,6 +1114,7 @@ impl Service { debug!("Found no new peers to send TOPICQUERY to, setting query status to dry"); if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { query.dry = true; + let topic_key = NodeId::new(&topic_hash.as_bytes()); self.start_findnode_query(QueryType::FindTopic(topic_key), None); } return; From d02bd7a0958408df49046dc897c07fc5656e3c42 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 5 Aug 2022 16:54:09 +0200 Subject: [PATCH 335/391] Allow peers which don't include version field in enr --- src/discv5.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/discv5.rs b/src/discv5.rs index ef0a8efe5..24274c66e 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -60,6 +60,7 @@ iota! { } /// Check if a given peer supports one or more versions of the Discv5 protocol. +/// /// Returns true if any of the given versions are supported. 
pub const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = |peer, supported_versions| { if let Some(version) = peer.get("version") { @@ -71,7 +72,7 @@ pub const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = false } } else { - error!( + warn!( "Enr of peer {} doesn't contain field 'version'", peer.node_id() ); From c830ed070413b51ba60b0bf5f805bd2353996754 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Tue, 30 Aug 2022 14:40:36 +0200 Subject: [PATCH 336/391] Fix typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/advertisement/ticket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 06dd43755..e1b13f15f 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -8,7 +8,7 @@ use std::{cmp::Eq, hash::Hash}; /// The max wait time accpeted for tickets. pub const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; -/// The time window within in which the number of new tickets from a peer for a topic will be limitied. +/// The time window within which the number of new tickets from a peer for a topic will be limitied. pub const TICKET_LIMIT_DURATION: Duration = Duration::from_secs(60 * 15); /// Max tickets that are stored for an individual node for a topic (in the configured From 679635f48ea47c683094bcf599cb3054ae38bd71 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Tue, 30 Aug 2022 14:42:08 +0200 Subject: [PATCH 337/391] Fix typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/discv5.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/discv5.rs b/src/discv5.rs index 24274c66e..ebc183855 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -60,7 +60,7 @@ iota! { } /// Check if a given peer supports one or more versions of the Discv5 protocol. 
-/// /// Returns true if any of the given versions are supported. +/// Returns true if any of the given versions is supported. pub const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = |peer, supported_versions| { if let Some(version) = peer.get("version") { From 8bdd65301264b454d5737fa2c6b6565acc3c8a38 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 30 Aug 2022 14:49:15 +0200 Subject: [PATCH 338/391] Use consants instead of string literals --- src/discv5.rs | 19 ++++++++++++------- src/discv5/test.rs | 7 ++++--- src/service.rs | 14 +++++++------- 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 24274c66e..7f36f3981 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -53,17 +53,21 @@ pub static HASH: for<'a> fn(topic: &'a str) -> TopicHash = |topic| { sha256_topic.hash() }; +/// Custom ENR keys. +const ENR_KEY_VERSION: &str = "version"; +pub const ENR_KEY_TOPICS: &str = "topics"; + // Discv5 versions. iota! { - pub const NAT: u8 = 1 << iota; - , TOPICS + pub const VERSION_NAT: u8 = 1 << iota; + , VERISON_TOPICS } /// Check if a given peer supports one or more versions of the Discv5 protocol. /// /// Returns true if any of the given versions are supported. pub const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = |peer, supported_versions| { - if let Some(version) = peer.get("version") { + if let Some(version) = peer.get(ENR_KEY_VERSION) { if let Some(v) = version.get(0) { // Only add nodes which support the topics version supported_versions.contains(v) @@ -170,15 +174,16 @@ impl Discv5 { ))); // This node supports topic requests REGTOPIC and TOPICQUERY, and their responses. - if let Err(e) = local_enr - .write() - .insert("version", &[TOPICS], &enr_key.write()) + if let Err(e) = + local_enr + .write() + .insert(ENR_KEY_VERSION, &[VERISON_TOPICS], &enr_key.write()) { error!("Failed writing to enr. 
Error {:?}", e); return Err("Failed to insert field 'version' into local enr"); } - println!("{:?}", local_enr.read().get("version").unwrap()); + println!("{:?}", local_enr.read().get(ENR_KEY_VERSION).unwrap()); // Update the PermitBan list based on initial configuration *PERMIT_BAN_LIST.write() = config.permit_ban_list.clone(); diff --git a/src/discv5/test.rs b/src/discv5/test.rs index e94505515..f8c221cae 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -1,7 +1,7 @@ #![cfg(test)] use crate::{ - discv5::{CHECK_VERSION, NAT, TOPICS}, + discv5::{CHECK_VERSION, ENR_KEY_VERSION, VERISON_TOPICS, VERSION_NAT}, kbucket, Discv5, *, }; use enr::{k256, CombinedKey, Enr, EnrBuilder, EnrKey, NodeId}; @@ -639,8 +639,9 @@ fn test_version_check() { .udp4(port) .build(&key) .unwrap(); - let supported_versions = TOPICS | NAT; - enr.insert("version", &[supported_versions], &key).unwrap(); + let supported_versions = VERISON_TOPICS | VERSION_NAT; + enr.insert(ENR_KEY_VERSION, &[supported_versions], &key) + .unwrap(); assert!(CHECK_VERSION(&enr, vec!(supported_versions))); } diff --git a/src/service.rs b/src/service.rs index 80044e4cb..409907c7c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,7 @@ use crate::{ topic::TopicHash, Ads, AD_LIFETIME, }, - discv5::{CHECK_VERSION, NAT, PERMIT_BAN_LIST, TOPICS}, + discv5::{CHECK_VERSION, ENR_KEY_TOPICS, PERMIT_BAN_LIST, VERISON_TOPICS, VERSION_NAT}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -563,7 +563,7 @@ impl Service { if self.registration_attempts.insert(topic.clone(), BTreeMap::new()).is_some() { warn!("This topic is already being advertised"); } else { - let topics_field = if let Some(topics) = self.local_enr.read().get("topics") { + let topics_field = if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { let rlp = Rlp::new(topics); let item_count = rlp.iter().count(); let mut rlp_stream = RlpStream::new_list(item_count + 1); @@ -587,7 +587,7 @@ 
impl Service { if self.local_enr .write() - .insert("topics", &topics_field, &self.enr_key.write()) + .insert(ENR_KEY_TOPICS, &topics_field, &self.enr_key.write()) .map_err(|e| error!("Failed to insert field 'topics' into local enr. Error {:?}", e)).is_ok() { self.init_topic_kbuckets(topic_hash); @@ -750,7 +750,7 @@ impl Service { let mut discovered_new_peer = false; if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { for enr in found_enrs { - if !CHECK_VERSION(&enr, vec![TOPICS, TOPICS|NAT]) { + if !CHECK_VERSION(&enr, vec![VERISON_TOPICS, VERISON_TOPICS|VERSION_NAT]) { continue; } trace!("Found new peer {} for topic {}", enr, topic_hash); @@ -903,7 +903,7 @@ impl Service { for entry in self.kbuckets.write().iter() { let enr = entry.node.value.clone(); - if !CHECK_VERSION(&enr, vec![TOPICS, TOPICS | NAT]) { + if !CHECK_VERSION(&enr, vec![VERISON_TOPICS, VERISON_TOPICS | VERSION_NAT]) { continue; } match kbuckets.insert_or_update(entry.node.key, enr, entry.status) { @@ -1335,7 +1335,7 @@ impl Service { // Blacklist if node doesn't contain the given topic in its enr 'topics' field let mut topic_in_enr = false; - if let Some(topics) = enr.get("topics") { + if let Some(topics) = enr.get(ENR_KEY_TOPICS) { let rlp = Rlp::new(topics); for item in rlp.iter() { if let Ok(data) = item.data().map_err(|e| error!("Could not decode a topic in topics field in enr of peer {}. Error {}", enr.node_id(), e)) { @@ -1601,7 +1601,7 @@ impl Service { return false; } // Ads are checked for validity, if they do not contain the topic in their enr, they are discarded - if let Some(topics) = enr.get("topics") { + if let Some(topics) = enr.get(ENR_KEY_TOPICS) { let rlp = Rlp::new(topics); for item in rlp.iter() { if let Ok(data) = item.data().map_err(|e| error!("Could not decode a topic in topics field in enr of peer {}. 
Error {}", enr.node_id(), e)) { From 36a60cc6ea3a736362ffc59254ca01f9680d14b4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 30 Aug 2022 15:03:59 +0200 Subject: [PATCH 339/391] Fix non-backwards compatibale changes as introduced by new stable rust --- src/discv5.rs | 2 +- src/error.rs | 8 ++++---- src/handler/mod.rs | 4 ++-- src/ipmode.rs | 2 +- src/packet/mod.rs | 6 +++--- src/rpc.rs | 10 +++++----- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 7f36f3981..233d6dcd4 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -68,7 +68,7 @@ iota! { pub const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = |peer, supported_versions| { if let Some(version) = peer.get(ENR_KEY_VERSION) { - if let Some(v) = version.get(0) { + if let Some(v) = version.first() { // Only add nodes which support the topics version supported_versions.contains(v) } else { diff --git a/src/error.rs b/src/error.rs index 046e03580..9603d90da 100644 --- a/src/error.rs +++ b/src/error.rs @@ -47,7 +47,7 @@ impl From for Discv5Error { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] /// Types of packet errors. pub enum PacketError { /// The packet type is unknown. @@ -70,7 +70,7 @@ pub enum PacketError { InvalidEnr(DecoderError), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] #[non_exhaustive] pub enum ResponseError { /// The channel used to send the response has already been closed. @@ -89,7 +89,7 @@ impl fmt::Display for ResponseError { impl std::error::Error for ResponseError {} -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum RequestError { /// The request timed out. Timeout, @@ -129,7 +129,7 @@ pub enum RequestError { InvalidTopicsEnr, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum QueryError { /// The discv5 service is not currently running. 
ServiceNotStarted, diff --git a/src/handler/mod.rs b/src/handler/mod.rs index c42b6197c..dad7f013a 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -106,7 +106,7 @@ pub enum HandlerIn { } /// Messages sent between a node on the network and `Handler`. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum HandlerOut { /// A session has been established with a node. /// @@ -145,7 +145,7 @@ pub enum ConnectionDirection { /// A reference for the application layer to send back when the handler requests any known /// ENR for the NodeContact. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct WhoAreYouRef(pub NodeAddress, MessageNonce); #[derive(Debug)] diff --git a/src/ipmode.rs b/src/ipmode.rs index e7ae8eb55..b8774931f 100644 --- a/src/ipmode.rs +++ b/src/ipmode.rs @@ -5,7 +5,7 @@ use std::net::SocketAddr; /// Sets the socket type to be established and also determines the type of ENRs that we will store /// in our routing table. /// We store ENR's that have a `get_contractable_addr()` based on the `IpMode` set. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum IpMode { /// IPv4 only. This creates an IPv4 only UDP socket and will only store ENRs in the local /// routing table if they contain a contactable IPv4 address. diff --git a/src/packet/mod.rs b/src/packet/mod.rs index adf8facae..cd599a2c9 100644 --- a/src/packet/mod.rs +++ b/src/packet/mod.rs @@ -72,7 +72,7 @@ impl AsRef<[u8]> for ChallengeData { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct Packet { /// Random data unique to the packet. 
pub iv: u128, @@ -82,7 +82,7 @@ pub struct Packet { pub message: Vec, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct PacketHeader { /// The nonce of the associated message pub message_nonce: MessageNonce, @@ -106,7 +106,7 @@ impl PacketHeader { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum PacketKind { /// An ordinary message. Message { diff --git a/src/rpc.rs b/src/rpc.rs index 1dd9d1444..76208508f 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -37,7 +37,7 @@ impl RequestId { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] /// A combined type representing requests and responses. pub enum Message { /// A request, which contains its [`RequestId`]. @@ -46,7 +46,7 @@ pub enum Message { Response(Response), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] /// A request sent between nodes. pub struct Request { /// The [`RequestId`] of the request. @@ -55,7 +55,7 @@ pub struct Request { pub body: RequestBody, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] /// A response sent in response to a [`Request`] pub struct Response { /// The [`RequestId`] of the request that triggered this response. @@ -64,7 +64,7 @@ pub struct Response { pub body: ResponseBody, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum RequestBody { /// A PING request. Ping { @@ -99,7 +99,7 @@ pub enum RequestBody { }, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { /// A PONG response. 
Pong { From 033cf8278476eb0db59313236e7c68ab0c034168 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Tue, 30 Aug 2022 15:06:53 +0200 Subject: [PATCH 340/391] Make name more verbose Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 80044e4cb..89ec0d5c7 100644 --- a/src/service.rs +++ b/src/service.rs @@ -71,7 +71,7 @@ pub(crate) const DISTANCES_TO_REQUEST_PER_PEER: usize = 3; /// The number of registration attempts that should be active per distance /// if there are sufficient peers. -const MAX_REG_ATTEMPTS_DISTANCE: usize = 16; +const MAX_REG_ATTEMPTS_PER_DISTANCE: usize = 16; /// Registration of topics are paced to occur at intervals t avoid a self-provoked DoS. const REGISTER_INTERVAL: Duration = Duration::from_secs(60); From cfc837bad506e395cb2a612644016e36131012e3 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Tue, 30 Aug 2022 15:07:52 +0200 Subject: [PATCH 341/391] Clarify docs Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 89ec0d5c7..aaf1c69bf 100644 --- a/src/service.rs +++ b/src/service.rs @@ -69,7 +69,7 @@ mod test; /// NOTE: This must not be larger than 127. pub(crate) const DISTANCES_TO_REQUEST_PER_PEER: usize = 3; -/// The number of registration attempts that should be active per distance +/// The maximum number of registration attempts that may be active per distance /// if there are sufficient peers. 
const MAX_REG_ATTEMPTS_PER_DISTANCE: usize = 16; From 0271c7dd85a49589585e857fb42e7c4e603dc379 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Tue, 30 Aug 2022 15:08:14 +0200 Subject: [PATCH 342/391] Fix typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index aaf1c69bf..6696b9f04 100644 --- a/src/service.rs +++ b/src/service.rs @@ -73,7 +73,7 @@ pub(crate) const DISTANCES_TO_REQUEST_PER_PEER: usize = 3; /// if there are sufficient peers. const MAX_REG_ATTEMPTS_PER_DISTANCE: usize = 16; -/// Registration of topics are paced to occur at intervals t avoid a self-provoked DoS. +/// Registration of topics are paced to occur at intervals to avoid a self-provoked DoS. const REGISTER_INTERVAL: Duration = Duration::from_secs(60); /// Registration attempts must be limited per registration interval. From fdec54083293c9e8a0ebdfc85c9781f2f8c43766 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Tue, 30 Aug 2022 15:08:36 +0200 Subject: [PATCH 343/391] Make name more verbose Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 6696b9f04..65d4501a5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -77,7 +77,7 @@ const MAX_REG_ATTEMPTS_PER_DISTANCE: usize = 16; const REGISTER_INTERVAL: Duration = Duration::from_secs(60); /// Registration attempts must be limited per registration interval. -const MAX_REGTOPICS_REGISTER_INTERVAL: usize = 16; +const MAX_REGTOPICS_REGISTER_PER_INTERVAL: usize = 16; /// The max number of uncontacted peers to store before the kbuckets per topic. 
const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; From 2a5942abf3f57c15f6e44617c73dea1cc95aa46b Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:27:17 +0200 Subject: [PATCH 344/391] Make name more verbose Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index cbef3f9ba..1990c3464 100644 --- a/src/service.rs +++ b/src/service.rs @@ -80,7 +80,7 @@ const REGISTER_INTERVAL: Duration = Duration::from_secs(60); const MAX_REGTOPICS_REGISTER_PER_INTERVAL: usize = 16; /// The max number of uncontacted peers to store before the kbuckets per topic. -const MAX_UNCONTACTED_PEERS_TOPIC_BUCKET: usize = 16; +const MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET: usize = 16; /// The duration in seconds which a node can come late to an assigned wait time. const WAIT_TIME_MARGINAL: Duration = Duration::from_secs(5); From 21aa98d182b0b767bfeef7e0782c16730d84909b Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:27:59 +0200 Subject: [PATCH 345/391] Improve wording Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 1990c3464..7572b2d73 100644 --- a/src/service.rs +++ b/src/service.rs @@ -83,7 +83,7 @@ const MAX_REGTOPICS_REGISTER_PER_INTERVAL: usize = 16; const MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET: usize = 16; /// The duration in seconds which a node can come late to an assigned wait time. -const WAIT_TIME_MARGINAL: Duration = Duration::from_secs(5); +const WAIT_TIME_TOLERANCE: Duration = Duration::from_secs(5); /// Request type for Protocols using `TalkReq` message. 
/// From 89928a40dc639bbf798a1c270b28514449c2553a Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:28:32 +0200 Subject: [PATCH 346/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 7572b2d73..daec2220a 100644 --- a/src/service.rs +++ b/src/service.rs @@ -191,7 +191,7 @@ pub enum ServiceRequest { /// RegisterTopic publishes this node as an advertiser for a topic in a discv5 network /// until removed. RegisterTopic(Topic), - /// Stops publishing this node as an advetiser for a topic. + /// Stops publishing this node as an advertiser for a topic. RemoveTopic(Topic, oneshot::Sender>), /// Retrieves the ads currently published by this node on other nodes in a discv5 network. ActiveTopics(oneshot::Sender>, RequestError>>), From 49ed28f799d8dd5223585a8dfb49066bd24f993e Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:28:52 +0200 Subject: [PATCH 347/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index daec2220a..be889c1ae 100644 --- a/src/service.rs +++ b/src/service.rs @@ -274,7 +274,7 @@ pub struct Service { /// The key used for en-/decrypting tickets. ticket_key: [u8; 16], - /// Tickets received by other nodes. + /// Tickets received from other nodes. tickets: Tickets, /// Locally initiated topic query requests in process. 
From 36b9c431ae738a491feaa2d8ffb75079cc7cb01b Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:29:14 +0200 Subject: [PATCH 348/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index be889c1ae..64c4eda69 100644 --- a/src/service.rs +++ b/src/service.rs @@ -195,7 +195,7 @@ pub enum ServiceRequest { RemoveTopic(Topic, oneshot::Sender>), /// Retrieves the ads currently published by this node on other nodes in a discv5 network. ActiveTopics(oneshot::Sender>, RequestError>>), - /// Retrieves the ads adveritsed for other nodes for a given topic. + /// Retrieves the ads advertised for other nodes for a given topic. Ads(TopicHash, oneshot::Sender, RequestError>>), /// Retrieves the registration attempts acitve for a given topic. RegistrationAttempts( From 3bd661c4813ea1dae12f550fd7e6dfd3fe9d8c61 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:29:30 +0200 Subject: [PATCH 349/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 64c4eda69..6392f2c44 100644 --- a/src/service.rs +++ b/src/service.rs @@ -197,7 +197,7 @@ pub enum ServiceRequest { ActiveTopics(oneshot::Sender>, RequestError>>), /// Retrieves the ads advertised for other nodes for a given topic. Ads(TopicHash, oneshot::Sender, RequestError>>), - /// Retrieves the registration attempts acitve for a given topic. + /// Retrieves the registration attempts active for a given topic. 
RegistrationAttempts( Topic, oneshot::Sender, RequestError>>, From 610fd7fbdeaa8d31272b583e31c60829883235d3 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:30:03 +0200 Subject: [PATCH 350/391] Improve docs semantics Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index 6392f2c44..babae5b9d 100644 --- a/src/service.rs +++ b/src/service.rs @@ -259,8 +259,7 @@ pub struct Service { /// Ads advertised locally for other nodes. ads: Ads, - /// Topics tracks registration attempts of the topic hashes to advertise on - /// other nodes. + /// Registrations attempts underway for each topic. registration_attempts: HashMap>, /// KBuckets per topic hash. From 830a29c2a6b778c9c86b1b8ddeda60435406d6ec Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:30:26 +0200 Subject: [PATCH 351/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index babae5b9d..04bb65ac2 100644 --- a/src/service.rs +++ b/src/service.rs @@ -266,7 +266,7 @@ pub struct Service { topics_kbuckets: HashMap>, /// The peers returned in a NODES response to a TOPICQUERY or REGTOPIC request are inserted in - /// this intermediary stroage to check their connectivity before inserting them in the topic's + /// this intermediary storage to check their connectivity before inserting them in the topic's /// kbuckets. 
discovered_peers_topic: HashMap>>, From 7a23172c4e41c2749092c6ba60106b84ccfd6917 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:30:52 +0200 Subject: [PATCH 352/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 04bb65ac2..f6ca9c2c6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -290,7 +290,7 @@ pub enum TopicQueryState { /// The topic look up has not obtained enough results and has timed out. TimedOut(TopicHash), /// Not enough ads have been returned from the first round of sending TOPICQUERY - /// requests, new peers in the topic's kbucktes should be queried. + /// requests, new peers in the topic's kbuckets should be queried. Unsatisfied(TopicHash), } From a72e750f1f616453e4a160f61c7652c7783c4a70 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:31:23 +0200 Subject: [PATCH 353/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index f6ca9c2c6..17be738b7 100644 --- a/src/service.rs +++ b/src/service.rs @@ -295,7 +295,7 @@ pub enum TopicQueryState { } /// At any given time, a set number of registrations should be active per topic hash to -/// set to be registered. A registration is active when either a ticket for an adslot is +/// set to be registered. A registration is active when either a ticket for an ad slot is /// held and the ticket wait time has not yet expired, or a REGCONFIRMATION has been /// received for an ad slot and the ad lifetime has not yet elapsed. 
#[derive(Debug, Clone)] From 00a7d8f2be41a4f4c9ce313c00a9e13a3c62dee6 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:32:56 +0200 Subject: [PATCH 354/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index 17be738b7..49fe9d4ba 100644 --- a/src/service.rs +++ b/src/service.rs @@ -306,8 +306,8 @@ pub enum RegistrationState { /// wait time. Ticket, /// A fixed number of tickets are accepted within a certain time span. A node id in - /// ticket limit regsitration state will not be sent a REGTOPIC till the ticket - /// TICKET_LIMITER_DURATION has expired. + /// ticket limit registration state will not be sent a REGTOPIC until the ticket + /// TICKET_LIMIT_DURATION has expired. TicketLimit(Instant), } From f46d9da0aca18ab33e8a5cce290eb94fd201187c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 08:33:25 +0200 Subject: [PATCH 355/391] Link constant in docs --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index cbef3f9ba..beea30019 100644 --- a/src/service.rs +++ b/src/service.rs @@ -308,7 +308,7 @@ pub enum RegistrationState { Ticket, /// A fixed number of tickets are accepted within a certain time span. A node id in /// ticket limit regsitration state will not be sent a REGTOPIC till the ticket - /// TICKET_LIMITER_DURATION has expired. + /// [`TICKET_LIMIT_DURATION`] has expired. 
TicketLimit(Instant), } From 090a361d58fd33eee6594b01e2c5233e7614179a Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:35:45 +0200 Subject: [PATCH 356/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/service.rs b/src/service.rs index e179890a1..ece9973c5 100644 --- a/src/service.rs +++ b/src/service.rs @@ -312,8 +312,8 @@ pub enum RegistrationState { } /// An active topic query/lookup keeps track of which peers from the topic's kbuckets -/// have already been queired until the set number of ads are found for the lookup or it -/// is prematurely terminated in lack of peers or time. +/// have already been queried until the set number of ads are found for the lookup or it +/// is prematurely terminated by lack of peers or time. pub struct ActiveTopicQuery { /// A NodeId mapped to false is waiting for a response. A value of true means the /// TOPICQUERY has received a response or the request has failed. From 96af7f7b15593d6e334f050eb227d41ecf2e7497 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:35:59 +0200 Subject: [PATCH 357/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index ece9973c5..a051e0762 100644 --- a/src/service.rs +++ b/src/service.rs @@ -323,7 +323,7 @@ pub struct ActiveTopicQuery { /// The resulting ad nodes are returned to the app layer when the query has reached /// a Finished, TimedOut or Dry state. callback: Option, RequestError>>>, - /// A start time is used to montior time out of the query. + /// A start time is used to monitor time out of the query. 
start: Instant, /// A query is marked as dry being true if no peers are found in the topic's kbuckets /// that aren't already queried peers. From b1a8440388760ed5f670ee4c894047d95ecd3f43 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:36:18 +0200 Subject: [PATCH 358/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index a051e0762..8f16eb438 100644 --- a/src/service.rs +++ b/src/service.rs @@ -550,7 +550,7 @@ impl Service { self.init_topic_kbuckets(topic_hash); } // To fill the kbuckets closest to the topic hash as well as those further away - // (itertively getting closer to node ids to the topic hash) start a find node + // (iteratively getting closer to node ids to the topic hash) start a find node // query searching for the topic hash's bytes wrapped in a NodeId. let topic_key = NodeId::new(&topic_hash.as_bytes()); self.start_findnode_query(QueryType::FindTopic(topic_key), None); From ef31700305e855fa5ea3895bf4ad61c36c00ee53 Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 08:37:05 +0200 Subject: [PATCH 359/391] Correct typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index 8f16eb438..2b990d5b1 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1403,7 +1403,7 @@ impl Service { Duration::default(), ); - // If there is no wait time and the ad is successfuly registered as an ad, the new ticket is sent + // If there is no wait time and the ad is successfully registered as an ad, the new ticket is sent // with wait time set to zero indicating successful registration. 
if let Err((wait_time, e)) = self.ads From 642a96256f8d09726a6135fd7454b5b083705fc5 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 09:10:49 +0200 Subject: [PATCH 360/391] Use custom type to make code more descriptive --- src/service.rs | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/src/service.rs b/src/service.rs index e179890a1..e416c7a39 100644 --- a/src/service.rs +++ b/src/service.rs @@ -65,13 +65,16 @@ mod ip_vote; mod query_info; mod test; +/// The log2distance between to keys. +pub type Log2Distance = u64; + /// The number of distances (buckets) we simultaneously request from each peer. /// NOTE: This must not be larger than 127. pub(crate) const DISTANCES_TO_REQUEST_PER_PEER: usize = 3; /// The maximum number of registration attempts that may be active per distance /// if there are sufficient peers. -const MAX_REG_ATTEMPTS_PER_DISTANCE: usize = 16; +const MAX_REG_ATTEMPTS_PER_LOG2DISTANCE: usize = 16; /// Registration of topics are paced to occur at intervals to avoid a self-provoked DoS. const REGISTER_INTERVAL: Duration = Duration::from_secs(60); @@ -200,12 +203,12 @@ pub enum ServiceRequest { /// Retrieves the registration attempts active for a given topic. RegistrationAttempts( Topic, - oneshot::Sender, RequestError>>, + oneshot::Sender, RequestError>>, ), /// Retrieves the node id of entries in a given topic's kbuckets by distance. TableEntriesIdTopic( TopicHash, - oneshot::Sender>, RequestError>>, + oneshot::Sender>, RequestError>>, ), } @@ -259,16 +262,17 @@ pub struct Service { /// Ads advertised locally for other nodes. ads: Ads, - /// Registrations attempts underway for each topic. - registration_attempts: HashMap>, + /// Registrations attempts underway for each topic stored by bucket index, i.e. the + /// log2distance to the local node id. + registration_attempts: HashMap>, /// KBuckets per topic hash. 
topics_kbuckets: HashMap>, /// The peers returned in a NODES response to a TOPICQUERY or REGTOPIC request are inserted in /// this intermediary storage to check their connectivity before inserting them in the topic's - /// kbuckets. - discovered_peers_topic: HashMap>>, + /// kbuckets. Peers are stored by bucket index, i.e. the log2distance to the local node id. + discovered_peers_topic: HashMap>>, /// The key used for en-/decrypting tickets. ticket_key: [u8; 16], @@ -657,7 +661,7 @@ impl Service { // The bucket's index in the Vec of buckets in the kbucket table will // be one less than the distance as the log2distance 0 from the local // node, i.e. the local node, is not assigned a bucket. - let distance = index as u64 + 1; + let distance = index as Log2Distance + 1; let mut node_ids = Vec::new(); bucket.iter().for_each(|node| node_ids.push(*node.key.preimage())); table_entries.insert(distance, node_ids); @@ -777,7 +781,7 @@ impl Service { let bucket = discovered_peers.entry(distance).or_default(); // If the intermediary storage before the topic's kbucktes is at bounds, discard the // uncontacted peers. - if bucket.len() < MAX_UNCONTACTED_PEERS_TOPIC_BUCKET { + if bucket.len() < MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET { bucket.insert(node_id, enr.clone()); discovered_new_peer = true; } else { @@ -859,7 +863,7 @@ impl Service { while let Some((topic, _topic_hash)) = topic_item { trace!("Publishing topic {} with hash {}", topic.topic(), topic.hash()); sent_regtopics += self.send_register_topics(topic.clone()); - if sent_regtopics >= MAX_REGTOPICS_REGISTER_INTERVAL { + if sent_regtopics >= MAX_REGTOPICS_REGISTER_PER_INTERVAL { break } topic_item = topics_to_reg_iter.next(); @@ -941,10 +945,10 @@ impl Service { // Ensure that max_reg_attempts_bucket registration attempts are alive per bucket if that many peers are // available at that distance. 
for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { - if new_peers.len() >= MAX_REGTOPICS_REGISTER_INTERVAL { + if new_peers.len() >= MAX_REGTOPICS_REGISTER_PER_INTERVAL { break; } - let distance = index as u64 + 1; + let distance = index as Log2Distance + 1; let mut active_reg_attempts_bucket = 0; let registrations = reg_attempts.entry(distance).or_default(); @@ -976,7 +980,7 @@ impl Service { if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { if let Some(bucket) = peers.get_mut(&distance) { bucket.retain(|node_id, enr | { - if new_peers_bucket.len() + active_reg_attempts_bucket >= MAX_REG_ATTEMPTS_DISTANCE { + if new_peers_bucket.len() + active_reg_attempts_bucket >= MAX_REG_ATTEMPTS_PER_LOG2DISTANCE { true } else if let Entry::Vacant(_) = registrations.reg_attempts.entry(*node_id) { debug!("Found new registration peer in uncontacted peers for topic {}. Peer: {:?}", topic_hash, node_id); @@ -994,12 +998,12 @@ impl Service { // The count of active registration attempts for a distance after expired ads have been // removed is less than the max number of registration attempts that should be active // per bucket and is not equal to the total number of peers available in that bucket. - if active_reg_attempts_bucket < MAX_REG_ATTEMPTS_DISTANCE + if active_reg_attempts_bucket < MAX_REG_ATTEMPTS_PER_LOG2DISTANCE && registrations.reg_attempts.len() != bucket.num_entries() { for peer in bucket.iter() { if new_peers_bucket.len() + active_reg_attempts_bucket - >= MAX_REG_ATTEMPTS_DISTANCE + >= MAX_REG_ATTEMPTS_PER_LOG2DISTANCE { break; } @@ -1376,7 +1380,7 @@ impl Service { let waited_time = ticket.req_time().elapsed(); let wait_time = ticket.wait_time(); if waited_time < wait_time - || waited_time >= wait_time + WAIT_TIME_MARGINAL + || waited_time >= wait_time + WAIT_TIME_TOLERANCE { warn!("The REGTOPIC has not waited the time assigned in the ticket. 
Blacklisting peer {}.", node_address.node_id); let ban_timeout = @@ -1462,7 +1466,7 @@ impl Service { // size of 1280 and ENR's have a max size of 300 bytes. // // Bucket sizes should be 16. In this case, there should be no more than 5*DISTANCES_TO_REQUEST_PER_PEER responses, to return all required peers. - if total > 5 * DISTANCES_TO_REQUEST_PER_PEER as u64 { + if total > 5 * DISTANCES_TO_REQUEST_PER_PEER as Log2Distance { warn!( "NodesResponse has a total larger than {}, nodes will be truncated", DISTANCES_TO_REQUEST_PER_PEER * 5 @@ -1996,7 +2000,7 @@ impl Service { &mut self, node_address: NodeAddress, rpc_id: RequestId, - mut distances: Vec, + mut distances: Vec, ) { // NOTE: At most we only allow 5 distances to be sent (see the decoder). If each of these // buckets are full, that equates to 80 ENR's to respond with. From a5248769df4b01e4ee68b75eb2526f470879ab55 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 11:23:05 +0200 Subject: [PATCH 361/391] Improve code structure --- src/service.rs | 152 +++++++++++++++++++++++++++++-------------------- 1 file changed, 90 insertions(+), 62 deletions(-) diff --git a/src/service.rs b/src/service.rs index 3d67bc018..109160ca2 100644 --- a/src/service.rs +++ b/src/service.rs @@ -562,70 +562,10 @@ impl Service { self.send_topic_queries(topic_hash, Some(callback)); } ServiceRequest::RegisterTopic(topic) => { - let topic_hash = topic.hash(); - if self.registration_attempts.insert(topic.clone(), BTreeMap::new()).is_some() { - warn!("This topic is already being advertised"); - } else { - let topics_field = if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { - let rlp = Rlp::new(topics); - let item_count = rlp.iter().count(); - let mut rlp_stream = RlpStream::new_list(item_count + 1); - for item in rlp.iter() { - if let Ok(data) = item.data().map_err(|e| debug_unreachable!("Topic item which was previously encoded in enr, cannot be decoded into data. 
Error {}", e)) { - rlp_stream.append(&data); - } - } - rlp_stream.append(&topic.topic().as_bytes()); - rlp_stream.out() - } else { - let mut rlp_stream = RlpStream::new_list(1); - rlp_stream.append(&topic.topic().as_bytes()); - rlp_stream.out() - }; - - let enr_size = self.local_enr.read().size() + topics_field.len(); - if enr_size >= 300 { - error!("Failed to register topic {}. The ENR would be a total of {} bytes if this topic was registered, the maximum size is 300 bytes", topic.topic(), enr_size); - } - - if self.local_enr - .write() - .insert(ENR_KEY_TOPICS, &topics_field, &self.enr_key.write()) - .map_err(|e| error!("Failed to insert field 'topics' into local enr. Error {:?}", e)).is_ok() { - - self.init_topic_kbuckets(topic_hash); - METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); - - // To fill the kbuckets closest to the topic hash as well as those further away - // (itertively getting closer to node ids to the topic hash) start a find node - // query searching for the topic hash's bytes wrapped in a NodeId. 
- let topic_key = NodeId::new(&topic_hash.as_bytes()); - self.start_findnode_query(QueryType::FindTopic(topic_key), None); - } - } + self.start_register_topic(topic); } ServiceRequest::ActiveTopics(callback) => { - let mut active_topics = HashMap::>::new(); - self.registration_attempts.iter_mut().for_each(|(topic, reg_attempts_by_distance)| { - for reg_attempts in reg_attempts_by_distance.values_mut() { - reg_attempts.reg_attempts.retain(|node_id, reg_state| { - match reg_state { - RegistrationState::Confirmed(insert_time) => { - if insert_time.elapsed() < AD_LIFETIME { - active_topics.entry(topic.hash()).or_default().push(*node_id); - true - } else { - false - } - } - RegistrationState::TicketLimit(insert_time) => insert_time.elapsed() < TICKET_LIMIT_DURATION, - RegistrationState::Ticket => true, - } - }); - } - }); - - if callback.send(Ok(active_topics)).is_err() { + if callback.send(Ok(self.get_active_topics())).is_err() { error!("Failed to return active topics"); } } @@ -876,6 +816,36 @@ impl Service { } } + fn get_active_topics(&mut self) -> HashMap> { + let mut active_topics = HashMap::>::new(); + self.registration_attempts + .iter_mut() + .for_each(|(topic, reg_attempts_by_distance)| { + for reg_attempts in reg_attempts_by_distance.values_mut() { + reg_attempts + .reg_attempts + .retain(|node_id, reg_state| match reg_state { + RegistrationState::Confirmed(insert_time) => { + if insert_time.elapsed() < AD_LIFETIME { + active_topics + .entry(topic.hash()) + .or_default() + .push(*node_id); + true + } else { + false + } + } + RegistrationState::TicketLimit(insert_time) => { + insert_time.elapsed() < TICKET_LIMIT_DURATION + } + RegistrationState::Ticket => true, + }); + } + }); + active_topics + } + fn init_topic_kbuckets(&mut self, topic_hash: TopicHash) { trace!("Initiating kbuckets for topic hash {}", topic_hash); @@ -929,6 +899,64 @@ impl Service { self.topics_kbuckets.insert(topic_hash, kbuckets); } + /// Starts the continuous process of registering a 
topic, i.e. advertising it by peers. + fn start_register_topic(&mut self, topic: Topic) { + let topic_hash = topic.hash(); + if self + .registration_attempts + .insert(topic.clone(), BTreeMap::new()) + .is_some() + { + warn!("The topic {} is already being advertised", topic.topic()); + } else { + let topics_field = if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { + let rlp = Rlp::new(topics); + let item_count = rlp.iter().count(); + let mut rlp_stream = RlpStream::new_list(item_count + 1); + for item in rlp.iter() { + if let Ok(data) = item.data().map_err(|e| debug_unreachable!("Topic item which was previously encoded in enr, cannot be decoded into data. Error {}", e)) { + rlp_stream.append(&data); + } + } + rlp_stream.append(&topic.topic().as_bytes()); + rlp_stream.out() + } else { + let mut rlp_stream = RlpStream::new_list(1); + rlp_stream.append(&topic.topic().as_bytes()); + rlp_stream.out() + }; + + let enr_size = self.local_enr.read().size() + topics_field.len(); + if enr_size >= 300 { + error!("Failed to register topic {}. The ENR would be a total of {} bytes if this topic was registered, the maximum size is 300 bytes", topic.topic(), enr_size); + } + + if self + .local_enr + .write() + .insert(ENR_KEY_TOPICS, &topics_field, &self.enr_key.write()) + .map_err(|e| { + error!( + "Failed to insert field 'topics' into local enr. Error {:?}", + e + ) + }) + .is_ok() + { + self.init_topic_kbuckets(topic_hash); + METRICS + .topics_to_publish + .store(self.registration_attempts.len(), Ordering::Relaxed); + + // To fill the kbuckets closest to the topic hash as well as those further away + // (itertively getting closer to node ids to the topic hash) start a find node + // query searching for the topic hash's bytes wrapped in a NodeId. + let topic_key = NodeId::new(&topic_hash.as_bytes()); + self.start_findnode_query(QueryType::FindTopic(topic_key), None); + } + } + } + /// Internal function that starts a topic registration. 
This function should not be called outside of [`REGISTER_INTERVAL`]. fn send_register_topics(&mut self, topic: Topic) -> usize { trace!("Sending REGTOPICS"); From bdb5974194144e8f32973cd4596e7694781ac15d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 11:26:52 +0200 Subject: [PATCH 362/391] Fix bug of replacing a topics registration attempts --- src/service.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/service.rs b/src/service.rs index 109160ca2..e6b9ca447 100644 --- a/src/service.rs +++ b/src/service.rs @@ -902,13 +902,11 @@ impl Service { /// Starts the continuous process of registering a topic, i.e. advertising it by peers. fn start_register_topic(&mut self, topic: Topic) { let topic_hash = topic.hash(); - if self - .registration_attempts - .insert(topic.clone(), BTreeMap::new()) - .is_some() - { + if self.registration_attempts.contains_key(&topic.clone()) { warn!("The topic {} is already being advertised", topic.topic()); } else { + self.registration_attempts + .insert(topic.clone(), BTreeMap::new()); let topics_field = if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { let rlp = Rlp::new(topics); let item_count = rlp.iter().count(); From 1e8e28db00f9cb3adc663bbf37b62d4019becc05 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 18:21:36 +0200 Subject: [PATCH 363/391] Make proper use of callbacks to ServiceRequests and fix misc typos --- .vscode/settings.json | 35 ++++++++ src/advertisement/topic.rs | 2 +- src/discv5.rs | 40 +++++---- src/discv5/test.rs | 4 +- src/error.rs | 16 +++- src/kbucket/bucket.rs | 10 +-- src/service.rs | 171 ++++++++++++++++++++----------------- 7 files changed, 172 insertions(+), 106 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..f498c89d9 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,35 @@ +{ + "cSpell.words": [ + "behaviour", + "Datagrams", + 
"discv", + "docsrs", + "ECDH", + "ENR's", + "enrs", + "FINDNODE", + "gossipsub", + "Hasher", + "HMAC", + "hotspots", + "initialise", + "Kademlia", + "kbucket", + "kbuckets", + "libp", + "Multiaddr", + "oneshot", + "preimage", + "REGCONFIRMATION", + "REGTOPIC", + "REGTOPICS", + "secp", + "sigpi", + "TOPICQUERY", + "unban", + "uncontacted", + "Uninitialised", + "unsetting", + "whoareyou" + ] +} \ No newline at end of file diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 7285eb332..fbd0af61e 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -148,7 +148,7 @@ impl Hash for Topic { } impl PartialEq for Topic { - /// Each hash algortihm used to publish a hashed topic (as XOR metric key) is in + /// Each hash algorithm used to publish a hashed topic (as XOR metric key) is in /// discv5 seen as its own [`Topic`] upon comparison. That means a topic string /// can be published/registered more than once using different [`Hasher`]s. fn eq(&self, other: &Topic) -> bool { diff --git a/src/discv5.rs b/src/discv5.rs index d4e633bc4..a7b709869 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -60,7 +60,7 @@ pub const ENR_KEY_TOPICS: &str = "topics"; // Discv5 versions. iota! { pub const VERSION_NAT: u8 = 1 << iota; - , VERISON_TOPICS + , VERSION_TOPICS } /// Check if a given peer supports one or more versions of the Discv5 protocol. @@ -89,7 +89,7 @@ mod test; /// Events that can be produced by the `Discv5` event stream. #[derive(Debug)] pub enum Discv5Event { - /// A node has been discovered from a FINDNODES request. + /// A node has been discovered from a FINDNODE request. /// /// The ENR of the node is returned. Various properties can be derived from the ENR. /// This happen spontaneously through queries as nodes return ENR's. These ENR's are not @@ -141,9 +141,9 @@ impl Discv5 { enr_key: CombinedKey, mut config: Discv5Config, ) -> Result { - // ensure the keypair matches the one that signed the enr. 
+ // ensure the key-pair matches the one that signed the enr. if local_enr.public_key() != enr_key.public() { - return Err("Provided keypair does not match the provided ENR"); + return Err("Provided key-pair does not match the provided ENR"); } // If an executor is not provided, assume a current tokio runtime is running. If not panic. @@ -177,7 +177,7 @@ impl Discv5 { if let Err(e) = local_enr .write() - .insert(ENR_KEY_VERSION, &[VERISON_TOPICS], &enr_key.write()) + .insert(ENR_KEY_VERSION, &[VERSION_TOPICS], &enr_key.write()) { error!("Failed writing to enr. Error {:?}", e); return Err("Failed to insert field 'version' into local enr"); @@ -308,7 +308,7 @@ impl Discv5 { nodes_to_send } - /// Mark a node in the routing table as `Disconnnected`. + /// Mark a node in the routing table as `Disconnected`. /// /// A `Disconnected` node will be present in the routing table and will be only /// used if there are no other `Connected` peers in the bucket. @@ -572,7 +572,7 @@ impl Discv5 { let topic = Topic::new(topic); let topic_hash = topic.hash(); - let event = ServiceRequest::TableEntriesIdTopic(topic_hash, callback_send); + let event = ServiceRequest::TableEntriesIdTopicKBuckets(topic_hash, callback_send); channel .send(event) @@ -626,12 +626,12 @@ impl Discv5 { } /// Removes a topic we do not wish to keep advertising on other nodes. This does not tell any nodes - /// we are currently adveritsed on to remove us as advertisements, however in the next registration + /// we are currently advertised on to remove us as advertisements, however in the next registration /// interval no registration attempts will be made for the topic. 
pub fn remove_topic( &self, topic_str: &'static str, - ) -> impl Future> + 'static { + ) -> impl Future> + 'static { let channel = self.clone_channel(); async move { @@ -645,7 +645,7 @@ impl Discv5 { .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; callback_recv.await.map_err(|e| { RequestError::ChannelFailed(format!( - "Failed to receive removed topic {}. Error {}", + "Failed to receive result from remove topic operation for topic {}. Error {}", topic_str, e )) })? @@ -658,7 +658,7 @@ impl Discv5 { /// registering a topic it must be removed by calling remove_topic. pub fn register_topic( &self, - topic: &'static str, + topic_str: &'static str, ) -> impl Future> + 'static { let channel = self.clone_channel(); @@ -666,24 +666,30 @@ impl Discv5 { let channel = channel .as_ref() .map_err(|_| RequestError::ServiceNotStarted)?; - let topic = Topic::new(topic); + let topic = Topic::new(topic_str); debug!( "Registering topic {} with topic hash {}", topic, topic.hash(), ); - let event = ServiceRequest::RegisterTopic(topic); + let (callback_send, callback_recv) = oneshot::channel(); + let event = ServiceRequest::RegisterTopic(topic, callback_send); // send the request channel .send(event) .await .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?; - Ok(()) + callback_recv.await.map_err(|e| { + RequestError::ChannelFailed(format!( + "Failed to receive result from register topic operation for topic {}. Error {}", + topic_str, e + )) + })? } } /// Retrieves the registration attempts for a given topic, either confirmed registrations that - /// are still active on other nodes or regsitration attempts that returned tickets we are + /// are still active on other nodes or registration attempts that returned tickets we are /// currently waiting on to expire (ticket wait time) before re-attempting registration at that /// same node. Caution! 
The returned map will also contain pub fn reg_attempts( @@ -705,7 +711,7 @@ impl Discv5 { .send(event) .await .map_err(|_| RequestError::ServiceNotStarted)?; - callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive regsitration attempts for topic {} with topic hash {}. Error {}", topic_str, topic_hash, e)))? + callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive registration attempts for topic {} with topic hash {}. Error {}", topic_str, topic_hash, e)))? } } /// Retrieves the topics that we have published on other nodes. @@ -761,7 +767,7 @@ impl Discv5 { "Failed to receive ads for topic {} with topic hash {}. Error {}", topic, topic_hash, e )) - })? + }) } } diff --git a/src/discv5/test.rs b/src/discv5/test.rs index f8c221cae..9288818a3 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -1,7 +1,7 @@ #![cfg(test)] use crate::{ - discv5::{CHECK_VERSION, ENR_KEY_VERSION, VERISON_TOPICS, VERSION_NAT}, + discv5::{CHECK_VERSION, ENR_KEY_VERSION, VERSION_NAT, VERSION_TOPICS}, kbucket, Discv5, *, }; use enr::{k256, CombinedKey, Enr, EnrBuilder, EnrKey, NodeId}; @@ -639,7 +639,7 @@ fn test_version_check() { .udp4(port) .build(&key) .unwrap(); - let supported_versions = VERISON_TOPICS | VERSION_NAT; + let supported_versions = VERSION_TOPICS | VERSION_NAT; enr.insert(ENR_KEY_VERSION, &[supported_versions], &key) .unwrap(); diff --git a/src/error.rs b/src/error.rs index 9603d90da..33afb6612 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,4 @@ -use crate::{handler::Challenge, node_info::NonContactable}; +use crate::{handler::Challenge, node_info::NonContactable, Topic}; use rlp::DecoderError; use std::fmt; @@ -101,6 +101,8 @@ pub enum RequestError { ChannelFailed(String), /// An invalid ENR was provided. InvalidEnr(&'static str), + /// Failed to update enr. + EnrWriteFailed, /// The remote's ENR is invalid. InvalidRemoteEnr, /// The remote returned and invalid packet. 
@@ -113,7 +115,7 @@ pub enum RequestError { EntropyFailure(&'static str), /// Finding nodes closest to a topic hash failed. TopicDistance(String), - /// A request that is responded with multiple respones + /// A request that is responded with multiple responses /// gets the wrong combination of responses. InvalidResponseCombo(String), /// A REGTOPIC request has sent a ticket that was not @@ -126,7 +128,15 @@ pub enum RequestError { InvalidWaitTime, /// A REGTOPIC tries to advertise a topic it does not /// list in its enr. - InvalidTopicsEnr, + InvalidEnrTopicsField, + /// The ENR can't fit the given topic into its topic field. + InsufficientSpaceEnr(Topic), + /// Neither a topic look up or registration has been done for the topic. + TopicKBucketsUninitialised, + /// Trying to stop registering a topic which isn't being registered. + TopicNotRegistered, + /// Trying to start registering a topic which is already in registration. + TopicAlreadyRegistered, } #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/src/kbucket/bucket.rs b/src/kbucket/bucket.rs index 9aaa7e8cb..f89c06c6b 100644 --- a/src/kbucket/bucket.rs +++ b/src/kbucket/bucket.rs @@ -217,7 +217,7 @@ pub enum FailureReason { /// The node didn't pass the table filter. TableFilter, /// The node didn't exist. - KeyNonExistant, + KeyNonExistent, /// The bucket was full. 
BucketFull, /// Cannot update self, @@ -467,10 +467,10 @@ where } UpdateResult::UpdatedPending } else { - UpdateResult::Failed(FailureReason::KeyNonExistant) + UpdateResult::Failed(FailureReason::KeyNonExistent) } } else { - UpdateResult::Failed(FailureReason::KeyNonExistant) + UpdateResult::Failed(FailureReason::KeyNonExistent) } } @@ -506,10 +506,10 @@ where pending.node.value = value; UpdateResult::UpdatedPending } else { - UpdateResult::Failed(FailureReason::KeyNonExistant) + UpdateResult::Failed(FailureReason::KeyNonExistent) } } else { - UpdateResult::Failed(FailureReason::KeyNonExistant) + UpdateResult::Failed(FailureReason::KeyNonExistent) } } diff --git a/src/service.rs b/src/service.rs index e6b9ca447..06a90ac7a 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,7 @@ use crate::{ topic::TopicHash, Ads, AD_LIFETIME, }, - discv5::{CHECK_VERSION, ENR_KEY_TOPICS, PERMIT_BAN_LIST, VERISON_TOPICS, VERSION_NAT}, + discv5::{CHECK_VERSION, ENR_KEY_TOPICS, PERMIT_BAN_LIST, VERSION_NAT, VERSION_TOPICS}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -193,20 +193,20 @@ pub enum ServiceRequest { TopicQuery(TopicHash, oneshot::Sender, RequestError>>), /// RegisterTopic publishes this node as an advertiser for a topic in a discv5 network /// until removed. - RegisterTopic(Topic), + RegisterTopic(Topic, oneshot::Sender>), /// Stops publishing this node as an advertiser for a topic. - RemoveTopic(Topic, oneshot::Sender>), + RemoveTopic(Topic, oneshot::Sender>), /// Retrieves the ads currently published by this node on other nodes in a discv5 network. ActiveTopics(oneshot::Sender>, RequestError>>), /// Retrieves the ads advertised for other nodes for a given topic. - Ads(TopicHash, oneshot::Sender, RequestError>>), + Ads(TopicHash, oneshot::Sender>), /// Retrieves the registration attempts active for a given topic. 
RegistrationAttempts( Topic, oneshot::Sender, RequestError>>, ), - /// Retrieves the node id of entries in a given topic's kbuckets by distance. - TableEntriesIdTopic( + /// Retrieves the node id of entries in a given topic's kbuckets by log2distance (bucket index). + TableEntriesIdTopicKBuckets( TopicHash, oneshot::Sender>, RequestError>>, ), @@ -561,8 +561,11 @@ impl Service { self.send_topic_queries(topic_hash, Some(callback)); } - ServiceRequest::RegisterTopic(topic) => { - self.start_register_topic(topic); + ServiceRequest::RegisterTopic(topic, callback) => { + let result = self.start_topic_registration(topic.clone()); + if callback.send(result).is_err() { + error!("Failed to return result of register topic operation for topic {}", topic); + } } ServiceRequest::ActiveTopics(callback) => { if callback.send(Ok(self.get_active_topics())).is_err() { @@ -570,33 +573,36 @@ impl Service { } } ServiceRequest::RemoveTopic(topic, callback) => { - if self.registration_attempts.remove(&topic).is_some() { + let result = if self.registration_attempts.remove(&topic).is_some() { METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); - if callback.send(Ok(topic.topic())).is_err() { - error!("Failed to return the removed topic {}", topic.topic()); - } + Ok(()) + } else { + Err(RequestError::TopicNotRegistered) + }; + if callback.send(result).is_err() { + error!("Failed to return the result of the remove topic operation for topic {}", topic); } } ServiceRequest::Ads(topic_hash, callback) => { let ads = self.ads.get_ad_nodes(topic_hash).map(|ad_node| ad_node.node_record().clone()).collect::>(); - if callback.send(Ok(ads)).is_err() { + if callback.send(ads).is_err() { error!("Failed to return ads for topic {}", topic_hash); } } ServiceRequest::RegistrationAttempts(topic_hash, callback) => { let reg_attempts = if let Some(reg_attempts) = self.registration_attempts.get(&topic_hash) { - reg_attempts.clone() + Ok(reg_attempts.clone()) } else { 
error!("Topic hash {} is not being registered", topic_hash); - BTreeMap::new() + Err(RequestError::TopicNotRegistered) }; - if callback.send(Ok(reg_attempts)).is_err() { + if callback.send(reg_attempts).is_err() { error!("Failed to return registration attempts for topic hash {}", topic_hash); } } - ServiceRequest::TableEntriesIdTopic(topic_hash, callback) => { - let mut table_entries = BTreeMap::new(); - if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { + ServiceRequest::TableEntriesIdTopicKBuckets(topic_hash, callback) => { + let table_entries = if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { + let mut entries = BTreeMap::new(); for (index, bucket) in kbuckets.buckets_iter().enumerate() { // The bucket's index in the Vec of buckets in the kbucket table will // be one less than the distance as the log2distance 0 from the local @@ -604,10 +610,13 @@ impl Service { let distance = index as Log2Distance + 1; let mut node_ids = Vec::new(); bucket.iter().for_each(|node| node_ids.push(*node.key.preimage())); - table_entries.insert(distance, node_ids); + entries.insert(distance, node_ids); } - } - if callback.send(Ok(table_entries)).is_err() { + Ok(entries) + } else { + Err(RequestError::TopicKBucketsUninitialised) + }; + if callback.send(table_entries).is_err() { error!("Failed to return table entries' ids for topic hash {}", topic_hash); } } @@ -638,7 +647,7 @@ impl Service { // do not know of this peer debug!("NodeId unknown, requesting ENR. 
{}", whoareyou_ref.0); if let Err(e) = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, None)) { - warn!("Failed to send who are you to unknonw enr peer {}", e); + warn!("Failed to send who are you to unknown enr peer {}", e); } } } @@ -693,7 +702,7 @@ impl Service { let mut discovered_new_peer = false; if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { for enr in found_enrs { - if !CHECK_VERSION(&enr, vec![VERISON_TOPICS, VERISON_TOPICS|VERSION_NAT]) { + if !CHECK_VERSION(&enr, vec![VERSION_TOPICS, VERSION_TOPICS|VERSION_NAT]) { continue; } trace!("Found new peer {} for topic {}", enr, topic_hash); @@ -719,7 +728,7 @@ impl Service { NodeId::new(&topic_hash.as_bytes()).into(); if let Some(distance) = peer_key.log2_distance(&topic_key) { let bucket = discovered_peers.entry(distance).or_default(); - // If the intermediary storage before the topic's kbucktes is at bounds, discard the + // If the intermediary storage before the topic's kbuckets is at bounds, discard the // uncontacted peers. 
if bucket.len() < MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET { bucket.insert(node_id, enr.clone()); @@ -801,7 +810,7 @@ impl Service { let mut sent_regtopics = 0; let mut topic_item = topics_to_reg_iter.next(); while let Some((topic, _topic_hash)) = topic_item { - trace!("Publishing topic {} with hash {}", topic.topic(), topic.hash()); + trace!("Publishing topic {} with hash {}", topic, topic.hash()); sent_regtopics += self.send_register_topics(topic.clone()); if sent_regtopics >= MAX_REGTOPICS_REGISTER_PER_INTERVAL { break @@ -876,7 +885,7 @@ impl Service { for entry in self.kbuckets.write().iter() { let enr = entry.node.value.clone(); - if !CHECK_VERSION(&enr, vec![VERISON_TOPICS, VERISON_TOPICS | VERSION_NAT]) { + if !CHECK_VERSION(&enr, vec![VERSION_TOPICS, VERSION_TOPICS | VERSION_NAT]) { continue; } match kbuckets.insert_or_update(entry.node.key, enr, entry.status) { @@ -899,58 +908,64 @@ impl Service { self.topics_kbuckets.insert(topic_hash, kbuckets); } - /// Starts the continuous process of registering a topic, i.e. advertising it by peers. - fn start_register_topic(&mut self, topic: Topic) { + /// Starts the continuous process of registering a topic, i.e. advertising it at peers. + fn start_topic_registration(&mut self, topic: Topic) -> Result<(), RequestError> { let topic_hash = topic.hash(); - if self.registration_attempts.contains_key(&topic.clone()) { - warn!("The topic {} is already being advertised", topic.topic()); - } else { - self.registration_attempts - .insert(topic.clone(), BTreeMap::new()); - let topics_field = if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { - let rlp = Rlp::new(topics); - let item_count = rlp.iter().count(); - let mut rlp_stream = RlpStream::new_list(item_count + 1); - for item in rlp.iter() { - if let Ok(data) = item.data().map_err(|e| debug_unreachable!("Topic item which was previously encoded in enr, cannot be decoded into data. 
Error {}", e)) { - rlp_stream.append(&data); - } + if self.registration_attempts.contains_key(&topic) { + warn!("The topic {} is already being advertised", topic); + return Err(RequestError::TopicAlreadyRegistered); + } + self.registration_attempts + .insert(topic.clone(), BTreeMap::new()); + + let topics_field = if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { + let rlp = Rlp::new(topics); + let item_count = rlp.iter().count(); + let mut rlp_stream = RlpStream::new_list(item_count + 1); + for item in rlp.iter() { + if let Ok(data) = item.data().map_err(|e| debug_unreachable!("Topic item which was previously encoded in enr, cannot be decoded into data. Error {}", e)) { + rlp_stream.append(&data); } - rlp_stream.append(&topic.topic().as_bytes()); - rlp_stream.out() - } else { - let mut rlp_stream = RlpStream::new_list(1); - rlp_stream.append(&topic.topic().as_bytes()); - rlp_stream.out() - }; - - let enr_size = self.local_enr.read().size() + topics_field.len(); - if enr_size >= 300 { - error!("Failed to register topic {}. The ENR would be a total of {} bytes if this topic was registered, the maximum size is 300 bytes", topic.topic(), enr_size); } + rlp_stream.append(&topic.topic().as_bytes()); + rlp_stream.out() + } else { + let mut rlp_stream = RlpStream::new_list(1); + rlp_stream.append(&topic.topic().as_bytes()); + rlp_stream.out() + }; - if self - .local_enr + let enr_size = self.local_enr.read().size() + topics_field.len(); + if enr_size >= 300 { + error!("Failed to register topic {}. The ENR would be a total of {} bytes if this topic was registered, the maximum size is 300 bytes", topic.topic(), enr_size); + return Err(RequestError::InsufficientSpaceEnr(topic)); + } + + let result = + self.local_enr .write() - .insert(ENR_KEY_TOPICS, &topics_field, &self.enr_key.write()) - .map_err(|e| { - error!( - "Failed to insert field 'topics' into local enr. 
Error {:?}", - e - ) - }) - .is_ok() - { + .insert(ENR_KEY_TOPICS, &topics_field, &self.enr_key.write()); + + match result { + Err(e) => { + error!( + "Failed to insert field 'topics' into local enr. Error {:?}", + e + ); + Err(RequestError::EnrWriteFailed) + } + Ok(_) => { self.init_topic_kbuckets(topic_hash); METRICS .topics_to_publish .store(self.registration_attempts.len(), Ordering::Relaxed); // To fill the kbuckets closest to the topic hash as well as those further away - // (itertively getting closer to node ids to the topic hash) start a find node + // (iteratively getting closer to node ids to the topic hash) start a find node // query searching for the topic hash's bytes wrapped in a NodeId. let topic_key = NodeId::new(&topic_hash.as_bytes()); self.start_findnode_query(QueryType::FindTopic(topic_key), None); + Ok(()) } } } @@ -1087,7 +1102,7 @@ impl Service { }); // Attempt to query max_topic_query_peers peers at a time. Possibly some peers will return more than one result - // (ADNODES of length > 1), or no results will be returned from that peer. + // (NODES of length > 1), or no results will be returned from that peer. let max_topic_query_peers = self.config.max_nodes_response; let mut new_query_peers: Vec = Vec::new(); @@ -1381,7 +1396,7 @@ impl Service { warn!("The topic given in the REGTOPIC request body cannot be found in sender's 'topics' enr field. 
Blacklisting peer {}.", node_address.node_id); let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::InvalidTopicsEnr); + self.rpc_failure(id, RequestError::InvalidEnrTopicsField); return; } @@ -1452,7 +1467,7 @@ impl Service { self.send_ticket_response(node_address, id, topic, new_ticket, wait_time); } RequestBody::TopicQuery { topic } => { - self.send_topic_query_adnodes_response(node_address, id, topic); + self.send_topic_query_nodes_response(node_address, id, topic); } } } @@ -1526,7 +1541,7 @@ impl Service { } return; } else if !distances.is_empty() { - // This is a repsonse to a FINDNODE request with specifically request distances + // This is a response to a FINDNODE request with specifically request distances // Filter out any nodes that are not of the correct distance let peer_key: kbucket::Key = node_id.into(); @@ -1992,7 +2007,7 @@ impl Service { /// Response to a topic query containing the nodes currently advertised for the /// requested topic if any. - fn send_topic_query_adnodes_response( + fn send_topic_query_nodes_response( &mut self, node_address: NodeAddress, rpc_id: RequestId, @@ -2110,14 +2125,14 @@ impl Service { for enr in nodes_to_send.into_iter() { let entry_size = rlp::encode(&enr).len(); // Responses assume that a session is established. Thus, on top of the encoded - // ENR's the packet should be a regular message. A regular message has an IV (16 - // bytes), and a header of 55 bytes. The find-nodes RPC requires 16 bytes for the ID and the + // ENRs the packet should be a regular message. A regular message has an IV (16 + // bytes), and a header of 55 bytes. The FINDNODE RPC requires 16 bytes for the ID and the // `total` field. Also there is a 16 byte HMAC for encryption and an extra byte for // RLP encoding. 
// - // We could also be responding via an autheader which can take up to 282 bytes in its + // We could also be responding via an auth header which can take up to 282 bytes in its // header. - // As most messages will be normal messages we will try and pack as many ENR's we + // As most messages will be normal messages we will try and pack as many ENRs we // can in and drop the response packet if a user requests an auth message of a very // packed response. // @@ -2316,7 +2331,7 @@ impl Service { /// Update the connection status of a node in the routing table. /// This tracks whether or not we should be pinging peers. Disconnected peers are removed from - /// the queue and newly added peers to the routing table (or topics kbucktes) are added to the queue. + /// the queue and newly added peers to the routing table (or topics kbuckets) are added to the queue. fn connection_updated( &mut self, node_id: NodeId, @@ -2390,7 +2405,7 @@ impl Service { enr.clone(), Some(ConnectionState::Connected), ) { - UpdateResult::Failed(FailureReason::KeyNonExistant) => {} + UpdateResult::Failed(FailureReason::KeyNonExistent) => {} UpdateResult::Failed(reason) => { self.peers_to_ping.remove(&node_id); debug!( @@ -2405,7 +2420,7 @@ impl Service { for kbuckets in self.topics_kbuckets.values_mut() { match kbuckets.update_node(&key, enr.clone(), Some(ConnectionState::Connected)) { - UpdateResult::Failed(FailureReason::KeyNonExistant) => {} + UpdateResult::Failed(FailureReason::KeyNonExistent) => {} UpdateResult::Failed(reason) => { self.peers_to_ping.remove(&node_id); debug!( @@ -2432,7 +2447,7 @@ impl Service { // If the node has disconnected, remove any ping timer for the node. match update_result { UpdateResult::Failed(reason) => match reason { - FailureReason::KeyNonExistant => {} + FailureReason::KeyNonExistent => {} others => { warn!( "Could not update node to disconnected. 
Node: {}, Reason: {:?}", From a4e4d0bcc881cc22228d3e8bbb9d1bdf6a666036 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 18:37:17 +0200 Subject: [PATCH 364/391] Stop some continued registrations for a removed topic --- src/advertisement/ticket.rs | 12 +++++++++++- src/service.rs | 2 ++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index e1b13f15f..1d5229398 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -109,6 +109,16 @@ impl Tickets { .insert_at(active_topic, ActiveTicket::new(contact, ticket), wait_time); Ok(()) } + + /// Removes all tickets held for the given topic. + pub fn remove(&mut self, topic: &Topic) { + for (active_topic, _) in self.tickets.iter() { + if active_topic.topic() == topic { + self.tickets.remove(active_topic); + } + } + } + } impl Stream for Tickets { @@ -153,7 +163,7 @@ pub struct TicketHistory { /// The ticket_count keeps track of how many tickets are stored for the /// ActiveTopic. ticket_count: HashMap, - /// Up to MAX_TICKETS_PER_NODE_TOPIC PendingTickets in expirations maps + /// Up to [`MAX_TICKETS_PER_NODE_TOPIC`] PendingTickets in expirations map /// to an ActiveTopic in ticket_count. expirations: VecDeque, /// The time a PendingTicket remains in expirations. diff --git a/src/service.rs b/src/service.rs index 06a90ac7a..ae657dea7 100644 --- a/src/service.rs +++ b/src/service.rs @@ -573,6 +573,8 @@ impl Service { } } ServiceRequest::RemoveTopic(topic, callback) => { + self.tickets.remove(&topic); + let result = if self.registration_attempts.remove(&topic).is_some() { METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); Ok(()) From eb92b9b62011936a41204cb1a3ecca710aeab391 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 18:44:31 +0200 Subject: [PATCH 365/391] fixup! 
Stop some continued registrations for a removed topic --- src/advertisement/ticket.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 1d5229398..834306fa5 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -112,11 +112,7 @@ impl Tickets { /// Removes all tickets held for the given topic. pub fn remove(&mut self, topic: &Topic) { - for (active_topic, _) in self.tickets.iter() { - if active_topic.topic() == topic { - self.tickets.remove(active_topic); - } - } + self.tickets.retain(|active_topic, _| active_topic.topic() != topic); } } From c79db974419e7db1cd55e7167230574dc19347eb Mon Sep 17 00:00:00 2001 From: Emilia Hane <58548332+emhane@users.noreply.github.com> Date: Mon, 5 Sep 2022 19:15:42 +0200 Subject: [PATCH 366/391] Fix typo Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index ae657dea7..bf04707d6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -65,7 +65,7 @@ mod ip_vote; mod query_info; mod test; -/// The log2distance between to keys. +/// The log2distance between two keys. pub type Log2Distance = u64; /// The number of distances (buckets) we simultaneously request from each peer. 
From a142ad2b052f8880d51cc159cd7db15a9ccfd8c4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 20:02:51 +0200 Subject: [PATCH 367/391] Misc fixes --- src/advertisement/mod.rs | 10 +++++----- src/advertisement/ticket.rs | 30 +++++++++++++++--------------- src/discv5.rs | 6 +++++- src/service.rs | 16 +++++++++++----- src/service/test.rs | 2 +- 5 files changed, 37 insertions(+), 27 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index e90d5b3b6..cf0abb51b 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -18,7 +18,7 @@ mod test; pub mod ticket; pub mod topic; -/// The max nodes to adveritse for a topic. +/// The max nodes to advertise for a topic. const MAX_ADS_TOPIC: usize = 100; /// The max nodes to advertise. @@ -78,7 +78,7 @@ impl AdTopic { } } -/// The Ads struct contains adveritsed AdNodes. Topics table is used to refer to +/// The Ads struct contains advertised AdNodes. Topics table is used to refer to /// all the ads, and the table stores ads by topic. #[derive(Clone, Debug)] pub struct Ads { @@ -90,10 +90,10 @@ pub struct Ads { /// The ad_lifetime is specified by the spec but can be modified for /// testing purposes. ad_lifetime: Duration, - /// The max_ads_per_topic limit is up to the user although recommnedations + /// The max_ads_per_topic limit is up to the user although recommendations /// are given in the specs. max_ads_topic: usize, - /// The max_ads limit is up to the user although recommnedations are + /// The max_ads limit is up to the user although recommendations are /// given in the specs. max_ads: usize, /// Max ads per subnet for the whole table, @@ -160,7 +160,7 @@ impl Ads { self.ads.get(&topic).into_iter().flatten() } - /// Ticket wait time enforces diversity among adveritsed nodes. The ticket wait time is + /// Ticket wait time enforces diversity among advertised nodes. 
The ticket wait time is /// calculated after removing expired entries based on the current state of the topics /// table (ads). pub fn ticket_wait_time( diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs index 834306fa5..5d3a4a8ac 100644 --- a/src/advertisement/ticket.rs +++ b/src/advertisement/ticket.rs @@ -5,15 +5,15 @@ use enr::NodeId; use node_info::NodeContact; use std::{cmp::Eq, hash::Hash}; -/// The max wait time accpeted for tickets. +/// The max wait time accepted for tickets. pub const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; -/// The time window within which the number of new tickets from a peer for a topic will be limitied. +/// The time window within which the number of new tickets from a peer for a topic will be limited. pub const TICKET_LIMIT_DURATION: Duration = Duration::from_secs(60 * 15); /// Max tickets that are stored for an individual node for a topic (in the configured /// time period). -pub const MAX_TICKETS_NODE_TOPIC: u8 = 3; +pub const MAX_TICKETS_PER_NODE_PER_TOPIC: u8 = 3; /// A topic is active when it's associated with the NodeId from a node it is /// published on. @@ -71,7 +71,7 @@ impl ActiveTicket { } } -/// Tickets holds the tickets recieved in TICKET responses to locally initiated +/// Tickets holds the tickets received in TICKET responses to locally initiated /// REGTOPIC requests. pub struct Tickets { /// Tickets maps an [`ActiveTopic`] to an [`ActiveTicket`]. @@ -112,9 +112,9 @@ impl Tickets { /// Removes all tickets held for the given topic. pub fn remove(&mut self, topic: &Topic) { - self.tickets.retain(|active_topic, _| active_topic.topic() != topic); + self.tickets + .retain(|active_topic, _| active_topic.topic() != topic); } - } impl Stream for Tickets { @@ -143,23 +143,23 @@ impl Stream for Tickets { /// A PendingTicket maps to a Ticket received by another node in Tickets upon insert. 
#[derive(Clone)] struct PendingTicket { - /// The ActiveTopic serves to match the Ticket to an entry in Tickets' + /// The [`ActiveTopic`] serves to match the [`ActiveTicket`] to an entry in [`Tickets`]' /// tickets HashMapDelay. active_topic: ActiveTopic, - /// The insert_time is used to check MAX_TICKETS_PER_NODE_TOPIC against + /// The insert_time is used to check [`MAX_TICKETS_PER_NODE_PER_TOPIC`] against /// the ticket_limiter_duration. insert_time: Instant, } /// TicketHistory keeps track of how many times a ticket was replaced for -/// an ActiveTopic within the time limit given by ticket_limiter_duration -/// and limits it to MAX_TICKETS_PER_NODE_TOPIC times. +/// an [`ActiveTopic`] within the time limit given by ticket_limiter_duration +/// and limits it to [`MAX_TICKETS_PER_NODE_PER_TOPIC`] times. #[derive(Default)] pub struct TicketHistory { /// The ticket_count keeps track of how many tickets are stored for the /// ActiveTopic. ticket_count: HashMap, - /// Up to [`MAX_TICKETS_PER_NODE_TOPIC`] PendingTickets in expirations map + /// Up to [`MAX_TICKETS_PER_NODE_PER_TOPIC`] PendingTickets in expirations map /// to an ActiveTopic in ticket_count. expirations: VecDeque, /// The time a PendingTicket remains in expirations. @@ -176,17 +176,17 @@ impl TicketHistory { } /// Inserts a ticket into [`TicketHistory`] unless the ticket of the given active - /// topic has already been updated the limit amount of [`MAX_TICKETS_NODE_TOPIC`] + /// topic has already been updated the limit amount of [`MAX_TICKETS_PER_NODE_PER_TOPIC`] /// times per ticket limit duration, then it is discarded and an error is returned. /// Expired entries are removed before insertion. 
pub fn insert(&mut self, active_topic: ActiveTopic) -> Result<(), &str> { self.remove_expired(); let insert_time = Instant::now(); let count = self.ticket_count.entry(active_topic.clone()).or_default(); - if *count >= MAX_TICKETS_NODE_TOPIC { + if *count >= MAX_TICKETS_PER_NODE_PER_TOPIC { debug!( "Max {} tickets per NodeId - Topic mapping accepted in {} minutes", - MAX_TICKETS_NODE_TOPIC, + MAX_TICKETS_PER_NODE_PER_TOPIC, self.ticket_limit_duration.as_secs() ); return Err("Ticket limit reached"); @@ -201,7 +201,7 @@ impl TicketHistory { /// Removes entries that have been stored for at least the ticket limit duration. /// If the same [`ActiveTopic`] is inserted again the count up till - /// [`MAX_TICKETS_NODE_TOPIC`] inserts/updates starts anew. + /// [`MAX_TICKETS_PER_NODE_PER_TOPIC`] inserts/updates starts anew. fn remove_expired(&mut self) { let now = Instant::now(); let ticket_limiter_duration = self.ticket_limit_duration; diff --git a/src/discv5.rs b/src/discv5.rs index a7b709869..70d83c3b3 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -53,6 +53,10 @@ pub static HASH: for<'a> fn(topic: &'a str) -> TopicHash = |topic| { sha256_topic.hash() }; +/// The duration which pending entries have to be dormant before they are considered +/// for insertion in a kbucket. +pub(crate) const KBUCKET_PENDING_TIMEOUT: Duration = Duration::from_secs(60); + /// Custom ENR keys. 
const ENR_KEY_VERSION: &str = "version"; pub const ENR_KEY_TOPICS: &str = "topics"; @@ -167,7 +171,7 @@ impl Discv5 { let enr_key = Arc::new(RwLock::new(enr_key)); let kbuckets = Arc::new(RwLock::new(KBucketsTable::new( local_enr.read().node_id().into(), - Duration::from_secs(60), + KBUCKET_PENDING_TIMEOUT, config.incoming_bucket_limit, table_filter, bucket_filter, diff --git a/src/service.rs b/src/service.rs index ae657dea7..857136e7c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,10 @@ use crate::{ topic::TopicHash, Ads, AD_LIFETIME, }, - discv5::{CHECK_VERSION, ENR_KEY_TOPICS, PERMIT_BAN_LIST, VERSION_NAT, VERSION_TOPICS}, + discv5::{ + CHECK_VERSION, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST, VERSION_NAT, + VERSION_TOPICS, + }, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -699,7 +702,10 @@ impl Service { if callback.send(found_enrs).is_err() { warn!("Callback dropped for query {}. Results dropped", *id); } - } else if let QueryType::FindTopic(topic_key) = query_type { + return; + } + + if let QueryType::FindTopic(topic_key) = query_type { let topic_hash = TopicHash::from_raw(topic_key.raw()); let mut discovered_new_peer = false; if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { @@ -736,7 +742,7 @@ impl Service { bucket.insert(node_id, enr.clone()); discovered_new_peer = true; } else { - warn!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); + debug!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); } } false @@ -874,7 +880,7 @@ impl Service { let mut kbuckets = KBucketsTable::new( NodeId::new(&topic_hash.as_bytes()).into(), - Duration::from_secs(60), + KBUCKET_PENDING_TIMEOUT, self.config.incoming_bucket_limit, table_filter, bucket_filter, @@ -974,7 +980,7 @@ impl Service { /// Internal function that starts a topic registration. 
This function should not be called outside of [`REGISTER_INTERVAL`]. fn send_register_topics(&mut self, topic: Topic) -> usize { - trace!("Sending REGTOPICS"); + trace!("Sending REGTOPICS for topic {}", topic); let topic_hash = topic.hash(); if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { trace!( diff --git a/src/service/test.rs b/src/service/test.rs index 51277af89..0f97641cf 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -69,7 +69,7 @@ async fn build_service( let kbuckets = Arc::new(RwLock::new(KBucketsTable::new( local_enr.read().node_id().into(), - Duration::from_secs(60), + KBUCKET_PENDING_TIMEOUT, config.incoming_bucket_limit, table_filter, bucket_filter, From ace89ab170767a2008f8dd6b1e92bdc0cd5b7343 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 5 Sep 2022 20:10:02 +0200 Subject: [PATCH 368/391] Ignore VS Code custom spelling settings file --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index be506a9f6..ed0044a24 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,6 @@ Cargo.lock # VIM swap files *.sw[op] + +# VS Code settings files +/.vscode/ \ No newline at end of file From 2068253f0ec652db4d5129e078a291bbbcef94c7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 6 Sep 2022 16:04:34 +0200 Subject: [PATCH 369/391] Properly implement version check --- Cargo.toml | 1 - src/discv5.rs | 42 +++++++++++++++++++----------------------- src/discv5/test.rs | 6 +++--- src/service.rs | 9 +++------ 4 files changed, 25 insertions(+), 33 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b0a65a06c..fe2c47a4f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,7 +41,6 @@ hashlink = "0.7.0" delay_map = "0.1.1" more-asserts = "0.2.2" base64 = "0.13.0" -iota = "0.2.2" [dev-dependencies] rand_07 = { package = "rand", version = "0.7" } diff --git a/src/discv5.rs b/src/discv5.rs index 70d83c3b3..ab7dd7ab7 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -24,7 +24,6 @@ use 
crate::{ Discv5Config, Enr, Topic, }; use enr::{CombinedKey, EnrError, EnrKey, NodeId}; -use iota::iota; use parking_lot::RwLock; use std::{ collections::{BTreeMap, HashMap}, @@ -61,32 +60,29 @@ pub(crate) const KBUCKET_PENDING_TIMEOUT: Duration = Duration::from_secs(60); const ENR_KEY_VERSION: &str = "version"; pub const ENR_KEY_TOPICS: &str = "topics"; -// Discv5 versions. -iota! { - pub const VERSION_NAT: u8 = 1 << iota; - , VERSION_TOPICS +/// Discv5 versions. +pub enum Version { + /// The protocol for advertising and looking up to topics in Discv5 is supported. + Topics = 1, } -/// Check if a given peer supports one or more versions of the Discv5 protocol. -/// Returns true if any of the given versions is supported. -pub const CHECK_VERSION: fn(peer: &Enr, supported_versions: Vec) -> bool = - |peer, supported_versions| { - if let Some(version) = peer.get(ENR_KEY_VERSION) { - if let Some(v) = version.first() { - // Only add nodes which support the topics version - supported_versions.contains(v) - } else { - error!("Version field in enr of peer {} is empty", peer.node_id()); - false - } +/// Check if a given peer supports a given version of the Discv5 protocol. +pub const CHECK_VERSION: fn(peer: &Enr, version: Version) -> bool = |peer, version| { + if let Some(supported_versions) = peer.get(ENR_KEY_VERSION) { + if let Some(supported_versions) = supported_versions.first() { + let version_num = version as u8; + supported_versions & version_num == version_num } else { - warn!( - "Enr of peer {} doesn't contain field 'version'", - peer.node_id() - ); false } - }; + } else { + warn!( + "Enr of peer {} doesn't contain field 'version'", + peer.node_id() + ); + false + } +}; mod test; @@ -181,7 +177,7 @@ impl Discv5 { if let Err(e) = local_enr .write() - .insert(ENR_KEY_VERSION, &[VERSION_TOPICS], &enr_key.write()) + .insert(ENR_KEY_VERSION, &[Version::Topics as u8], &enr_key.write()) { error!("Failed writing to enr. 
Error {:?}", e); return Err("Failed to insert field 'version' into local enr"); diff --git a/src/discv5/test.rs b/src/discv5/test.rs index 9288818a3..8abfadecc 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -1,7 +1,7 @@ #![cfg(test)] use crate::{ - discv5::{CHECK_VERSION, ENR_KEY_VERSION, VERSION_NAT, VERSION_TOPICS}, + discv5::{Version, CHECK_VERSION, ENR_KEY_VERSION}, kbucket, Discv5, *, }; use enr::{k256, CombinedKey, Enr, EnrBuilder, EnrKey, NodeId}; @@ -639,9 +639,9 @@ fn test_version_check() { .udp4(port) .build(&key) .unwrap(); - let supported_versions = VERSION_TOPICS | VERSION_NAT; + let supported_versions = Version::Topics as u8 | 2; enr.insert(ENR_KEY_VERSION, &[supported_versions], &key) .unwrap(); - assert!(CHECK_VERSION(&enr, vec!(supported_versions))); + assert!(CHECK_VERSION(&enr, Version::Topics)); } diff --git a/src/service.rs b/src/service.rs index 5cc66745b..bdc00e25a 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,10 +22,7 @@ use crate::{ topic::TopicHash, Ads, AD_LIFETIME, }, - discv5::{ - CHECK_VERSION, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST, VERSION_NAT, - VERSION_TOPICS, - }, + discv5::{Version, CHECK_VERSION, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -710,7 +707,7 @@ impl Service { let mut discovered_new_peer = false; if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { for enr in found_enrs { - if !CHECK_VERSION(&enr, vec![VERSION_TOPICS, VERSION_TOPICS|VERSION_NAT]) { + if !CHECK_VERSION(&enr, Version::Topics) { continue; } trace!("Found new peer {} for topic {}", enr, topic_hash); @@ -893,7 +890,7 @@ impl Service { for entry in self.kbuckets.write().iter() { let enr = entry.node.value.clone(); - if !CHECK_VERSION(&enr, vec![VERSION_TOPICS, VERSION_TOPICS | VERSION_NAT]) { + if !CHECK_VERSION(&enr, Version::Topics) { continue; } match 
kbuckets.insert_or_update(entry.node.key, enr, entry.status) { From ec1c50ef290ffa105ebbf3b9b6ae3831be94aed1 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 6 Sep 2022 16:06:16 +0200 Subject: [PATCH 370/391] fixup! Ignore VS Code custom spelling settings file --- .vscode/settings.json | 35 ----------------------------------- 1 file changed, 35 deletions(-) delete mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index f498c89d9..000000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "cSpell.words": [ - "behaviour", - "Datagrams", - "discv", - "docsrs", - "ECDH", - "ENR's", - "enrs", - "FINDNODE", - "gossipsub", - "Hasher", - "HMAC", - "hotspots", - "initialise", - "Kademlia", - "kbucket", - "kbuckets", - "libp", - "Multiaddr", - "oneshot", - "preimage", - "REGCONFIRMATION", - "REGTOPIC", - "REGTOPICS", - "secp", - "sigpi", - "TOPICQUERY", - "unban", - "uncontacted", - "Uninitialised", - "unsetting", - "whoareyou" - ] -} \ No newline at end of file From 8dca2229dd119e4ca22e9867df1f9da0549cfc4a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 6 Sep 2022 18:58:01 +0200 Subject: [PATCH 371/391] Encapsulate RLP en-/decoding for topics enr field --- src/advertisement/topic.rs | 83 ++++++++++++++++++++++++++++++++++++-- src/service.rs | 38 +++++++++-------- 2 files changed, 98 insertions(+), 23 deletions(-) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index fbd0af61e..49d131b34 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. 
use base64::encode; -use rlp::{DecoderError, Rlp, RlpStream}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; use sha2::{Digest, Sha256}; use std::{fmt, hash::Hash}; use tracing::debug; @@ -78,13 +78,13 @@ impl TopicHash { } } -impl rlp::Encodable for TopicHash { +impl Encodable for TopicHash { fn rlp_append(&self, s: &mut RlpStream) { s.append(&self.hash.to_vec()); } } -impl rlp::Decodable for TopicHash { +impl Decodable for TopicHash { fn decode(rlp: &Rlp<'_>) -> Result { let topic = { let topic_bytes = rlp.data()?; @@ -163,3 +163,80 @@ impl fmt::Display for Topic { write!(f, "{}", self.topic) } } + +pub struct TopicsEnrField { + topics: Vec>, +} + +impl TopicsEnrField { + pub fn new(topics: Vec>) -> Self { + TopicsEnrField { topics } + } + + pub fn add(&mut self, topic: Topic) { + self.topics.push(topic); + } + + pub fn encode(&self) -> Vec { + let mut buf = Vec::new(); + let mut s = RlpStream::new(); + s.append(self); + buf.extend_from_slice(&s.out()); + buf + } + + pub fn decode(topics_field: &[u8]) -> Result, DecoderError> { + if !topics_field.is_empty() { + let rlp = Rlp::new(topics_field); + let topics = rlp.as_val::>()?; + return Ok(Some(topics)); + } + Ok(None) + } +} + +impl rlp::Encodable for TopicsEnrField { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.topics.len()); + for topic in self.topics.iter() { + s.append(&topic.topic().as_bytes()); + } + } +} + +impl rlp::Decodable for TopicsEnrField { + fn decode(rlp: &Rlp<'_>) -> Result { + if !rlp.is_list() { + debug!( + "Failed to decode ENR field 'topics'. 
Not an RLP list: {}", + rlp + ); + return Err(DecoderError::RlpExpectedToBeList); + } + + let item_count = rlp.iter().count(); + let mut decoded_list: Vec> = rlp.iter().collect(); + + let mut topics = Vec::new(); + + for _ in 0..item_count { + match decoded_list.remove(0).data() { + Ok(data) => match std::str::from_utf8(data) { + Ok(topic_string) => { + let topic = Topic::new(topic_string); + topics.push(topic); + } + Err(e) => { + debug!("Failed to decode topic as utf8. Error: {}", e); + return Err(DecoderError::Custom("Topic is not utf8 encoded")); + } + }, + Err(e) => { + debug!("Failed to decode item. Error: {}", e); + return Err(DecoderError::RlpExpectedToBeData); + } + } + } + Ok(TopicsEnrField { topics }) + } +} diff --git a/src/service.rs b/src/service.rs index bdc00e25a..78ae1be98 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ use crate::{ advertisement::{ ticket::{Tickets, MAX_WAIT_TIME_TICKET, TICKET_LIMIT_DURATION}, - topic::TopicHash, + topic::{TopicHash, TopicsEnrField}, Ads, AD_LIFETIME, }, discv5::{Version, CHECK_VERSION, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST}, @@ -47,7 +47,7 @@ use fnv::FnvHashMap; use futures::{future::select_all, prelude::*}; use more_asserts::debug_unreachable; use parking_lot::RwLock; -use rlp::{Rlp, RlpStream}; +use rlp::Rlp; use rpc::*; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, @@ -923,33 +923,31 @@ impl Service { self.registration_attempts .insert(topic.clone(), BTreeMap::new()); - let topics_field = if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { - let rlp = Rlp::new(topics); - let item_count = rlp.iter().count(); - let mut rlp_stream = RlpStream::new_list(item_count + 1); - for item in rlp.iter() { - if let Ok(data) = item.data().map_err(|e| debug_unreachable!("Topic item which was previously encoded in enr, cannot be decoded into data. 
Error {}", e)) { - rlp_stream.append(&data); + let topics_field = |topic: Topic| -> TopicsEnrField<_> { + if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { + if let Ok(Some(mut advertised_topics)) = TopicsEnrField::decode(topics) { + advertised_topics.add(topic); + return advertised_topics; } } - rlp_stream.append(&topic.topic().as_bytes()); - rlp_stream.out() - } else { - let mut rlp_stream = RlpStream::new_list(1); - rlp_stream.append(&topic.topic().as_bytes()); - rlp_stream.out() + let mut advertised_topics = TopicsEnrField::new(Vec::new()); + advertised_topics.add(topic); + advertised_topics }; - let enr_size = self.local_enr.read().size() + topics_field.len(); + let encoded_topics_field = topics_field(topic.clone()).encode(); + + let enr_size = self.local_enr.read().size() + encoded_topics_field.len(); if enr_size >= 300 { error!("Failed to register topic {}. The ENR would be a total of {} bytes if this topic was registered, the maximum size is 300 bytes", topic.topic(), enr_size); return Err(RequestError::InsufficientSpaceEnr(topic)); } - let result = - self.local_enr - .write() - .insert(ENR_KEY_TOPICS, &topics_field, &self.enr_key.write()); + let result = self.local_enr.write().insert( + ENR_KEY_TOPICS, + &encoded_topics_field, + &self.enr_key.write(), + ); match result { Err(e) => { From 08de697a57ee7fc67293650c4b873175b5d8fce9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 6 Sep 2022 19:19:40 +0200 Subject: [PATCH 372/391] fixup! 
Encapsulate RLP en-/decoding for topics enr field --- src/advertisement/mod.rs | 4 ++-- src/advertisement/topic.rs | 4 ++++ src/lib.rs | 2 ++ src/service.rs | 41 +++++++++++++++++--------------------- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs index cf0abb51b..21c8d5ca6 100644 --- a/src/advertisement/mod.rs +++ b/src/advertisement/mod.rs @@ -329,11 +329,11 @@ impl Ads { node_record.ip6().map(|ip6| ip6.octets()[0..=5].to_vec()) }; if let Some(subnet) = subnet { - let subnet_expirires = self + let subnet_expiries = self .subnet_expirations .entry(subnet) .or_insert_with(VecDeque::new); - subnet_expirires.push_back(now); + subnet_expiries.push_back(now); } let nodes = self.ads.entry(topic).or_default(); diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index 49d131b34..eb314873d 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -177,6 +177,10 @@ impl TopicsEnrField { self.topics.push(topic); } + pub fn topics_iter(&self) -> impl Iterator> { + self.topics.iter() + } + pub fn encode(&self) -> Vec { let mut buf = Vec::new(); let mut s = RlpStream::new(); diff --git a/src/lib.rs b/src/lib.rs index 288bed950..b13607b11 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -125,6 +125,8 @@ extern crate lazy_static; pub type Enr = enr::Enr; pub type Topic = crate::advertisement::topic::Sha256Topic; +pub type TopicsEnrField = + crate::advertisement::topic::TopicsEnrField; pub use crate::discv5::{Discv5, Discv5Event, HASH}; pub use config::{Discv5Config, Discv5ConfigBuilder}; diff --git a/src/service.rs b/src/service.rs index 78ae1be98..d6f7b9855 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,7 +19,7 @@ use self::{ use crate::{ advertisement::{ ticket::{Tickets, MAX_WAIT_TIME_TICKET, TICKET_LIMIT_DURATION}, - topic::{TopicHash, TopicsEnrField}, + topic::TopicHash, Ads, AD_LIFETIME, }, discv5::{Version, CHECK_VERSION, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, 
PERMIT_BAN_LIST}, @@ -35,7 +35,7 @@ use crate::{ query_pool::{ FindNodeQueryConfig, PredicateQueryConfig, QueryId, QueryPool, QueryPoolState, TargetKey, }, - rpc, Discv5Config, Discv5Event, Enr, IpMode, Topic, + rpc, Discv5Config, Discv5Event, Enr, IpMode, Topic, TopicsEnrField, }; use aes_gcm::{ aead::{generic_array::GenericArray, Aead, NewAead, Payload}, @@ -47,7 +47,6 @@ use fnv::FnvHashMap; use futures::{future::select_all, prelude::*}; use more_asserts::debug_unreachable; use parking_lot::RwLock; -use rlp::Rlp; use rpc::*; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, @@ -923,7 +922,7 @@ impl Service { self.registration_attempts .insert(topic.clone(), BTreeMap::new()); - let topics_field = |topic: Topic| -> TopicsEnrField<_> { + let topics_field = |topic: Topic| -> TopicsEnrField { if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { if let Ok(Some(mut advertised_topics)) = TopicsEnrField::decode(topics) { advertised_topics.add(topic); @@ -1381,21 +1380,20 @@ impl Service { } // Blacklist if node doesn't contain the given topic in its enr 'topics' field - let mut topic_in_enr = false; - if let Some(topics) = enr.get(ENR_KEY_TOPICS) { - let rlp = Rlp::new(topics); - for item in rlp.iter() { - if let Ok(data) = item.data().map_err(|e| error!("Could not decode a topic in topics field in enr of peer {}. Error {}", enr.node_id(), e)) { - if let Ok(topic_string) = std::str::from_utf8(data).map_err(|e| error!("Could not decode topic in topics field into utf8, in enr of peer {}. 
Error {}", enr.node_id(), e)) { - let topic_hash = Topic::new(topic_string).hash(); - if topic_hash == topic.hash() { - topic_in_enr = true; + let topic_in_enr = |topic_hash: &TopicHash| -> bool { + if let Some(topics) = enr.get(ENR_KEY_TOPICS) { + if let Ok(Some(advertised_topics)) = TopicsEnrField::decode(topics) { + for topic in advertised_topics.topics_iter() { + if topic_hash == &topic.hash() { + return true; } } } } - } - if !topic_in_enr { + false + }; + + if !topic_in_enr(&topic.hash()) { warn!("The topic given in the REGTOPIC request body cannot be found in sender's 'topics' enr field. Blacklisting peer {}.", node_address.node_id); let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); @@ -1649,14 +1647,11 @@ impl Service { } // Ads are checked for validity, if they do not contain the topic in their enr, they are discarded if let Some(topics) = enr.get(ENR_KEY_TOPICS) { - let rlp = Rlp::new(topics); - for item in rlp.iter() { - if let Ok(data) = item.data().map_err(|e| error!("Could not decode a topic in topics field in enr of peer {}. Error {}", enr.node_id(), e)) { - if let Ok(topic_string) = std::str::from_utf8(data).map_err(|e| error!("Could not decode topic in topics field into utf8, in enr of peer {}. 
Error {}", enr.node_id(), e)) { - let topic_hash = Topic::new(topic_string).hash(); - if &topic_hash == topic { - return true; - } + if let Ok(Some(advertised_topics)) = TopicsEnrField::decode(topics) + { + for advertised_topic in advertised_topics.topics_iter() { + if advertised_topic.hash() == *topic { + return true; } } } From bcd51f8f8347e061c277b151b2a5a924783172d6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 6 Sep 2022 19:39:42 +0200 Subject: [PATCH 373/391] Add test for en-/decoding topics enr field --- src/advertisement/topic.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs index eb314873d..bab276685 100644 --- a/src/advertisement/topic.rs +++ b/src/advertisement/topic.rs @@ -244,3 +244,28 @@ impl rlp::Decodable for TopicsEnrField { Ok(TopicsEnrField { topics }) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn encode_decode_topics_enr_field() { + let topics: Vec = vec![ + Topic::new("lighthouse"), + Topic::new("eth_syncing"), + Topic::new("eth_feeHistory"), + ]; + + let topics_field = TopicsEnrField::new(topics.clone()); + + let encoded = topics_field.encode(); + let decoded = TopicsEnrField::::decode(&encoded) + .unwrap() + .unwrap(); + + for (index, item) in decoded.topics_iter().enumerate() { + assert_eq!(item.topic(), topics[index].topic()); + } + } +} From 39aa82ae8e226c334a957482f39991f8f942868c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 7 Sep 2022 10:08:09 +0200 Subject: [PATCH 374/391] Lay ground for uncostly subscribing (repeated look up) of a topic and deflating state in terms of topics kbuckets --- src/discv5.rs | 10 +++--- src/error.rs | 6 ++-- src/service.rs | 79 +++++++++++++++++++++++++++++++++++---------- src/service/test.rs | 1 + 4 files changed, 72 insertions(+), 24 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index a2f9acc70..2d4634218 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -596,9 +596,8 @@ impl 
Discv5 { let (callback_send, callback_recv) = oneshot::channel(); let topic = Topic::new(topic); - let topic_hash = topic.hash(); - let event = ServiceRequest::TopicQuery(topic_hash, callback_send); + let event = ServiceRequest::TopicQuery(topic.clone(), callback_send); let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; // send the request @@ -610,13 +609,14 @@ impl Discv5 { let ad_nodes = callback_recv.await.map_err(|e| { RequestError::ChannelFailed(format!( "Failed to receive ad nodes from lookup of topic {} with topic hash {}. Error {}", - topic, topic_hash, e + topic, topic.hash(), e )) })?; if ad_nodes.is_ok() { debug!( "Received ad nodes for topic {} with topic hash {}", - topic, topic_hash + topic, + topic.hash() ); } ad_nodes @@ -636,7 +636,7 @@ impl Discv5 { let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; let (callback_send, callback_recv) = oneshot::channel(); let topic = Topic::new(topic_str); - let event = ServiceRequest::RemoveTopic(topic, callback_send); + let event = ServiceRequest::DeregisterTopic(topic, callback_send); channel .send(event) .await diff --git a/src/error.rs b/src/error.rs index 33afb6612..059c22f6b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -133,9 +133,11 @@ pub enum RequestError { InsufficientSpaceEnr(Topic), /// Neither a topic look up or registration has been done for the topic. TopicKBucketsUninitialised, - /// Trying to stop registering a topic which isn't being registered. + /// The topic isn't stored in the topic query history. + TopicNotQueried, + /// The topic isn't being registered. TopicNotRegistered, - /// Trying to start registering a topic which is already in registration. + /// The topic is already in registration. 
TopicAlreadyRegistered, } diff --git a/src/service.rs b/src/service.rs index d6f7b9855..d5867ac0b 100644 --- a/src/service.rs +++ b/src/service.rs @@ -49,7 +49,7 @@ use more_asserts::debug_unreachable; use parking_lot::RwLock; use rpc::*; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, io::Error, net::SocketAddr, pin::Pin, @@ -189,21 +189,25 @@ pub enum ServiceRequest { /// discovered nodes as it traverses the DHT. RequestEventStream(oneshot::Sender>), /// Starts a topic look up of nodes advertising a topic in a discv5 network. - TopicQuery(TopicHash, oneshot::Sender, RequestError>>), + TopicQuery(Topic, oneshot::Sender, RequestError>>), + /// Retrieves a list of previously looked up topics, i.e. topics pertaining a set of topic's kbuckets. + TopicQueryHistory(oneshot::Sender>), + /// Removes a topic from the [`ServiceRequest::TopicQueryHistory`]. + RemoveFromTopicQueryHistory(Topic, oneshot::Sender>), /// RegisterTopic publishes this node as an advertiser for a topic in a discv5 network /// until removed. RegisterTopic(Topic, oneshot::Sender>), - /// Stops publishing this node as an advertiser for a topic. - RemoveTopic(Topic, oneshot::Sender>), - /// Retrieves the ads currently published by this node on other nodes in a discv5 network. - ActiveTopics(oneshot::Sender>, RequestError>>), - /// Retrieves the ads advertised for other nodes for a given topic. - Ads(TopicHash, oneshot::Sender>), /// Retrieves the registration attempts active for a given topic. RegistrationAttempts( Topic, oneshot::Sender, RequestError>>, ), + /// Retrieves the ads currently published by this node on other nodes in a discv5 network. + ActiveTopics(oneshot::Sender>, RequestError>>), + /// Stops publishing this node as an advertiser for a topic. + DeregisterTopic(Topic, oneshot::Sender>), + /// Retrieves the ads advertised for other nodes for a given topic. 
+ Ads(TopicHash, oneshot::Sender>), /// Retrieves the node id of entries in a given topic's kbuckets by log2distance (bucket index). TableEntriesIdTopicKBuckets( TopicHash, @@ -265,6 +269,12 @@ pub struct Service { /// log2distance to the local node id. registration_attempts: HashMap>, + /// The topics that have been looked-up. Upon insertion a set of kbuckets is initialised for + /// the topic, if one didn't already exist from registration. Keeping these kbuckets until + /// a topic is manually removed from topic_lookups (and registration_attempts) makes the + /// repeated look-up for the same topic less costly. + topic_lookups: HashSet, + /// KBuckets per topic hash. topics_kbuckets: HashMap>, @@ -479,6 +489,7 @@ impl Service { event_stream: None, ads: Ads::default(), registration_attempts: HashMap::new(), + topic_lookups: Default::default(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), ticket_key: rand::random(), @@ -547,8 +558,14 @@ impl Service { error!("Failed to return the event stream channel"); } } - ServiceRequest::TopicQuery(topic_hash, callback) => { - // If we look up the topic hash for the first time we initialise its kbuckets. + ServiceRequest::TopicQuery(topic, callback) => { + // Store the topic to make sure the kbuckets for the topic persist for repeated + // look ups. + self.topic_lookups.insert(topic.clone()); + + let topic_hash = topic.hash(); + // If we look up the topic hash for the first time, and aren't registering it, + // we initialise its kbuckets. 
if let Entry::Vacant(_) = self.topics_kbuckets.entry(topic_hash) { self.init_topic_kbuckets(topic_hash); } @@ -560,28 +577,56 @@ impl Service { self.send_topic_queries(topic_hash, Some(callback)); } + ServiceRequest::RemoveFromTopicQueryHistory(topic, callback) => { + let result = if self.topic_lookups.remove(&topic) { + // If this topic isn't being registered, free the storage occupied by the topic's kbuckets + // and get rid of the overhead needed to maintain the those kbuckets. + if !self.registration_attempts.contains_key(&topic) { + self.topics_kbuckets.remove(&topic.hash()); + } + Ok(()) + } else { + Err(RequestError::TopicNotQueried) + }; + if callback.send(result).is_err() { + error!("Failed to return result of remove topic query operation for topic {}", topic); + } + } + ServiceRequest::TopicQueryHistory(callback) => { + if callback.send(self.topic_lookups.iter().cloned().collect::>()).is_err() { + error!("Failed to return topic query history"); + } + } ServiceRequest::RegisterTopic(topic, callback) => { let result = self.start_topic_registration(topic.clone()); if callback.send(result).is_err() { error!("Failed to return result of register topic operation for topic {}", topic); } } - ServiceRequest::ActiveTopics(callback) => { - if callback.send(Ok(self.get_active_topics())).is_err() { - error!("Failed to return active topics"); - } - } - ServiceRequest::RemoveTopic(topic, callback) => { + ServiceRequest::DeregisterTopic(topic, callback) => { + // If we have any pending tickets, discard those, i.e. don't return the ticket to the + // peer that issued it. self.tickets.remove(&topic); let result = if self.registration_attempts.remove(&topic).is_some() { METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); + // If this topic isn't being looked up, free the storage occupied by the topic's kbuckets + // and get rid of the overhead needed to maintain the those kbuckets. 
+ if !self.topic_lookups.contains(&topic) { + self.topics_kbuckets.remove(&topic.hash()); + } Ok(()) } else { Err(RequestError::TopicNotRegistered) }; + if callback.send(result).is_err() { - error!("Failed to return the result of the remove topic operation for topic {}", topic); + error!("Failed to return the result of the deregister topic operation for topic {}", topic); + } + } + ServiceRequest::ActiveTopics(callback) => { + if callback.send(Ok(self.get_active_topics())).is_err() { + error!("Failed to return active topics"); } } ServiceRequest::Ads(topic_hash, callback) => { diff --git a/src/service/test.rs b/src/service/test.rs index 0f97641cf..4c44b07e9 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -97,6 +97,7 @@ async fn build_service( ticket_key: rand::random(), tickets: Tickets::new(Duration::from_secs(60 * 15)), registration_attempts: HashMap::new(), + topic_lookups: Default::default(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), active_topic_queries: ActiveTopicQueries::new( From 82c62575811aa8264338c35c1811722b0cf0045d Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 7 Sep 2022 12:22:02 +0200 Subject: [PATCH 375/391] Fix bug of trying to register removed topic and misc grammar fixes --- examples/find_nodes.rs | 2 +- src/config.rs | 2 +- src/discv5.rs | 8 ++++---- src/handler/crypto/mod.rs | 2 +- src/handler/mod.rs | 2 +- src/ipmode.rs | 2 +- src/lib.rs | 2 +- src/query_pool/peers/predicate.rs | 2 +- src/rpc.rs | 2 +- src/service.rs | 28 +++++++++++++++++----------- 10 files changed, 29 insertions(+), 23 deletions(-) diff --git a/examples/find_nodes.rs b/examples/find_nodes.rs index f4bfd5798..8e7026e3d 100644 --- a/examples/find_nodes.rs +++ b/examples/find_nodes.rs @@ -175,7 +175,7 @@ async fn main() { match discv5.find_node(target_random_node_id).await { Err(e) => warn!("Find Node result failed: {:?}", e), Ok(v) => { - // found a list of ENR's print their NodeIds + // found a list of ENRs print their 
NodeIds let node_ids = v.iter().map(|enr| enr.node_id()).collect::>(); info!("Nodes found: {}", node_ids.len()); for node_id in node_ids { diff --git a/src/config.rs b/src/config.rs index 254225a2d..f671a35aa 100644 --- a/src/config.rs +++ b/src/config.rs @@ -68,7 +68,7 @@ pub struct Discv5Config { /// to contact an ENR. pub ip_mode: IpMode, - /// Reports all discovered ENR's when traversing the DHT to the event stream. Default true. + /// Reports all discovered ENRs when traversing the DHT to the event stream. Default true. pub report_discovered_peers: bool, /// A set of configuration parameters for setting inbound request rate limits. See diff --git a/src/discv5.rs b/src/discv5.rs index 2d4634218..d8a7feb26 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -92,7 +92,7 @@ pub enum Discv5Event { /// A node has been discovered from a FINDNODE request. /// /// The ENR of the node is returned. Various properties can be derived from the ENR. - /// This happen spontaneously through queries as nodes return ENR's. These ENR's are not + /// This happen spontaneously through queries as nodes return ENRs. These ENRs are not /// guaranteed to be live or contactable. Discovered(Enr), /// A node has been discovered from a FINDNODE request using the given TopiHash as key. @@ -239,7 +239,7 @@ impl Discv5 { /// operations involving one of these peers, without having to dial /// them upfront. pub fn add_enr(&self, enr: Enr) -> Result<(), &'static str> { - // only add ENR's that have a valid udp socket. + // only add ENRs that have a valid udp socket. if self.config.ip_mode.get_contactable_addr(&enr).is_none() { warn!("ENR attempted to be added without an UDP socket compatible with configured IpMode has been ignored."); return Err("ENR has no compatible UDP socket to connect to"); @@ -461,7 +461,7 @@ impl Discv5 { .collect() } - /// Returns an iterator over all the ENR's of nodes currently contained in the routing table. 
+ /// Returns an iterator over all the ENRs of nodes currently contained in the routing table. pub fn table_entries_enr(&self) -> Vec { self.kbuckets .write() @@ -636,7 +636,7 @@ impl Discv5 { let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?; let (callback_send, callback_recv) = oneshot::channel(); let topic = Topic::new(topic_str); - let event = ServiceRequest::DeregisterTopic(topic, callback_send); + let event = ServiceRequest::StopRegistrationOfTopic(topic, callback_send); channel .send(event) .await diff --git a/src/handler/crypto/mod.rs b/src/handler/crypto/mod.rs index 01d8adb2a..0fae15c16 100644 --- a/src/handler/crypto/mod.rs +++ b/src/handler/crypto/mod.rs @@ -42,7 +42,7 @@ type Key = [u8; KEY_LENGTH]; /* Session key generation */ /// Generates session and auth-response keys for a nonce and remote ENR. This currently only -/// supports Secp256k1 signed ENR's. This returns four keys; initiator key, responder key, auth +/// supports Secp256k1 signed ENRs. This returns four keys; initiator key, responder key, auth /// response key and the ephemeral public key. pub(crate) fn generate_session_keys( local_id: &NodeId, diff --git a/src/handler/mod.rs b/src/handler/mod.rs index dad7f013a..ea467ae01 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -3,7 +3,7 @@ //! The [`Handler`] is responsible for establishing and maintaining sessions with //! connected/discovered nodes. Each node, identified by it's [`NodeId`] is associated with a //! `Session`. This service drives the handshakes for establishing the sessions and associated -//! logic for sending/requesting initial connections/ENR's to/from unknown peers. +//! logic for sending/requesting initial connections/ENRs to/from unknown peers. //! //! The [`Handler`] also manages the timeouts for each request and reports back RPC failures, //! and received messages. 
Messages are encrypted and decrypted using the diff --git a/src/ipmode.rs b/src/ipmode.rs index b8774931f..0817f20fb 100644 --- a/src/ipmode.rs +++ b/src/ipmode.rs @@ -4,7 +4,7 @@ use std::net::SocketAddr; /// Sets the socket type to be established and also determines the type of ENRs that we will store /// in our routing table. -/// We store ENR's that have a `get_contractable_addr()` based on the `IpMode` set. +/// We store ENRs that have a `get_contractable_addr()` based on the `IpMode` set. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum IpMode { /// IPv4 only. This creates an IPv4 only UDP socket and will only store ENRs in the local diff --git a/src/lib.rs b/src/lib.rs index b13607b11..3dd179ab8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,7 +29,7 @@ //! needed and get dropped after a timeout. This section manages the creation and maintenance of //! sessions between nodes and the encryption/decryption of packets from the socket. It is realised by the [`handler::Handler`] struct and it runs in its own task. //! * Service - This section contains the protocol-level logic. In particular it manages the -//! routing table of known ENR's, topic registration/advertisement and performs various queries +//! routing table of known ENRs, topic registration/advertisement and performs various queries //! such as peer discovery. This section is realised by the [`Service`] struct. This also runs in //! it's own thread. //! * Application - This section is the user-facing API which can start/stop the underlying diff --git a/src/query_pool/peers/predicate.rs b/src/query_pool/peers/predicate.rs index 4768a1c35..c3258cfd2 100644 --- a/src/query_pool/peers/predicate.rs +++ b/src/query_pool/peers/predicate.rs @@ -21,7 +21,7 @@ pub(crate) struct PredicateQuery { /// The number of peers for which the query is currently waiting for results. num_waiting: usize, - /// The predicate function to be applied to filter the ENR's found during the search. 
+ /// The predicate function to be applied to filter the ENRs found during the search. predicate: Box bool + Send + 'static>, /// The configuration of the query. diff --git a/src/rpc.rs b/src/rpc.rs index 76208508f..0f747ed32 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -114,7 +114,7 @@ pub enum ResponseBody { Nodes { /// The total number of responses that make up this response. total: u64, - /// A list of ENR's returned by the responder. + /// A list of ENRs returned by the responder. nodes: Vec>, }, /// The TALKRESP response. diff --git a/src/service.rs b/src/service.rs index d5867ac0b..6fe3e54a8 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1,11 +1,11 @@ //! The Discovery v5 protocol. See `lib.rs` for further details. //! -//! Note: Discovered ENR's are not automatically added to the routing table. Only established +//! Note: Discovered ENRs are not automatically added to the routing table. Only established //! sessions get added, ensuring only valid ENRs are added. Manual additions can be made using the //! `add_enr()` function. //! //! Response to queries return `PeerId`. Only the trusted (a session has been established with) -//! `PeerId`'s are returned, as ENR's for these `PeerId`'s are stored in the routing table and as +//! `PeerId`'s are returned, as ENRs for these `PeerId`'s are stored in the routing table and as //! such should have an address to connect to. Untrusted `PeerId`'s can be obtained from the //! `Service::Discovered` event, which is fired as peers get discovered. //! @@ -205,7 +205,7 @@ pub enum ServiceRequest { /// Retrieves the ads currently published by this node on other nodes in a discv5 network. ActiveTopics(oneshot::Sender>, RequestError>>), /// Stops publishing this node as an advertiser for a topic. - DeregisterTopic(Topic, oneshot::Sender>), + StopRegistrationOfTopic(Topic, oneshot::Sender>), /// Retrieves the ads advertised for other nodes for a given topic. 
Ads(TopicHash, oneshot::Sender>), /// Retrieves the node id of entries in a given topic's kbuckets by log2distance (bucket index). @@ -603,7 +603,7 @@ impl Service { error!("Failed to return result of register topic operation for topic {}", topic); } } - ServiceRequest::DeregisterTopic(topic, callback) => { + ServiceRequest::StopRegistrationOfTopic(topic, callback) => { // If we have any pending tickets, discard those, i.e. don't return the ticket to the // peer that issued it. self.tickets.remove(&topic); @@ -724,7 +724,7 @@ impl Service { let id = query.id(); let query_type = query.target().query_type.clone(); let mut result = query.into_result(); - // obtain the ENR's for the resulting nodes + // obtain the ENRs for the resulting nodes let mut found_enrs = Vec::new(); for node_id in result.closest_peers { if let Some(position) = result.target.untrusted_enrs.iter().position(|enr| enr.node_id() == node_id) { @@ -860,11 +860,17 @@ impl Service { let mut topic_item = topics_to_reg_iter.next(); while let Some((topic, _topic_hash)) = topic_item { trace!("Publishing topic {} with hash {}", topic, topic.hash()); + topic_item = topics_to_reg_iter.next(); + // It could be that a topic has been set to stop registration since the + // iteration through topics_to_reg_iter was started, in that case skip + // that topic. 
+ if !self.registration_attempts.contains_key(&topic) { + continue; + } sent_regtopics += self.send_register_topics(topic.clone()); if sent_regtopics >= MAX_REGTOPICS_REGISTER_PER_INTERVAL { break } - topic_item = topics_to_reg_iter.next(); } if topics_to_reg_iter.next().is_none() { topics_to_reg_iter = self.registration_attempts.keys().map(|topic| (topic.clone(), topic.hash())).collect::>().into_iter(); @@ -1239,7 +1245,7 @@ impl Service { { let mut kbuckets = self.kbuckets.write(); for closest in kbuckets.closest_values(&target_key) { - // Add the known ENR's to the untrusted list + // Add the known ENRs to the untrusted list target.untrusted_enrs.push(closest.value); // Add the key to the list for the query known_closest_peers.push(closest.key); @@ -1284,7 +1290,7 @@ impl Service { { let mut kbuckets = self.kbuckets.write(); for closest in kbuckets.closest_values_predicate(&target_key, &kbucket_predicate) { - // Add the known ENR's to the untrusted list + // Add the known ENRs to the untrusted list target.untrusted_enrs.push(closest.value.clone()); // Add the key to the list for the query known_closest_peers.push(closest.into()); @@ -1550,7 +1556,7 @@ impl Service { match response.body { ResponseBody::Nodes { total, mut nodes } => { // Currently a maximum of DISTANCES_TO_REQUEST_PER_PEER*BUCKET_SIZE peers can be returned. Datagrams have a max - // size of 1280 and ENR's have a max size of 300 bytes. + // size of 1280 and ENRs have a max size of 300 bytes. // // Bucket sizes should be 16. In this case, there should be no more than 5*DISTANCES_TO_REQUEST_PER_PEER responses, to return all required peers. if total > 5 * DISTANCES_TO_REQUEST_PER_PEER as Log2Distance { @@ -1567,7 +1573,7 @@ impl Service { if let Some(CallbackResponse::Enr(callback)) = active_request.callback.take() { - // Currently only support requesting for ENR's. Verify this is the case. + // Currently only support requesting for ENRs. Verify this is the case. 
if !distances.is_empty() && distances[0] != 0 { error!("Retrieved a callback request that wasn't for a peer's ENR"); return; @@ -2087,7 +2093,7 @@ impl Service { mut distances: Vec, ) { // NOTE: At most we only allow 5 distances to be sent (see the decoder). If each of these - // buckets are full, that equates to 80 ENR's to respond with. + // buckets are full, that equates to 80 ENRs to respond with. let mut nodes_to_send = Vec::new(); distances.sort_unstable(); From a69a6554cd0108d9cd46074a56281591a6bce8fc Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 7 Sep 2022 17:30:37 +0200 Subject: [PATCH 376/391] Move decryption of tickets to Hanlder --- src/handler/crypto/mod.rs | 2 +- src/handler/mod.rs | 39 +++- src/rpc.rs | 458 ++++++++++++++++++++++++++++---------- src/service.rs | 151 +++++-------- src/service/test.rs | 77 ------- 5 files changed, 427 insertions(+), 300 deletions(-) diff --git a/src/handler/crypto/mod.rs b/src/handler/crypto/mod.rs index 0fae15c16..fe50ea72e 100644 --- a/src/handler/crypto/mod.rs +++ b/src/handler/crypto/mod.rs @@ -406,7 +406,7 @@ mod tests { let message = decrypt_message(&key, nonce, &ciphertext, &auth_data).unwrap(); dbg!(&message); dbg!(hex::encode(&message)); - let rpc = crate::rpc::Message::decode(&message).unwrap(); + let rpc = crate::rpc::Message::decode(&message, &[0u8; 16]).unwrap(); println!("{}", rpc); } diff --git a/src/handler/mod.rs b/src/handler/mod.rs index ea467ae01..75ffe2c43 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -32,7 +32,8 @@ use crate::{ discv5::PERMIT_BAN_LIST, error::{Discv5Error, RequestError}, packet::{ChallengeData, IdNonce, MessageNonce, Packet, PacketKind}, - rpc::{Message, Request, RequestBody, RequestId, Response, ResponseBody}, + rpc::{Message, Request, RequestBody, RequestId, Response, ResponseBody, FALSE_TICKET}, + service::BAN_MALICIOUS_PEER, socket, socket::{FilterConfig, Socket}, Enr, Topic, @@ -41,6 +42,7 @@ use delay_map::HashMapDelay; use enr::{CombinedKey, NodeId}; 
use futures::prelude::*; use parking_lot::RwLock; +use rlp::DecoderError; use std::{ collections::HashMap, convert::TryFrom, @@ -161,6 +163,9 @@ pub struct Challenge { pub struct Handler { /// Configuration for the discv5 service. request_retries: u8, + /// The duration nodes that show malicious behaviour are banned. A configuration for the + /// discv5 service. + ban_duration: Option, /// The local node id to save unnecessary read locks on the ENR. The NodeID should not change /// during the operation of the server. node_id: NodeId, @@ -168,6 +173,8 @@ pub struct Handler { enr: Arc>, /// The key to sign the ENR and set up encrypted communication with peers. key: Arc>, + /// The key used for en-/decrypting tickets. + ticket_key: [u8; 16], /// Pending raw requests. active_requests: ActiveRequests, /// The expected responses by SocketAddr which allows packets to pass the underlying filter. @@ -252,9 +259,11 @@ impl Handler { let mut handler = Handler { request_retries: config.request_retries, + ban_duration: config.ban_duration, node_id, enr, key, + ticket_key: rand::random(), active_requests: ActiveRequests::new(config.request_timeout), pending_requests: HashMap::new(), filter_expected_responses, @@ -479,13 +488,14 @@ impl Handler { // Check for an established session if let Some(session) = self.sessions.get_mut(&node_address) { // Encrypt the message and send - let packet = match session.encrypt_message(self.node_id, &response.encode()) { - Ok(packet) => packet, - Err(e) => { - warn!("Could not encrypt response: {:?}", e); - return; - } - }; + let packet = + match session.encrypt_message(self.node_id, &response.encode(&self.ticket_key)) { + Ok(packet) => packet, + Err(e) => { + warn!("Could not encrypt response: {:?}", e); + return; + } + }; self.send(node_address, packet).await; } else { // Either the session is being established or has expired. We simply drop the @@ -859,10 +869,21 @@ impl Handler { // attempt to decrypt and process the message. 
let message = match session.decrypt_message(message_nonce, message, authenticated_data) { - Ok(m) => match Message::decode(&m) { + Ok(m) => match Message::decode(&m, &self.ticket_key) { Ok(p) => p, Err(e) => { warn!("Failed to decode message. Error: {:?}, {}", e, node_address); + if let DecoderError::Custom(FALSE_TICKET) = e { + warn!("Node sent a ticket that couldn't be decrypted with local ticket key. Blacklisting peer {}", node_address.node_id); + BAN_MALICIOUS_PEER(self.ban_duration, node_address.clone()); + self.fail_session( + &node_address, + RequestError::InvalidRemotePacket, + true, + ) + .await; + return; + } return; } }, diff --git a/src/rpc.rs b/src/rpc.rs index 0f747ed32..c5da02453 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,5 +1,10 @@ -use crate::advertisement::topic::TopicHash; -use enr::{CombinedKey, Enr, NodeId}; +use crate::{advertisement::topic::TopicHash, Enr}; +use aes_gcm::{ + aead::{generic_array::GenericArray, Aead, NewAead, Payload}, + Aes128Gcm, +}; +use enr::NodeId; +use more_asserts::debug_unreachable; use rlp::{DecoderError, Rlp, RlpStream}; use std::{ net::{IpAddr, Ipv6Addr}, @@ -8,6 +13,135 @@ use std::{ use tokio::time::{Duration, Instant}; use tracing::{debug, error, warn}; +pub const FALSE_TICKET: &str = "TICKET_ENCRYPTED_BY_FOREIGN_KEY"; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum RequestTicket { + Empty, + LocallyIssued(Ticket), + RemotelyIssued(Vec), +} + +impl RequestTicket { + pub fn encode(&self) -> Vec { + let mut buf = Vec::new(); + let mut s = RlpStream::new(); + s.append(self); + buf.extend_from_slice(&s.out()); + buf + } + + pub fn decode(ticket: &[u8]) -> Result { + let rlp = rlp::Rlp::new(ticket); + let request_ticket = rlp.as_val::()?; + Ok(request_ticket) + } +} + +impl rlp::Encodable for RequestTicket { + fn rlp_append(&self, s: &mut RlpStream) { + match self { + RequestTicket::Empty => { + s.append(&Vec::new()); + } + RequestTicket::LocallyIssued(ticket) => { + debug!("A locally issued ticket will never 
be sent in the form of a request hence the RequestTicket::LocallyIssued variant should not need to be encoded. This functionality should merely be invoked by tests."); + s.append(ticket); + } + RequestTicket::RemotelyIssued(bytes) => { + // A remotely issued ticket is encoded to return it to its issuer once its wait + // time expires. + s.append(bytes); + } + } + } +} + +impl rlp::Decodable for RequestTicket { + fn decode(rlp: &Rlp<'_>) -> Result { + // If a ticket is incoming in a REGTOPIC request, and we hence decode + // the request, it should only be a ticket that was locally issued. A + // remotely issued ticket RegtopicTicket::Remote will only be encoded + // by this node to return it to its issuer. + Ok(RequestTicket::LocallyIssued(rlp.as_val::()?)) + } +} + +impl std::fmt::Display for RequestTicket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RequestTicket::Empty => { + write!(f, "Empty") + } + RequestTicket::LocallyIssued(ticket) => { + write!(f, "Locally issued ticket: {}", ticket) + } + RequestTicket::RemotelyIssued(bytes) => { + write!(f, "Remotely issued ticket: {}", hex::encode(bytes)) + } + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum ResponseTicket { + LocallyIssued(Ticket), + RemotelyIssued(Vec), +} + +impl ResponseTicket { + pub fn encode(&self) -> Vec { + let mut buf = Vec::new(); + let mut s = RlpStream::new(); + s.append(self); + buf.extend_from_slice(&s.out()); + buf + } + + pub fn decode(ticket: &[u8]) -> Result { + let rlp = rlp::Rlp::new(ticket); + let response_ticket = rlp.as_val::()?; + Ok(response_ticket) + } +} + +impl rlp::Encodable for ResponseTicket { + fn rlp_append(&self, s: &mut RlpStream) { + match self { + ResponseTicket::LocallyIssued(ticket) => { + s.append(ticket); + } + ResponseTicket::RemotelyIssued(bytes) => { + debug!("A remotely issued ticket will never be returned to the issuer in the form of a response hence the ResponseTicket::RemotelyIssued variant should 
not need to be encoded. This functionality should merely be invoked by tests."); + s.append(bytes); + } + } + } +} + +impl rlp::Decodable for ResponseTicket { + fn decode(rlp: &Rlp<'_>) -> Result { + // If a ticket is incoming in a TICKET response, and we hence decode + // the response, it should only be a ticket that was remotely issued. + // A locally issued ticket ResponseTicket::Local will only be encoded + // by this node and sent to a given peer. + Ok(ResponseTicket::RemotelyIssued(rlp.as_val::>()?)) + } +} + +impl std::fmt::Display for ResponseTicket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ResponseTicket::LocallyIssued(ticket) => { + write!(f, "Locally issued ticket: {}", ticket) + } + ResponseTicket::RemotelyIssued(bytes) => { + write!(f, "Remotely issued ticket: {}", hex::encode(bytes)) + } + } + } +} + /// Type to manage the request IDs. #[derive(Debug, Clone, PartialEq, Hash, Eq)] pub struct RequestId(pub Vec); @@ -88,9 +222,9 @@ pub enum RequestBody { /// The topic string we want to advertise at the node receiving this request. topic: String, // Current node record of sender. - enr: crate::Enr, + enr: Enr, // Ticket content of ticket from a previous registration attempt or empty. - ticket: Vec, + ticket: RequestTicket, }, /// A TOPICQUERY request. TopicQuery { @@ -115,7 +249,7 @@ pub enum ResponseBody { /// The total number of responses that make up this response. total: u64, /// A list of ENRs returned by the responder. - nodes: Vec>, + nodes: Vec, }, /// The TALKRESP response. Talk { @@ -125,7 +259,7 @@ pub enum ResponseBody { /// The TICKET response. Ticket { /// The response to a REGTOPIC request. - ticket: Vec, + ticket: ResponseTicket, /// The time in seconds to wait before attempting to register again. wait_time: u64, /// The topic hash for which the opaque ticket is issued. @@ -227,7 +361,7 @@ impl Response { } /// Encodes a Message to RLP-encoded bytes. 
- pub fn encode(self) -> Vec { + pub fn encode(self, ticket_key: &[u8; 16]) -> Vec { let mut buf = Vec::with_capacity(10); let msg_type = self.msg_type(); buf.push(msg_type); @@ -276,13 +410,22 @@ impl Response { wait_time, topic, } => { - let mut s = RlpStream::new(); - s.begin_list(4); - s.append(&id.as_bytes()); - s.append(&ticket); - s.append(&wait_time); - s.append(&topic); - buf.extend_from_slice(&s.out()); + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket.encode(), + aad: b"", + }; + if let Ok(encrypted_ticket) = + aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + { + let mut s = RlpStream::new(); + s.begin_list(4); + s.append(&id.as_bytes()); + s.append(&encrypted_ticket); + s.append(&wait_time); + s.append(&topic); + buf.extend_from_slice(&s.out()); + } buf } } @@ -315,11 +458,11 @@ impl std::fmt::Display for ResponseBody { match self { ResponseBody::Pong { enr_seq, ip, port } => write!( f, - "PONG: Enr-seq: {}, Ip: {:?}, Port: {}", + "PONG: enr-seq: {}, ip: {:?}, port: {}", enr_seq, ip, port ), ResponseBody::Nodes { total, nodes } => { - write!(f, "NODES: total: {}, Nodes: [", total)?; + write!(f, "NODES: total: {}, nodes: [", total)?; let mut first = true; for id in nodes { if !first { @@ -333,7 +476,7 @@ impl std::fmt::Display for ResponseBody { write!(f, "]") } ResponseBody::Talk { response } => { - write!(f, "TALK: Response {}", hex::encode(response)) + write!(f, "TALK: response {}", hex::encode(response)) } ResponseBody::Ticket { ticket, @@ -342,10 +485,8 @@ impl std::fmt::Display for ResponseBody { } => { write!( f, - "TICKET: Ticket: {}, Wait time: {}, Topic: {}", - hex::encode(ticket), - wait_time, - topic + "TICKET: ticket: {}, wait time: {}, topic: {}", + ticket, wait_time, topic ) } } @@ -377,21 +518,21 @@ impl std::fmt::Display for RequestBody { "REGTOPIC: topic: {}, enr: {}, ticket: {}", topic, enr.to_base64(), - hex::encode(ticket), + ticket, ), } } } #[allow(dead_code)] 
impl Message { - pub fn encode(self) -> Vec { + pub fn encode(self, ticket_key: &[u8; 16]) -> Vec { match self { Self::Request(request) => request.encode(), - Self::Response(response) => response.encode(), + Self::Response(response) => response.encode(ticket_key), } } - pub fn decode(data: &[u8]) -> Result { + pub fn decode(data: &[u8], ticket_key: &[u8; 16]) -> Result { if data.len() < 3 { return Err(DecoderError::RlpIsTooShort); } @@ -518,7 +659,7 @@ impl Message { // no records vec![] } else { - enr_list_rlp.as_list::>()? + enr_list_rlp.as_list::()? } }; Message::Response(Response { @@ -568,11 +709,39 @@ impl Message { } let topic = rlp.val_at::(1)?; let enr_rlp = rlp.at(2)?; - let enr = enr_rlp.as_val::>()?; + let enr = enr_rlp.as_val::()?; let ticket = rlp.val_at::>(3)?; + + let returned_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", + }; + if !ticket.is_empty() { + if let Ok(decrypted_ticket) = aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload).map_err(|e| debug!("Failed to decrypt ticket in REGTOPIC request. Ticket not issued by us. Error: {}", e)) { + if let Ok(decoded_ticket) = RequestTicket::decode(&decrypted_ticket).map_err(|e| { + debug!("Failed to decode ticket in REGTOPIC request. 
Error: {}", e) + }) { + decoded_ticket + } else { + debug_unreachable!("Encoding of ticket issued locally is faulty"); + return Err(DecoderError::Custom("Faulty encoding of ticket")); + } + } else { + return Err(DecoderError::Custom(FALSE_TICKET)); + } + } else { + RequestTicket::Empty + } + }; Message::Request(Request { id, - body: RequestBody::RegisterTopic { topic, enr, ticket }, + body: RequestBody::RegisterTopic { + topic, + enr, + ticket: returned_ticket, + }, }) } 8 => { @@ -584,7 +753,7 @@ impl Message { ); return Err(DecoderError::RlpIncorrectListLen); } - let ticket = rlp.val_at::>(1)?; + let ticket = rlp.val_at::(1)?; let wait_time = rlp.val_at::(2)?; let topic = rlp.val_at::(3)?; Message::Response(Response { @@ -630,7 +799,7 @@ impl Message { } /// A ticket object, outlined in the spec. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq)] pub struct Ticket { src_node_id: NodeId, src_ip: IpAddr, @@ -834,6 +1003,20 @@ impl Ticket { } } +impl std::fmt::Display for Ticket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Ticket: src node id: {}, src ip: {}, topic: {}, req time: {:?}, wait time: {}", + self.src_node_id, + self.src_ip, + self.topic, + self.req_time, + self.wait_time.as_secs() + ) + } +} + #[cfg(test)] mod tests { use super::*; @@ -845,6 +1028,8 @@ mod tests { #[test] fn ref_test_encode_request_ping() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let enr_seq = 1; @@ -856,12 +1041,14 @@ mod tests { // expected hex output let expected_output = hex::decode("01c20101").unwrap(); - dbg!(hex::encode(message.clone().encode())); - assert_eq!(message.encode(), expected_output); + dbg!(hex::encode(message.clone().encode(&ticket_key))); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_request_findnode() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let distances = 
vec![256]; @@ -872,13 +1059,15 @@ mod tests { // expected hex output let expected_output = hex::decode("03c501c3820100").unwrap(); - dbg!(hex::encode(message.clone().encode())); + dbg!(hex::encode(message.clone().encode(&ticket_key))); - assert_eq!(message.encode(), expected_output); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_response_ping() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let enr_seq = 1; @@ -892,12 +1081,14 @@ mod tests { // expected hex output let expected_output = hex::decode("02ca0101847f000001821388").unwrap(); - dbg!(hex::encode(message.clone().encode())); - assert_eq!(message.encode(), expected_output); + dbg!(hex::encode(message.clone().encode(&ticket_key))); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_response_nodes_empty() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let total = 1; @@ -912,16 +1103,18 @@ mod tests { nodes: vec![], }, }); - assert_eq!(message.encode(), expected_output); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_response_nodes() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let total = 1; - let enr = "-HW4QCjfjuCfSmIJHxqLYfGKrSz-Pq3G81DVJwd_muvFYJiIOkf0bGtJu7kZVCOPnhSTMneyvR4MRbF3G5TNB4wy2ssBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::>().unwrap(); + let enr = "-HW4QCjfjuCfSmIJHxqLYfGKrSz-Pq3G81DVJwd_muvFYJiIOkf0bGtJu7kZVCOPnhSTMneyvR4MRbF3G5TNB4wy2ssBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::().unwrap(); // expected hex output let expected_output = 
hex::decode("04f87b0101f877f875b84028df8ee09f4a62091f1a8b61f18aad2cfe3eadc6f350d527077f9aebc56098883a47f46c6b49bbb91954238f9e14933277b2bd1e0c45b1771b94cd078c32dacb0182696482763489736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138").unwrap(); @@ -932,18 +1125,20 @@ mod tests { nodes: vec![enr], }, }); - dbg!(hex::encode(message.clone().encode())); - assert_eq!(message.encode(), expected_output); + dbg!(hex::encode(message.clone().encode(&ticket_key))); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_response_nodes_multiple() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let total = 1; - let enr = "enr:-HW4QBzimRxkmT18hMKaAL3IcZF1UcfTMPyi3Q1pxwZZbcZVRI8DC5infUAB_UauARLOJtYTxaagKoGmIjzQxO2qUygBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::>().unwrap(); + let enr = "enr:-HW4QBzimRxkmT18hMKaAL3IcZF1UcfTMPyi3Q1pxwZZbcZVRI8DC5infUAB_UauARLOJtYTxaagKoGmIjzQxO2qUygBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::().unwrap(); - let enr2 = "enr:-HW4QNfxw543Ypf4HXKXdYxkyzfcxcO-6p9X986WldfVpnVTQX1xlTnWrktEWUbeTZnmgOuAY_KUhbVV1Ft98WoYUBMBgmlkgnY0iXNlY3AyNTZrMaEDDiy3QkHAxPyOgWbxp5oF1bDdlYE6dLCUUp8xfVw50jU".parse::>().unwrap(); + let enr2 = "enr:-HW4QNfxw543Ypf4HXKXdYxkyzfcxcO-6p9X986WldfVpnVTQX1xlTnWrktEWUbeTZnmgOuAY_KUhbVV1Ft98WoYUBMBgmlkgnY0iXNlY3AyNTZrMaEDDiy3QkHAxPyOgWbxp5oF1bDdlYE6dLCUUp8xfVw50jU".parse::().unwrap(); // expected hex output let expected_output = 
hex::decode("04f8f20101f8eef875b8401ce2991c64993d7c84c29a00bdc871917551c7d330fca2dd0d69c706596dc655448f030b98a77d4001fd46ae0112ce26d613c5a6a02a81a6223cd0c4edaa53280182696482763489736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138f875b840d7f1c39e376297f81d7297758c64cb37dcc5c3beea9f57f7ce9695d7d5a67553417d719539d6ae4b445946de4d99e680eb8063f29485b555d45b7df16a1850130182696482763489736563703235366b31a1030e2cb74241c0c4fc8e8166f1a79a05d5b0dd95813a74b094529f317d5c39d235").unwrap(); @@ -955,18 +1150,19 @@ mod tests { nodes: vec![enr, enr2], }, }); - dbg!(hex::encode(message.clone().encode())); - assert_eq!(message.encode(), expected_output); + dbg!(hex::encode(message.clone().encode(&ticket_key))); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_decode_response_nodes_multiple() { + let ticket_key: [u8; 16] = rand::random(); let input = hex::decode("04f8f20101f8eef875b8401ce2991c64993d7c84c29a00bdc871917551c7d330fca2dd0d69c706596dc655448f030b98a77d4001fd46ae0112ce26d613c5a6a02a81a6223cd0c4edaa53280182696482763489736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138f875b840d7f1c39e376297f81d7297758c64cb37dcc5c3beea9f57f7ce9695d7d5a67553417d719539d6ae4b445946de4d99e680eb8063f29485b555d45b7df16a1850130182696482763489736563703235366b31a1030e2cb74241c0c4fc8e8166f1a79a05d5b0dd95813a74b094529f317d5c39d235").unwrap(); - let expected_enr1 = "enr:-HW4QBzimRxkmT18hMKaAL3IcZF1UcfTMPyi3Q1pxwZZbcZVRI8DC5infUAB_UauARLOJtYTxaagKoGmIjzQxO2qUygBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::>().unwrap(); - let expected_enr2 = "enr:-HW4QNfxw543Ypf4HXKXdYxkyzfcxcO-6p9X986WldfVpnVTQX1xlTnWrktEWUbeTZnmgOuAY_KUhbVV1Ft98WoYUBMBgmlkgnY0iXNlY3AyNTZrMaEDDiy3QkHAxPyOgWbxp5oF1bDdlYE6dLCUUp8xfVw50jU".parse::>().unwrap(); + let expected_enr1 = 
"enr:-HW4QBzimRxkmT18hMKaAL3IcZF1UcfTMPyi3Q1pxwZZbcZVRI8DC5infUAB_UauARLOJtYTxaagKoGmIjzQxO2qUygBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::().unwrap(); + let expected_enr2 = "enr:-HW4QNfxw543Ypf4HXKXdYxkyzfcxcO-6p9X986WldfVpnVTQX1xlTnWrktEWUbeTZnmgOuAY_KUhbVV1Ft98WoYUBMBgmlkgnY0iXNlY3AyNTZrMaEDDiy3QkHAxPyOgWbxp5oF1bDdlYE6dLCUUp8xfVw50jU".parse::().unwrap(); - let decoded = Message::decode(&input).unwrap(); + let decoded = Message::decode(&input, &ticket_key).unwrap(); match decoded { Message::Response(response) => match response.body { @@ -983,20 +1179,22 @@ mod tests { #[test] fn encode_decode_ping_request() { + let ticket_key: [u8; 16] = rand::random(); let id = RequestId(vec![1]); let request = Message::Request(Request { id, body: RequestBody::Ping { enr_seq: 15 }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] fn encode_decode_ping_response() { + let ticket_key: [u8; 16] = rand::random(); let id = RequestId(vec![1]); let request = Message::Response(Response { id, @@ -1007,14 +1205,15 @@ mod tests { }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] fn encode_decode_find_node_request() { + let ticket_key: [u8; 16] = rand::random(); let id = RequestId(vec![1]); let request = Message::Request(Request { id, @@ -1023,15 +1222,16 @@ mod tests { }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] fn 
encode_decode_nodes_response() { - let key = CombinedKey::generate_secp256k1(); + let ticket_key: [u8; 16] = rand::random(); + let key = enr::CombinedKey::generate_secp256k1(); let enr1 = EnrBuilder::new("v4") .ip4("127.0.0.1".parse().unwrap()) .udp4(500) @@ -1057,14 +1257,15 @@ mod tests { }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] fn encode_decode_talk_request() { + let ticket_key: [u8; 16] = rand::random(); let id = RequestId(vec![1]); let request = Message::Request(Request { id, @@ -1074,17 +1275,18 @@ mod tests { }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] fn encode_decode_register_topic_request_empty_ticket() { + let ticket_key: [u8; 16] = rand::random(); let port = 5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); + let key = enr::CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let request = Message::Request(Request { @@ -1092,25 +1294,28 @@ mod tests { body: RequestBody::RegisterTopic { topic: "lighthouse".to_string(), enr, - ticket: Vec::new(), + ticket: RequestTicket::Empty, }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] - fn encode_decode_register_topic_request() { + fn encode_decode_ticket_transit() { + let local_ticket_key: [u8; 16] = rand::random(); + let remote_ticket_key: [u8; 16] = rand::random(); + let port = 
5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); + let key = enr::CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let node_id = enr.node_id(); - let ticket = Ticket::new( + let og_ticket = Ticket::new( node_id, ip, TopicHash::from_raw([1u8; 32]), @@ -1119,30 +1324,71 @@ mod tests { //Duration::from_secs(25), ); - let ticket = ticket.encode(); - - let request = Message::Request(Request { + // The local node sends a ticket response + let response = Message::Response(Response { id: RequestId(vec![1]), - body: RequestBody::RegisterTopic { + body: ResponseBody::Ticket { + ticket: ResponseTicket::LocallyIssued(og_ticket.clone()), + wait_time: 1u64, topic: "lighthouse".to_string(), - enr, - ticket, }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); - - assert_eq!(request, decoded); + let encoded_resp = response.encode(&local_ticket_key); + + // The response arrives at the remote peer + let decoded_resp = Message::decode(&encoded_resp, &remote_ticket_key).unwrap(); + + if let Message::Response(Response { + id: _, + body: + ResponseBody::Ticket { + ticket: ResponseTicket::RemotelyIssued(ticket_bytes), + .. 
+ }, + }) = decoded_resp + { + // The remote peer returns the ticket to the issuer + let request = Message::Request(Request { + id: RequestId(vec![1]), + body: RequestBody::RegisterTopic { + topic: "lighthouse".to_string(), + enr, + ticket: RequestTicket::RemotelyIssued(ticket_bytes), + }, + }); + + let encoded_req = request.encode(&remote_ticket_key); + + // The request arrives at the issuer who decodes it + let decoded_req = Message::decode(&encoded_req, &local_ticket_key).unwrap(); + + if let Message::Request(Request { + id: _, + body: + RequestBody::RegisterTopic { + topic: _, + enr: _, + ticket: RequestTicket::LocallyIssued(ticket), + }, + }) = decoded_req + { + assert_eq!(og_ticket, ticket); + } else { + panic!(); + } + } else { + panic!(); + } } #[test] - fn encode_decode_ticket() { + fn encode_decode_request_ticket() { // Create the test values needed let port = 5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); + let key = enr::CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let node_id = enr.node_id(); @@ -1155,19 +1401,20 @@ mod tests { //Duration::from_secs(25), ); - let encoded = ticket.encode(); - let decoded = Ticket::decode(&encoded).unwrap(); + let encoded = RequestTicket::LocallyIssued(ticket.clone()).encode(); - assert_eq!(Some(ticket), decoded); + let decoded = RequestTicket::decode(&encoded).unwrap(); + + assert_eq!(RequestTicket::LocallyIssued(ticket), decoded); } #[test] - fn encode_decode_ticket_with_encryption() { + fn encode_decode_request_ticket_with_encryption() { // Create the test values needed let port = 5000; let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); + let key = enr::CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let node_id = enr.node_id(); @@ -1182,7 +1429,7 @@ mod tests { let ticket_key: [u8; 16] = rand::random(); 
- let encoded = ticket.encode(); + let encoded = RequestTicket::LocallyIssued(ticket.clone()).encode(); let encrypted_ticket = { let aead = Aes128Gcm::new(GenericArray::from_slice(&ticket_key)); @@ -1205,57 +1452,24 @@ mod tests { } .unwrap(); - let decoded = Ticket::decode(&decrypted_ticket).unwrap(); + let decoded = RequestTicket::decode(&decrypted_ticket).unwrap(); assert_eq!(encoded, decrypted_ticket); - assert_eq!(Some(ticket), decoded); - } - - #[test] - fn encode_decode_ticket_response() { - // Create the test values needed - let port = 5000; - let ip: IpAddr = "127.0.0.1".parse().unwrap(); - - let key = CombinedKey::generate_secp256k1(); - - let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let node_id = enr.node_id(); - let ticket = Ticket::new( - node_id, - ip, - TopicHash::from_raw([1u8; 32]), - Instant::now(), - Duration::from_secs(11), - //Duration::from_secs(25), - ); - - let ticket = ticket.encode(); - let response = Message::Response(Response { - id: RequestId(vec![1]), - body: ResponseBody::Ticket { - ticket, - wait_time: 1u64, - topic: "lighthouse".to_string(), - }, - }); - - let encoded = response.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); - - assert_eq!(response, decoded); + assert_eq!(RequestTicket::LocallyIssued(ticket), decoded); } #[test] fn encode_decode_topic_query_request() { + let ticket_key: [u8; 16] = rand::random(); + let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::TopicQuery { topic: TopicHash::from_raw([1u8; 32]), }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } diff --git a/src/service.rs b/src/service.rs index 6fe3e54a8..b8df0982a 100644 --- a/src/service.rs +++ b/src/service.rs @@ -37,10 +37,6 @@ use crate::{ }, rpc, Discv5Config, Discv5Event, 
Enr, IpMode, Topic, TopicsEnrField, }; -use aes_gcm::{ - aead::{generic_array::GenericArray, Aead, NewAead, Payload}, - Aes128Gcm, -}; use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; @@ -87,6 +83,12 @@ const MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET: usize = 16; /// The duration in seconds which a node can come late to an assigned wait time. const WAIT_TIME_TOLERANCE: Duration = Duration::from_secs(5); +pub const BAN_MALICIOUS_PEER: fn(ban_duration: Option, node_address: NodeAddress) = + |ban_duration, node_address| { + let ban_timeout = ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + }; + /// Request type for Protocols using `TalkReq` message. /// /// Automatically responds with an empty body on drop if @@ -283,9 +285,6 @@ pub struct Service { /// kbuckets. Peers are stored by bucket index, i.e. the log2distance to the local node id. discovered_peers_topic: HashMap>>, - /// The key used for en-/decrypting tickets. - ticket_key: [u8; 16], - /// Tickets received from other nodes. tickets: Tickets, @@ -492,7 +491,6 @@ impl Service { topic_lookups: Default::default(), topics_kbuckets: HashMap::new(), discovered_peers_topic: HashMap::new(), - ticket_key: rand::random(), tickets: Tickets::default(), active_topic_queries: ActiveTopicQueries::new( config.topic_query_timeout, @@ -838,7 +836,7 @@ impl Service { // When the ticket time expires a new REGTOPIC request is automatically sent to the // ticket issuer and the registration attempt stays in the [`RegistrationState::Ticket`] // from sending the first REGTOPIC request to this contact for this topic. 
- self.reg_topic_request(active_ticket.contact(), active_topic.topic().clone(), enr, Some(active_ticket.ticket())); + self.reg_topic_request(active_ticket.contact(), active_topic.topic().clone(), enr, RequestTicket::RemotelyIssued(active_ticket.ticket())); } Some(topic_query_progress) = self.active_topic_queries.next() => { match topic_query_progress { @@ -1124,7 +1122,12 @@ impl Service { if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) .map_err(|e| error!("Failed to send REGTOPIC to peer. Error: {:?}", e)) { - self.reg_topic_request(node_contact, topic.clone(), local_enr.clone(), None); + self.reg_topic_request( + node_contact, + topic.clone(), + local_enr.clone(), + RequestTicket::RemotelyIssued(Vec::new()), + ); // If an uncontacted peer has a faulty enr, don't count the registration attempt. sent_regtopics += 1; } @@ -1424,8 +1427,7 @@ impl Service { }; if registration_of_other_node { warn!("The enr in the REGTOPIC request body does not match sender's. Nodes can only register themselves. Blacklisting peer {}.", node_address.node_id); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); self.rpc_failure(id, RequestError::RegistrationOtherNode); return; } @@ -1452,42 +1454,16 @@ impl Service { return; } - if !ticket.is_empty() { - let decrypted_ticket = { - let aead = Aes128Gcm::new(GenericArray::from_slice(&self.ticket_key)); - let payload = Payload { - msg: &ticket, - aad: b"", - }; - aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) - .map_err(|e| { - error!("Failed to decrypt ticket in REGTOPIC request. Error: {}", e) - }) - }; - if let Ok(decrypted_ticket) = decrypted_ticket { - if let Ok(Some(ticket)) = Ticket::decode(&decrypted_ticket).map_err(|e| { - error!("Failed to decode ticket in REGTOPIC request. 
Error: {}", e) - }) { - // If the node has not respected the wait time and arrives before the wait time has - // expired or more than 5 seconds later than it has expired, the peer is blacklisted - let waited_time = ticket.req_time().elapsed(); - let wait_time = ticket.wait_time(); - if waited_time < wait_time - || waited_time >= wait_time + WAIT_TIME_TOLERANCE - { - warn!("The REGTOPIC has not waited the time assigned in the ticket. Blacklisting peer {}.", node_address.node_id); - let ban_timeout = - self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::InvalidWaitTime); - return; - } - } - } else { - warn!("Node sent a ticket that couldn't be decrypted with local ticket key. Blacklisting peer {}", node_address.node_id); + // If the node has not respected the wait time and arrives before the wait time has + // expired or more than 5 seconds later than it has expired, the peer is blacklisted + if let RequestTicket::LocallyIssued(ticket) = ticket { + let waited_time = ticket.req_time().elapsed(); + let wait_time = ticket.wait_time(); + if waited_time < wait_time || waited_time >= wait_time + WAIT_TIME_TOLERANCE { + warn!("The REGTOPIC has not waited the time assigned in the ticket. 
Blacklisting peer {}.", node_address.node_id); let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::InvalidTicket); + self.rpc_failure(id, RequestError::InvalidWaitTime); return; } } @@ -1516,7 +1492,13 @@ impl Service { } let wait_time = new_ticket.wait_time(); - self.send_ticket_response(node_address, id, topic, new_ticket, wait_time); + self.send_ticket_response( + node_address, + id, + topic, + ResponseTicket::LocallyIssued(new_ticket), + wait_time, + ); } RequestBody::TopicQuery { topic } => { self.send_topic_query_nodes_response(node_address, id, topic); @@ -1871,17 +1853,19 @@ impl Service { .get_mut(&node_id) { if wait_time > 0 { - if let Err(e) = self.tickets.insert( - active_request.contact, - ticket, - Duration::from_secs(wait_time), - topic, - ) { - error!( - "Failed storing ticket from node id {}. Error {}", - node_id, e - ); - *reg_state = RegistrationState::TicketLimit(now); + if let ResponseTicket::RemotelyIssued(ticket_bytes) = ticket { + if let Err(e) = self.tickets.insert( + active_request.contact, + ticket_bytes, + Duration::from_secs(wait_time), + topic, + ) { + error!( + "Failed storing ticket from node id {}. 
Error {}", + node_id, e + ); + *reg_state = RegistrationState::TicketLimit(now); + } } } else { *reg_state = RegistrationState::Confirmed(now); @@ -1983,17 +1967,12 @@ impl Service { contact: NodeContact, topic: Topic, enr: Enr, - ticket: Option>, + ticket: RequestTicket, ) { - let ticket_bytes = if let Some(ticket) = ticket { - ticket - } else { - Vec::new() - }; let request_body = RequestBody::RegisterTopic { topic: topic.topic(), enr, - ticket: ticket_bytes, + ticket, }; trace!("Sending reg topic to node {}", contact.socket_addr()); self.send_rpc_request(ActiveRequest { @@ -2023,35 +2002,25 @@ impl Service { node_address: NodeAddress, rpc_id: RequestId, topic: Topic, - ticket: Ticket, + ticket: ResponseTicket, wait_time: Duration, ) { - let aead = Aes128Gcm::new(GenericArray::from_slice(&self.ticket_key)); - let payload = Payload { - msg: &ticket.encode(), - aad: b"", + let response = Response { + id: rpc_id, + body: ResponseBody::Ticket { + ticket, + wait_time: wait_time.as_secs(), + topic: topic.topic(), + }, }; - let _ = aead - .encrypt(GenericArray::from_slice(&[1u8; 12]), payload) - .map_err(|e| error!("Failed to send TICKET response: {}", e)) - .map(|encrypted_ticket| { - let response = Response { - id: rpc_id, - body: ResponseBody::Ticket { - ticket: encrypted_ticket, - wait_time: wait_time.as_secs(), - topic: topic.topic(), - }, - }; - trace!( - "Sending TICKET response to: {}. Response: {} ", - node_address, - response - ); - let _ = self - .handler_send - .send(HandlerIn::Response(node_address, Box::new(response))); - }); + trace!( + "Sending TICKET response to: {}. 
Response: {} ", + node_address, + response + ); + let _ = self + .handler_send + .send(HandlerIn::Response(node_address, Box::new(response))); } /// Response to a topic query containing the nodes currently advertised for the diff --git a/src/service/test.rs b/src/service/test.rs index 4c44b07e9..f6fc2e942 100644 --- a/src/service/test.rs +++ b/src/service/test.rs @@ -1,8 +1,6 @@ #![cfg(test)] use super::*; -use std::net::IpAddr; - use crate::{ handler::Handler, kbucket, @@ -94,7 +92,6 @@ async fn build_service( discv5_recv, event_stream: None, ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3), - ticket_key: rand::random(), tickets: Tickets::new(Duration::from_secs(60 * 15)), registration_attempts: HashMap::new(), topic_lookups: Default::default(), @@ -178,77 +175,3 @@ async fn test_updating_connection_on_ping() { let node = buckets.iter_ref().next().unwrap(); assert!(node.status.is_connected()) } - -#[tokio::test] -async fn encrypt_decrypt_ticket() { - init(); - let enr_key = CombinedKey::generate_secp256k1(); - let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let enr = EnrBuilder::new("v4") - .ip(ip) - .udp4(10006) - .build(&enr_key) - .unwrap(); - - let socket_addr = enr.udp4_socket().unwrap(); - - let service = build_service( - Arc::new(RwLock::new(enr)), - Arc::new(RwLock::new(enr_key)), - socket_addr.into(), - false, - ) - .await; - - let ticket_key: [u8; 16] = rand::random(); - service - .local_enr - .write() - .insert("ticket_key", &ticket_key, &service.enr_key.write()) - .unwrap(); - let decoded_enr = service - .local_enr - .write() - .to_base64() - .parse::() - .unwrap(); - - let port = 6666; - let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let node_id = enr.node_id(); - - let ticket = Ticket::new( - node_id, - ip, - TopicHash::from_raw([1u8; 32]), - tokio::time::Instant::now(), - tokio::time::Duration::from_secs(5), - 
//tokio::time::Duration::from_secs(25), - ); - - let ticket_key = decoded_enr.get("ticket_key").unwrap(); - - let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); - let payload = Payload { - msg: &ticket.encode(), - aad: b"", - }; - let nonce = [1u8; 12]; - let encrypted_ticket = aead - .encrypt(GenericArray::from_slice(&nonce), payload) - .unwrap(); - - let decrypted_ticket = { - let payload = Payload { - msg: &encrypted_ticket, - aad: b"", - }; - aead.decrypt(GenericArray::from_slice(&nonce), payload) - .unwrap() - }; - let decoded_ticket = Ticket::decode(&decrypted_ticket).unwrap().unwrap(); - - assert_eq!(decoded_ticket, ticket); -} From 35201e3b3c631c7a8cfc9127e726f5d7fa903e12 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 8 Sep 2022 00:59:01 +0200 Subject: [PATCH 377/391] Make use of the fact that nodes can only register themselves as ads --- src/handler/mod.rs | 6 +- src/rpc.rs | 33 ++------ src/service.rs | 205 ++++++++++++++++++++++----------------------- 3 files changed, 109 insertions(+), 135 deletions(-) diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 75ffe2c43..ad59a0e9b 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -659,11 +659,7 @@ impl Handler { // Notify the application that the session has been established let event = match request_call.kind() { - RequestBody::RegisterTopic { - topic, - enr: _, - ticket: _, - } => { + RequestBody::RegisterTopic { topic, ticket: _ } => { let topic_hash = Topic::new(topic).hash(); HandlerOut::EstablishedTopic(enr, connection_direction, topic_hash) } diff --git a/src/rpc.rs b/src/rpc.rs index c5da02453..c969e20d2 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -221,8 +221,6 @@ pub enum RequestBody { RegisterTopic { /// The topic string we want to advertise at the node receiving this request. topic: String, - // Current node record of sender. - enr: Enr, // Ticket content of ticket from a previous registration attempt or empty. 
ticket: RequestTicket, }, @@ -313,12 +311,11 @@ impl Request { buf.extend_from_slice(&s.out()); buf } - RequestBody::RegisterTopic { topic, enr, ticket } => { + RequestBody::RegisterTopic { topic, ticket } => { let mut s = RlpStream::new(); s.begin_list(4); s.append(&id.as_bytes()); s.append(&topic); - s.append(&enr); s.append(&ticket); buf.extend_from_slice(&s.out()); buf @@ -513,13 +510,9 @@ impl std::fmt::Display for RequestBody { hex::encode(request) ), RequestBody::TopicQuery { topic } => write!(f, "TOPICQUERY: topic: {}", topic), - RequestBody::RegisterTopic { topic, enr, ticket } => write!( - f, - "REGTOPIC: topic: {}, enr: {}, ticket: {}", - topic, - enr.to_base64(), - ticket, - ), + RequestBody::RegisterTopic { topic, ticket } => { + write!(f, "REGTOPIC: topic: {}, ticket: {}", topic, ticket,) + } } } } @@ -703,14 +696,12 @@ impl Message { } 7 => { // RegisterTopicRequest - if list_len != 4 { - debug!("RegisterTopic request has an invalid RLP list length. Expected 4, found {}", list_len); + if list_len != 3 { + debug!("RegisterTopic request has an invalid RLP list length. 
Expected 3, found {}", list_len); return Err(DecoderError::RlpIncorrectListLen); } let topic = rlp.val_at::(1)?; - let enr_rlp = rlp.at(2)?; - let enr = enr_rlp.as_val::()?; - let ticket = rlp.val_at::>(3)?; + let ticket = rlp.val_at::>(2)?; let returned_ticket = { let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); @@ -739,7 +730,6 @@ impl Message { id, body: RequestBody::RegisterTopic { topic, - enr, ticket: returned_ticket, }, }) @@ -1284,16 +1274,11 @@ mod tests { #[test] fn encode_decode_register_topic_request_empty_ticket() { let ticket_key: [u8; 16] = rand::random(); - let port = 5000; - let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let key = enr::CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); let request = Message::Request(Request { id: RequestId(vec![1]), body: RequestBody::RegisterTopic { topic: "lighthouse".to_string(), - enr, ticket: RequestTicket::Empty, }, }); @@ -1313,8 +1298,8 @@ mod tests { let ip: IpAddr = "127.0.0.1".parse().unwrap(); let key = enr::CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); - let node_id = enr.node_id(); + let og_ticket = Ticket::new( node_id, ip, @@ -1353,7 +1338,6 @@ mod tests { id: RequestId(vec![1]), body: RequestBody::RegisterTopic { topic: "lighthouse".to_string(), - enr, ticket: RequestTicket::RemotelyIssued(ticket_bytes), }, }); @@ -1368,7 +1352,6 @@ mod tests { body: RequestBody::RegisterTopic { topic: _, - enr: _, ticket: RequestTicket::LocallyIssued(ticket), }, }) = decoded_req diff --git a/src/service.rs b/src/service.rs index b8df0982a..8dc8e9ece 100644 --- a/src/service.rs +++ b/src/service.rs @@ -35,7 +35,7 @@ use crate::{ query_pool::{ FindNodeQueryConfig, PredicateQueryConfig, QueryId, QueryPool, QueryPoolState, TargetKey, }, - rpc, Discv5Config, Discv5Event, Enr, IpMode, Topic, TopicsEnrField, + rpc, Discv5Config, Discv5Event, Enr, Topic, TopicsEnrField, }; use 
delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; @@ -683,7 +683,7 @@ impl Service { } HandlerOut::WhoAreYou(whoareyou_ref) => { // check what our latest known ENR is for this node. - if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id) { + if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id, true) { if let Err(e) = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, Some(known_enr))) { warn!("Failed to send whoareyou {}", e); }; @@ -728,7 +728,7 @@ impl Service { if let Some(position) = result.target.untrusted_enrs.iter().position(|enr| enr.node_id() == node_id) { let enr = result.target.untrusted_enrs.swap_remove(position); found_enrs.push(enr); - } else if let Some(enr) = self.find_enr(&node_id) { + } else if let Some(enr) = self.find_enr(&node_id, true) { // look up from the routing table found_enrs.push(enr); } @@ -767,8 +767,13 @@ impl Service { enr.node_id(), topic_hash ); + // A QueryType::FindTopic variant will always time out. The last batch of + // ENRs returned by the last iteration in the query is added to + // discovered_peers_topic, like previous batches of uncontacted peers were + // added to the query itself first. let discovered_peers = self.discovered_peers_topic.entry(topic_hash).or_default(); + let node_id = enr.node_id(); let peer_key: kbucket::Key = node_id.into(); let topic_key: kbucket::Key = @@ -832,11 +837,10 @@ impl Service { } } Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { - let enr = self.local_enr.read().clone(); // When the ticket time expires a new REGTOPIC request is automatically sent to the // ticket issuer and the registration attempt stays in the [`RegistrationState::Ticket`] // from sending the first REGTOPIC request to this contact for this topic. 
- self.reg_topic_request(active_ticket.contact(), active_topic.topic().clone(), enr, RequestTicket::RemotelyIssued(active_ticket.ticket())); + self.reg_topic_request(active_ticket.contact(), active_topic.topic().clone(), RequestTicket::RemotelyIssued(active_ticket.ticket())); } Some(topic_query_progress) = self.active_topic_queries.next() => { match topic_query_progress { @@ -1118,14 +1122,12 @@ impl Service { let mut sent_regtopics = 0; for peer in new_peers { - let local_enr = self.local_enr.read().clone(); if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) .map_err(|e| error!("Failed to send REGTOPIC to peer. Error: {:?}", e)) { self.reg_topic_request( node_contact, topic.clone(), - local_enr.clone(), RequestTicket::RemotelyIssued(Vec::new()), ); // If an uncontacted peer has a faulty enr, don't count the registration attempt. @@ -1316,7 +1318,7 @@ impl Service { } /// Returns an ENR if one is known for the given NodeId. - pub fn find_enr(&mut self, node_id: &NodeId) -> Option { + pub fn find_enr(&mut self, node_id: &NodeId, include_untrusted_enrs: bool) -> Option { // check if we know this node id in our routing table let key = kbucket::Key::from(*node_id); if let kbucket::Entry::Present(entry, _) = self.kbuckets.write().entry(&key) { @@ -1327,15 +1329,31 @@ impl Service { return Some(entry.value().clone()); } } - // check the untrusted addresses for ongoing queries - for query in self.queries.iter() { - if let Some(enr) = query - .target() - .untrusted_enrs - .iter() - .find(|v| v.node_id() == *node_id) + + if include_untrusted_enrs { + // check the untrusted addresses for ongoing queries + for query in self.queries.iter() { + if let Some(enr) = query + .target() + .untrusted_enrs + .iter() + .find(|v| v.node_id() == *node_id) + { + return Some(enr.clone()); + } + } + + // check the untrusted addresses for ongoing topic queries/registrations + for buckets in self + .discovered_peers_topic + .values() + .map(|buckets| 
buckets.values()) { - return Some(enr.clone()); + for bucket in buckets { + if let Some((_, enr)) = bucket.iter().find(|(v, _)| *v == node_id) { + return Some(enr.clone()); + } + } } } None @@ -1413,92 +1431,81 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } - RequestBody::RegisterTopic { topic, enr, ticket } => { + RequestBody::RegisterTopic { topic, ticket } => { let topic = Topic::new(topic); - // Blacklist if request tries to advertise another node than the sender - let registration_of_other_node = enr.node_id() != node_address.node_id - || match self.config.ip_mode { - IpMode::Ip4 => { - enr.udp4_socket().map(SocketAddr::V4) != Some(node_address.socket_addr) - } - IpMode::Ip6 { .. } => { - enr.udp6_socket().map(SocketAddr::V6) != Some(node_address.socket_addr) - } - }; - if registration_of_other_node { - warn!("The enr in the REGTOPIC request body does not match sender's. Nodes can only register themselves. Blacklisting peer {}.", node_address.node_id); - BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); - self.rpc_failure(id, RequestError::RegistrationOtherNode); - return; - } - // Blacklist if node doesn't contain the given topic in its enr 'topics' field - let topic_in_enr = |topic_hash: &TopicHash| -> bool { - if let Some(topics) = enr.get(ENR_KEY_TOPICS) { - if let Ok(Some(advertised_topics)) = TopicsEnrField::decode(topics) { - for topic in advertised_topics.topics_iter() { - if topic_hash == &topic.hash() { - return true; + // Only advertise peer which have been added to our kbuckets, i.e. which have + // a contactable address in their enr. 
+ if let Some(enr) = self.find_enr(&node_address.node_id, false) { + // Blacklist if node doesn't contain the given topic in its enr 'topics' field + let topic_in_enr = |topic_hash: &TopicHash| -> bool { + if let Some(topics) = enr.get(ENR_KEY_TOPICS) { + if let Ok(Some(advertised_topics)) = TopicsEnrField::decode(topics) { + for topic in advertised_topics.topics_iter() { + if topic_hash == &topic.hash() { + return true; + } } } } - } - false - }; - - if !topic_in_enr(&topic.hash()) { - warn!("The topic given in the REGTOPIC request body cannot be found in sender's 'topics' enr field. Blacklisting peer {}.", node_address.node_id); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::InvalidEnrTopicsField); - return; - } + false + }; - // If the node has not respected the wait time and arrives before the wait time has - // expired or more than 5 seconds later than it has expired, the peer is blacklisted - if let RequestTicket::LocallyIssued(ticket) = ticket { - let waited_time = ticket.req_time().elapsed(); - let wait_time = ticket.wait_time(); - if waited_time < wait_time || waited_time >= wait_time + WAIT_TIME_TOLERANCE { - warn!("The REGTOPIC has not waited the time assigned in the ticket. Blacklisting peer {}.", node_address.node_id); + if !topic_in_enr(&topic.hash()) { + warn!("The topic given in the REGTOPIC request body cannot be found in sender's 'topics' enr field. 
Blacklisting peer {}.", node_address.node_id); let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - self.rpc_failure(id, RequestError::InvalidWaitTime); + self.rpc_failure(id, RequestError::InvalidEnrTopicsField); return; } - } - let mut new_ticket = Ticket::new( - node_address.node_id, - node_address.socket_addr.ip(), - topic.hash(), - tokio::time::Instant::now(), - Duration::default(), - ); + // If the node has not respected the wait time and arrives before the wait time has + // expired or more than 5 seconds later than it has expired, the peer is blacklisted + if let RequestTicket::LocallyIssued(ticket) = ticket { + let waited_time = ticket.req_time().elapsed(); + let wait_time = ticket.wait_time(); + if waited_time < wait_time || waited_time >= wait_time + WAIT_TIME_TOLERANCE + { + warn!("The REGTOPIC has not waited the time assigned in the ticket. Blacklisting peer {}.", node_address.node_id); + let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + self.rpc_failure(id, RequestError::InvalidWaitTime); + return; + } + } - // If there is no wait time and the ad is successfully registered as an ad, the new ticket is sent - // with wait time set to zero indicating successful registration. - if let Err((wait_time, e)) = - self.ads - .insert(enr, topic.hash(), node_address.socket_addr.ip()) - { - // The wait time on the new ticket to send is updated if there is wait time for the requesting - // node for this topic to register as an ad due to the current state of the topic table. - error!( - "Registration attempt from peer {} for topic hash {} failed. 
Error: {}", - node_address.node_id, topic, e + let mut new_ticket = Ticket::new( + node_address.node_id, + node_address.socket_addr.ip(), + topic.hash(), + tokio::time::Instant::now(), + Duration::default(), ); - new_ticket.set_wait_time(wait_time); - } - let wait_time = new_ticket.wait_time(); - self.send_ticket_response( - node_address, - id, - topic, - ResponseTicket::LocallyIssued(new_ticket), - wait_time, - ); + // If there is no wait time and the ad is successfully registered as an ad, the new ticket is sent + // with wait time set to zero indicating successful registration. + if let Err((wait_time, e)) = + self.ads + .insert(enr, topic.hash(), node_address.socket_addr.ip()) + { + // The wait time on the new ticket to send is updated if there is wait time for the requesting + // node for this topic to register as an ad due to the current state of the topic table. + error!( + "Registration attempt from peer {} for topic hash {} failed. Error: {}", + node_address.node_id, topic, e + ); + new_ticket.set_wait_time(wait_time); + } + + let wait_time = new_ticket.wait_time(); + self.send_ticket_response( + node_address, + id, + topic, + ResponseTicket::LocallyIssued(new_ticket), + wait_time, + ); + } } RequestBody::TopicQuery { topic } => { self.send_topic_query_nodes_response(node_address, id, topic); @@ -1666,7 +1673,6 @@ impl Service { self.active_nodes_responses.remove(&node_id); if let RequestBody::FindNode { .. } = &active_request.request_body { - // In the case that it is a FINDNODE request using a topic hash as key, remove the mapping. 
self.discovered(&node_id, nodes, active_request.query_id); } else if let RequestBody::TopicQuery { topic } = &active_request.request_body { nodes.retain(|enr| { @@ -1805,7 +1811,7 @@ impl Service { } // check if we need to request a new ENR - if let Some(enr) = self.find_enr(&node_id) { + if let Some(enr) = self.find_enr(&node_id, true) { if enr.seq() < enr_seq { // request an ENR update debug!("Requesting an ENR update from: {}", active_request.contact); @@ -1962,16 +1968,9 @@ impl Service { } /// Requests a node to advertise the sending node for a given topic hash. - fn reg_topic_request( - &mut self, - contact: NodeContact, - topic: Topic, - enr: Enr, - ticket: RequestTicket, - ) { + fn reg_topic_request(&mut self, contact: NodeContact, topic: Topic, ticket: RequestTicket) { let request_body = RequestBody::RegisterTopic { topic: topic.topic(), - enr, ticket, }; trace!("Sending reg topic to node {}", contact.socket_addr()); @@ -2210,7 +2209,7 @@ impl Service { request_body: RequestBody, ) { // find the ENR associated with the query - if let Some(enr) = self.find_enr(&return_peer) { + if let Some(enr) = self.find_enr(&return_peer, true) { match NodeContact::try_from_enr(enr, self.config.ip_mode) { Ok(contact) => { let active_request = ActiveRequest { @@ -2271,7 +2270,7 @@ impl Service { } } - /// Processes discovered peers from a query or a TOPICQUERY or REGTOPIC request. + /// Processes discovered peers from a FINDNODE query looking up a node id or a topic hash. 
fn discovered(&mut self, source: &NodeId, mut enrs: Vec, query_id: Option) { let local_id = self.local_enr.read().node_id(); @@ -2602,11 +2601,7 @@ impl Service { self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic)); return; } - RequestBody::RegisterTopic { - topic, - enr: _, - ticket: _, - } => { + RequestBody::RegisterTopic { topic, ticket: _ } => { let peer_key: kbucket::Key = node_id.into(); let topic = Topic::new(topic); let topic_hash = topic.hash(); From 4baeafe8068227ff3bc26dd52c3d7a6ec8268ec4 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 8 Sep 2022 01:07:01 +0200 Subject: [PATCH 378/391] Move repeated code to closure --- src/service.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/service.rs b/src/service.rs index 8dc8e9ece..d939fe79e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1453,8 +1453,7 @@ impl Service { if !topic_in_enr(&topic.hash()) { warn!("The topic given in the REGTOPIC request body cannot be found in sender's 'topics' enr field. Blacklisting peer {}.", node_address.node_id); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); self.rpc_failure(id, RequestError::InvalidEnrTopicsField); return; } @@ -1467,8 +1466,7 @@ impl Service { if waited_time < wait_time || waited_time >= wait_time + WAIT_TIME_TOLERANCE { warn!("The REGTOPIC has not waited the time assigned in the ticket. Blacklisting peer {}.", node_address.node_id); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); self.rpc_failure(id, RequestError::InvalidWaitTime); return; } @@ -1596,9 +1594,7 @@ impl Service { "Peer returned more than one ENR for itself. 
Blacklisting {}", node_address ); - let ban_timeout = - self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); nodes.retain(|enr| { peer_key.log2_distance(&enr.node_id().into()).is_none() }); @@ -1618,9 +1614,7 @@ impl Service { "Peer sent invalid ENR. Blacklisting {}", active_request.contact ); - let ban_timeout = - self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); } } } From ef1d76aa019f7d9d1d268c3f7e78a2194e7c2a93 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 8 Sep 2022 01:19:00 +0200 Subject: [PATCH 379/391] Clean up and comment --- src/discv5.rs | 36 ++++++++++++++++++------------------ src/discv5/test.rs | 4 ++-- src/handler/mod.rs | 4 ++-- src/rpc.rs | 13 +++++++++++++ src/service.rs | 26 +++++++++++++------------- 5 files changed, 48 insertions(+), 35 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index d8a7feb26..d2f0dd72b 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -66,24 +66,6 @@ pub enum Version { Topics = 1, } -/// Check if a given peer supports a given version of the Discv5 protocol. -pub const CHECK_VERSION: fn(peer: &Enr, version: Version) -> bool = |peer, version| { - if let Some(supported_versions) = peer.get(ENR_KEY_VERSION) { - if let Some(supported_versions) = supported_versions.first() { - let version_num = version as u8; - supported_versions & version_num == version_num - } else { - false - } - } else { - warn!( - "Enr of peer {} doesn't contain field 'version'", - peer.node_id() - ); - false - } -}; - mod test; /// Events that can be produced by the `Discv5` event stream. @@ -885,3 +867,21 @@ impl Drop for Discv5 { self.shutdown(); } } + +/// Check if a given peer supports a given version of the Discv5 protocol. 
+pub fn check_version(peer: &Enr, version: Version) -> bool { + if let Some(supported_versions) = peer.get(ENR_KEY_VERSION) { + if let Some(supported_versions) = supported_versions.first() { + let version_num = version as u8; + supported_versions & version_num == version_num + } else { + false + } + } else { + warn!( + "Enr of peer {} doesn't contain field 'version'", + peer.node_id() + ); + false + } +} diff --git a/src/discv5/test.rs b/src/discv5/test.rs index 8abfadecc..073df9dce 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -1,7 +1,7 @@ #![cfg(test)] use crate::{ - discv5::{Version, CHECK_VERSION, ENR_KEY_VERSION}, + discv5::{check_version, Version, ENR_KEY_VERSION}, kbucket, Discv5, *, }; use enr::{k256, CombinedKey, Enr, EnrBuilder, EnrKey, NodeId}; @@ -643,5 +643,5 @@ fn test_version_check() { enr.insert(ENR_KEY_VERSION, &[supported_versions], &key) .unwrap(); - assert!(CHECK_VERSION(&enr, Version::Topics)); + assert!(check_version(&enr, Version::Topics)); } diff --git a/src/handler/mod.rs b/src/handler/mod.rs index ad59a0e9b..3cdc817f5 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -33,7 +33,7 @@ use crate::{ error::{Discv5Error, RequestError}, packet::{ChallengeData, IdNonce, MessageNonce, Packet, PacketKind}, rpc::{Message, Request, RequestBody, RequestId, Response, ResponseBody, FALSE_TICKET}, - service::BAN_MALICIOUS_PEER, + service::ban_malicious_peer, socket, socket::{FilterConfig, Socket}, Enr, Topic, @@ -871,7 +871,7 @@ impl Handler { warn!("Failed to decode message. Error: {:?}, {}", e, node_address); if let DecoderError::Custom(FALSE_TICKET) = e { warn!("Node sent a ticket that couldn't be decrypted with local ticket key. 
Blacklisting peer {}", node_address.node_id); - BAN_MALICIOUS_PEER(self.ban_duration, node_address.clone()); + ban_malicious_peer(self.ban_duration, node_address.clone()); self.fail_session( &node_address, RequestError::InvalidRemotePacket, diff --git a/src/rpc.rs b/src/rpc.rs index c969e20d2..02e34f70c 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -15,10 +15,18 @@ use tracing::{debug, error, warn}; pub const FALSE_TICKET: &str = "TICKET_ENCRYPTED_BY_FOREIGN_KEY"; +/// A ticket contained in the body of a REGTOPIC request. #[derive(Debug, Clone, Eq, PartialEq)] pub enum RequestTicket { + /// If this REGTOPIC is the first being sent to a given peer, no + /// ticket will be at hand. Empty, + /// This is an incoming REGTOPIC request with a ticket this node + /// issued to the sender at the prior registration attempt. LocallyIssued(Ticket), + /// This is an outgoing REGTOPIC request returning a ticket + /// received from the recipient at the prior registration + /// attempt. RemotelyIssued(Vec), } @@ -83,9 +91,14 @@ impl std::fmt::Display for RequestTicket { } } +/// A ticket contained in the body of a TICKET response. #[derive(Debug, Clone, Eq, PartialEq)] pub enum ResponseTicket { + /// This is an outgoing TICKET response containing a locally + /// assembled ticket. LocallyIssued(Ticket), + /// This is an incoming TICKET response containing a ticket + /// issued by the sender. 
RemotelyIssued(Vec), } diff --git a/src/service.rs b/src/service.rs index d939fe79e..5d4050ea0 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,7 @@ use crate::{ topic::TopicHash, Ads, AD_LIFETIME, }, - discv5::{Version, CHECK_VERSION, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST}, + discv5::{check_version, Version, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -83,12 +83,6 @@ const MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET: usize = 16; /// The duration in seconds which a node can come late to an assigned wait time. const WAIT_TIME_TOLERANCE: Duration = Duration::from_secs(5); -pub const BAN_MALICIOUS_PEER: fn(ban_duration: Option, node_address: NodeAddress) = - |ban_duration, node_address| { - let ban_timeout = ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - }; - /// Request type for Protocols using `TalkReq` message. /// /// Automatically responds with an empty body on drop if @@ -749,7 +743,7 @@ impl Service { let mut discovered_new_peer = false; if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { for enr in found_enrs { - if !CHECK_VERSION(&enr, Version::Topics) { + if !check_version(&enr, Version::Topics) { continue; } trace!("Found new peer {} for topic {}", enr, topic_hash); @@ -942,7 +936,7 @@ impl Service { for entry in self.kbuckets.write().iter() { let enr = entry.node.value.clone(); - if !CHECK_VERSION(&enr, Version::Topics) { + if !check_version(&enr, Version::Topics) { continue; } match kbuckets.insert_or_update(entry.node.key, enr, entry.status) { @@ -1453,7 +1447,7 @@ impl Service { if !topic_in_enr(&topic.hash()) { warn!("The topic given in the REGTOPIC request body cannot be found in sender's 'topics' enr field. 
Blacklisting peer {}.", node_address.node_id); - BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); + ban_malicious_peer(self.config.ban_duration, node_address); self.rpc_failure(id, RequestError::InvalidEnrTopicsField); return; } @@ -1466,7 +1460,7 @@ impl Service { if waited_time < wait_time || waited_time >= wait_time + WAIT_TIME_TOLERANCE { warn!("The REGTOPIC has not waited the time assigned in the ticket. Blacklisting peer {}.", node_address.node_id); - BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); + ban_malicious_peer(self.config.ban_duration, node_address); self.rpc_failure(id, RequestError::InvalidWaitTime); return; } @@ -1594,7 +1588,7 @@ impl Service { "Peer returned more than one ENR for itself. Blacklisting {}", node_address ); - BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); + ban_malicious_peer(self.config.ban_duration, node_address); nodes.retain(|enr| { peer_key.log2_distance(&enr.node_id().into()).is_none() }); @@ -1614,7 +1608,7 @@ impl Service { "Peer sent invalid ENR. Blacklisting {}", active_request.contact ); - BAN_MALICIOUS_PEER(self.config.ban_duration, node_address); + ban_malicious_peer(self.config.ban_duration, node_address); } } } @@ -2711,6 +2705,12 @@ impl Service { } } +/// If a peer behaves maliciously, the peer can be banned for a certain time span. +pub fn ban_malicious_peer(ban_duration: Option, node_address: NodeAddress) { + let ban_timeout = ban_duration.map(|v| Instant::now() + v); + PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); +} + /// The result of the `query_event_poll` indicating an action is required to further progress an /// active query. 
enum QueryEvent { From ad01569f4780f6c994b2c41a67dc87150fdf5c55 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 8 Sep 2022 12:15:21 +0200 Subject: [PATCH 380/391] Remove unused methods --- src/rpc.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 02e34f70c..276b0f89f 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -987,23 +987,6 @@ impl Ticket { pub fn update_cum_wait(&mut self) { self.cum_wait = self.cum_wait + self.wait_time; }*/ - - pub fn encode(&self) -> Vec { - let mut buf = Vec::new(); - let mut s = RlpStream::new(); - s.append(self); - buf.extend_from_slice(&s.out()); - buf - } - - pub fn decode(ticket: &[u8]) -> Result, DecoderError> { - if !ticket.is_empty() { - let rlp = rlp::Rlp::new(ticket); - let ticket = rlp.as_val::()?; - return Ok(Some(ticket)); - } - Ok(None) - } } impl std::fmt::Display for Ticket { From 0b09f60bb4b7f2f9547c45d66bcc04383b6161e3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 8 Sep 2022 12:54:30 +0200 Subject: [PATCH 381/391] Correct name --- src/discv5.rs | 30 +++++++++++++++--------------- src/discv5/test.rs | 8 ++++---- src/service.rs | 6 +++--- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index d2f0dd72b..fbb8e21e4 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -57,11 +57,11 @@ pub static HASH: for<'a> fn(topic: &'a str) -> TopicHash = |topic| { pub(crate) const KBUCKET_PENDING_TIMEOUT: Duration = Duration::from_secs(60); /// Custom ENR keys. -const ENR_KEY_VERSION: &str = "version"; +const ENR_KEY_FEATURES: &str = "features"; pub const ENR_KEY_TOPICS: &str = "topics"; -/// Discv5 versions. -pub enum Version { +/// Discv5 features. +pub enum Features { /// The protocol for advertising and looking up to topics in Discv5 is supported. Topics = 1, } @@ -156,16 +156,16 @@ impl Discv5 { ))); // This node supports topic requests REGTOPIC and TOPICQUERY, and their responses. 
- if let Err(e) = - local_enr - .write() - .insert(ENR_KEY_VERSION, &[Version::Topics as u8], &enr_key.write()) - { + if let Err(e) = local_enr.write().insert( + ENR_KEY_FEATURES, + &[Features::Topics as u8], + &enr_key.write(), + ) { error!("Failed writing to enr. Error {:?}", e); return Err("Failed to insert field 'version' into local enr"); } - println!("{:?}", local_enr.read().get(ENR_KEY_VERSION).unwrap()); + println!("{:?}", local_enr.read().get(ENR_KEY_FEATURES).unwrap()); // Update the PermitBan list based on initial configuration *PERMIT_BAN_LIST.write() = config.permit_ban_list.clone(); @@ -868,12 +868,12 @@ impl Drop for Discv5 { } } -/// Check if a given peer supports a given version of the Discv5 protocol. -pub fn check_version(peer: &Enr, version: Version) -> bool { - if let Some(supported_versions) = peer.get(ENR_KEY_VERSION) { - if let Some(supported_versions) = supported_versions.first() { - let version_num = version as u8; - supported_versions & version_num == version_num +/// Check if a given peer supports a given feature of the Discv5 protocol. 
+pub fn supports_feature(peer: &Enr, feature: Features) -> bool { + if let Some(supported_features) = peer.get(ENR_KEY_FEATURES) { + if let Some(supported_features_num) = supported_features.first() { + let feature_num = feature as u8; + supported_features_num & feature_num == feature_num } else { false } diff --git a/src/discv5/test.rs b/src/discv5/test.rs index 073df9dce..ca3dae1f9 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -1,7 +1,7 @@ #![cfg(test)] use crate::{ - discv5::{check_version, Version, ENR_KEY_VERSION}, + discv5::{supports_feature, Features, ENR_KEY_FEATURES}, kbucket, Discv5, *, }; use enr::{k256, CombinedKey, Enr, EnrBuilder, EnrKey, NodeId}; @@ -639,9 +639,9 @@ fn test_version_check() { .udp4(port) .build(&key) .unwrap(); - let supported_versions = Version::Topics as u8 | 2; - enr.insert(ENR_KEY_VERSION, &[supported_versions], &key) + let supported_versions = Features::Topics as u8 | 2; + enr.insert(ENR_KEY_FEATURES, &[supported_versions], &key) .unwrap(); - assert!(check_version(&enr, Version::Topics)); + assert!(supports_feature(&enr, Features::Topics)); } diff --git a/src/service.rs b/src/service.rs index 5d4050ea0..d53fb2f10 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,7 @@ use crate::{ topic::TopicHash, Ads, AD_LIFETIME, }, - discv5::{check_version, Version, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST}, + discv5::{supports_feature, Features, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST}, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ @@ -743,7 +743,7 @@ impl Service { let mut discovered_new_peer = false; if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { for enr in found_enrs { - if !check_version(&enr, Version::Topics) { + if !supports_feature(&enr, Features::Topics) { continue; } trace!("Found new peer {} for topic {}", enr, topic_hash); @@ -936,7 +936,7 @@ impl Service { for entry in self.kbuckets.write().iter() { 
let enr = entry.node.value.clone(); - if !check_version(&enr, Version::Topics) { + if !supports_feature(&enr, Features::Topics) { continue; } match kbuckets.insert_or_update(entry.node.key, enr, entry.status) { From 31115928593415a257ff1eaca40435aac040954a Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 8 Sep 2022 16:09:12 +0200 Subject: [PATCH 382/391] Run cargo fmt --- src/service.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index d53fb2f10..b63a259b1 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,9 @@ use crate::{ topic::TopicHash, Ads, AD_LIFETIME, }, - discv5::{supports_feature, Features, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST}, + discv5::{ + supports_feature, Features, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST, + }, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ From f2caf4ab8123a3f9479f2d3f27dcdc4fbb6cccbc Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 16:50:45 +0200 Subject: [PATCH 383/391] Change name --- src/discv5.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/discv5.rs b/src/discv5.rs index fbb8e21e4..58b3f0755 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -566,7 +566,7 @@ impl Discv5 { /// Looks up a given topic on other nodes that, if currently advertising the given topic, return the enrs of /// those ads. The query keeps going through the given topic's kbuckets until a certain number (passed to /// [`crate::service::ActiveTopicQueries`] upon instantiation) of results are obtained or the query times out. 
- pub fn topic_query_req( + pub fn topic_query( &self, topic: &'static str, ) -> impl Future, RequestError>> + 'static { From 2d5954d9bfbae6039372a3fd90178f6d799ba125 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 16:58:08 +0200 Subject: [PATCH 384/391] Fix merge conflict --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 164faef49..be08a23e5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ zeroize = { version = "1.4.3", features = ["zeroize_derive"] } futures = "0.3.19" uint = { version = "0.9.1", default-features = false } rlp = "0.5.1" +sha2 = "0.9.5" # This version must be kept up to date do it uses the same dependencies as ENR hkdf = "0.12.3" hex = "0.4.3" From 66f43ae4bd4ebbfb9fc7cf909e2099bce33b8cb7 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 17:58:56 +0200 Subject: [PATCH 385/391] Fix bad return bug --- src/service.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index b63a259b1..70a802e9e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -737,7 +737,6 @@ impl Service { if callback.send(found_enrs).is_err() { warn!("Callback dropped for query {}. Results dropped", *id); } - return; } if let QueryType::FindTopic(topic_key) = query_type { From b6992900444765387c94de78ec14e2bb153f6960 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 18:06:14 +0200 Subject: [PATCH 386/391] fixup! Fix bad return bug --- src/service.rs | 145 +++++++++++++++++++++++++------------------------ 1 file changed, 75 insertions(+), 70 deletions(-) diff --git a/src/service.rs b/src/service.rs index 70a802e9e..1e2420fa8 100644 --- a/src/service.rs +++ b/src/service.rs @@ -733,84 +733,89 @@ impl Service { } } - if let Some(callback) = result.target.callback { - if callback.send(found_enrs).is_err() { - warn!("Callback dropped for query {}. 
Results dropped", *id); + match result.target.callback { + Some(callback) => { + if callback.send(found_enrs).is_err() { + warn!("Callback dropped for query {}. Results dropped", *id); + } } - } - - if let QueryType::FindTopic(topic_key) = query_type { - let topic_hash = TopicHash::from_raw(topic_key.raw()); - let mut discovered_new_peer = false; - if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { - for enr in found_enrs { - if !supports_feature(&enr, Features::Topics) { - continue; - } - trace!("Found new peer {} for topic {}", enr, topic_hash); - let key = kbucket::Key::from(enr.node_id()); - - // If the ENR exists in the routing table and the discovered ENR has a greater - // sequence number, perform some filter checks before updating the enr. - - let must_update_enr = match kbuckets_topic.entry(&key) { - kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), - kbucket::Entry::Absent(_) => { - trace!( - "Discovered new peer {} for topic hash {}", - enr.node_id(), - topic_hash - ); - // A QueryType::FindTopic variant will always time out. The last batch of - // ENRs returned by the last iteration in the query is added to - // discovered_peers_topic, like previous batches of uncontacted peers were - // added to the query itself first. - let discovered_peers = - self.discovered_peers_topic.entry(topic_hash).or_default(); - - let node_id = enr.node_id(); - let peer_key: kbucket::Key = node_id.into(); - let topic_key: kbucket::Key = - NodeId::new(&topic_hash.as_bytes()).into(); - if let Some(distance) = peer_key.log2_distance(&topic_key) { - let bucket = discovered_peers.entry(distance).or_default(); - // If the intermediary storage before the topic's kbuckets is at bounds, discard the - // uncontacted peers. 
- if bucket.len() < MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET { - bucket.insert(node_id, enr.clone()); - discovered_new_peer = true; + None => { + // This was an automatically initiated query to look for more peers + // for a give topic's kbuckets + if let QueryType::FindTopic(topic_key) = query_type { + let topic_hash = TopicHash::from_raw(topic_key.raw()); + let mut discovered_new_peer = false; + if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { + for enr in found_enrs { + if !supports_feature(&enr, Features::Topics) { + continue; + } + trace!("Found new peer {} for topic {}", enr, topic_hash); + let key = kbucket::Key::from(enr.node_id()); + + // If the ENR exists in the routing table and the discovered ENR has a greater + // sequence number, perform some filter checks before updating the enr. + + let must_update_enr = match kbuckets_topic.entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Absent(_) => { + trace!( + "Discovered new peer {} for topic hash {}", + enr.node_id(), + topic_hash + ); + // A QueryType::FindTopic variant will always time out. The last batch of + // ENRs returned by the last iteration in the query is added to + // discovered_peers_topic, like previous batches of uncontacted peers were + // added to the query itself first. + let discovered_peers = + self.discovered_peers_topic.entry(topic_hash).or_default(); + + let node_id = enr.node_id(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = + NodeId::new(&topic_hash.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let bucket = discovered_peers.entry(distance).or_default(); + // If the intermediary storage before the topic's kbuckets is at bounds, discard the + // uncontacted peers. 
+ if bucket.len() < MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET { + bucket.insert(node_id, enr.clone()); + discovered_new_peer = true; + } else { + debug!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); + } + } + false + } + _ => false, + }; + if must_update_enr { + if let UpdateResult::Failed(reason) = + kbuckets_topic.update_node(&key, enr.clone(), None) + { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR of peer {} for kbucket of topic hash {:?}. Reason: {:?}", + topic_hash, enr.node_id(), reason + ); } else { - debug!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); + // If the enr was successfully updated, progress might be made in a topic lookup + discovered_new_peer = true; } } - false } - _ => false, - }; - if must_update_enr { - if let UpdateResult::Failed(reason) = - kbuckets_topic.update_node(&key, enr.clone(), None) - { - self.peers_to_ping.remove(&enr.node_id()); - debug!( - "Failed to update discovered ENR of peer {} for kbucket of topic hash {:?}. Reason: {:?}", - topic_hash, enr.node_id(), reason - ); - } else { - // If the enr was successfully updated, progress might be made in a topic lookup - discovered_new_peer = true; + if discovered_new_peer { + // If a topic lookup has dried up (no more peers to query), and we now have found new peers or updated enrs for + // known peers to that topic, the query can now proceed as long as it hasn't timed out already. + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { + debug!("Found new peers to send TOPICQUERY to, unsetting query status dry"); + query.dry = false; + } } } } - if discovered_new_peer { - // If a topic lookup has dried up (no more peers to query), and we now have found new peers or updated enrs for - // known peers to that topic, the query can now proceed as long as it hasn't timed out already. 
- if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { - debug!("Found new peers to send TOPICQUERY to, unsetting query status dry"); - query.dry = false; - } - } } } } From 86b7491e2d3770e8baac9886e1427401adc0f560 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 18:29:33 +0200 Subject: [PATCH 387/391] Set features in ENR --- src/discv5.rs | 20 ++++++++------------ src/discv5/test.rs | 8 ++++++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 58b3f0755..d5937cc5d 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -119,7 +119,7 @@ pub struct Discv5 { impl Discv5 { pub fn new( - local_enr: Enr, + mut local_enr: Enr, enr_key: CombinedKey, mut config: Discv5Config, ) -> Result { @@ -145,6 +145,12 @@ impl Discv5 { (None, None) }; + // This node supports topic requests REGTOPIC and TOPICQUERY, and their responses. + if let Err(e) = local_enr.insert(ENR_KEY_FEATURES, &[Features::Topics as u8], &enr_key) { + error!("Failed writing to enr. Error {:?}", e); + return Err("Failed to insert field 'features' into local enr"); + } + let local_enr = Arc::new(RwLock::new(local_enr)); let enr_key = Arc::new(RwLock::new(enr_key)); let kbuckets = Arc::new(RwLock::new(KBucketsTable::new( @@ -155,16 +161,6 @@ impl Discv5 { bucket_filter, ))); - // This node supports topic requests REGTOPIC and TOPICQUERY, and their responses. - if let Err(e) = local_enr.write().insert( - ENR_KEY_FEATURES, - &[Features::Topics as u8], - &enr_key.write(), - ) { - error!("Failed writing to enr. 
Error {:?}", e); - return Err("Failed to insert field 'version' into local enr"); - } - println!("{:?}", local_enr.read().get(ENR_KEY_FEATURES).unwrap()); // Update the PermitBan list based on initial configuration @@ -879,7 +875,7 @@ pub fn supports_feature(peer: &Enr, feature: Features) -> bool { } } else { warn!( - "Enr of peer {} doesn't contain field 'version'", + "Enr of peer {} doesn't contain field 'features'", peer.node_id() ); false diff --git a/src/discv5/test.rs b/src/discv5/test.rs index ca3dae1f9..5182243a7 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -629,7 +629,7 @@ async fn test_bucket_limits() { } #[test] -fn test_version_check() { +fn test_features_check() { // Create the test values needed let port = 6666; let ip: std::net::IpAddr = "127.0.0.1".parse().unwrap(); @@ -639,9 +639,13 @@ fn test_version_check() { .udp4(port) .build(&key) .unwrap(); - let supported_versions = Features::Topics as u8 | 2; + + let supported_versions = Features::Topics as u8; + enr.insert(ENR_KEY_FEATURES, &[supported_versions], &key) .unwrap(); assert!(supports_feature(&enr, Features::Topics)); } + + From 5b2209a6af94f80f201c7ac1e150dc66c68c6768 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 19:13:56 +0200 Subject: [PATCH 388/391] Return whole topic for human readable name for logs --- src/discv5.rs | 2 +- src/discv5/test.rs | 2 -- src/service.rs | 8 ++++---- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index d5937cc5d..0ff5c729c 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -693,7 +693,7 @@ impl Discv5 { /// Retrieves the topics that we have published on other nodes. pub fn active_topics( &self, - ) -> impl Future>, RequestError>> + 'static { + ) -> impl Future>, RequestError>> + 'static { // the service will verify if this node is contactable, we just send it and // await a response. 
let (callback_send, callback_recv) = oneshot::channel(); diff --git a/src/discv5/test.rs b/src/discv5/test.rs index 5182243a7..b9c233f96 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -647,5 +647,3 @@ fn test_features_check() { assert!(supports_feature(&enr, Features::Topics)); } - - diff --git a/src/service.rs b/src/service.rs index 1e2420fa8..c07179e76 100644 --- a/src/service.rs +++ b/src/service.rs @@ -201,7 +201,7 @@ pub enum ServiceRequest { oneshot::Sender, RequestError>>, ), /// Retrieves the ads currently published by this node on other nodes in a discv5 network. - ActiveTopics(oneshot::Sender>, RequestError>>), + ActiveTopics(oneshot::Sender>, RequestError>>), /// Stops publishing this node as an advertiser for a topic. StopRegistrationOfTopic(Topic, oneshot::Sender>), /// Retrieves the ads advertised for other nodes for a given topic. @@ -882,8 +882,8 @@ impl Service { } } - fn get_active_topics(&mut self) -> HashMap> { - let mut active_topics = HashMap::>::new(); + fn get_active_topics(&mut self) -> HashMap> { + let mut active_topics = HashMap::>::new(); self.registration_attempts .iter_mut() .for_each(|(topic, reg_attempts_by_distance)| { @@ -894,7 +894,7 @@ impl Service { RegistrationState::Confirmed(insert_time) => { if insert_time.elapsed() < AD_LIFETIME { active_topics - .entry(topic.hash()) + .entry(topic.clone()) .or_default() .push(*node_id); true From a18ae8c2fff1f186d65ae22d023fde0d37305a72 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 19:47:53 +0200 Subject: [PATCH 389/391] Only log events for topics to maintain backwards compatibility --- src/discv5.rs | 9 --------- src/service.rs | 13 +++++-------- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/src/discv5.rs b/src/discv5.rs index 0ff5c729c..a415cd597 100644 --- a/src/discv5.rs +++ b/src/discv5.rs @@ -77,9 +77,6 @@ pub enum Discv5Event { /// This happen spontaneously through queries as nodes return ENRs. 
These ENRs are not /// guaranteed to be live or contactable. Discovered(Enr), - /// A node has been discovered from a FINDNODE request using the given TopiHash as key. - /// See [`Discv5Event::Discovered`]. - DiscoveredPeerTopic(Enr, TopicHash), /// A new ENR was added to the routing table. EnrAdded { enr: Enr, replaced: Option }, /// A new node has been added to the routing table. @@ -87,12 +84,6 @@ pub enum Discv5Event { node_id: NodeId, replaced: Option, }, - /// A new node has been added to a topic hash kbucket. - NodeInsertedTopic { - node_id: NodeId, - replaced: Option, - topic_hash: TopicHash, - }, /// A new session has been established with a node. SessionEstablished(Enr, SocketAddr), /// Our local ENR IP address has been updated. diff --git a/src/service.rs b/src/service.rs index c07179e76..a6f271aee 100644 --- a/src/service.rs +++ b/src/service.rs @@ -705,7 +705,7 @@ impl Service { self.send_event(event); } Some(event) = Service::bucket_maintenance_poll_topics(self.topics_kbuckets.iter_mut()) => { - self.send_event(event); + debug!("{}", event); } query_event = Service::query_event_poll(&mut self.queries) => { match query_event { @@ -2663,18 +2663,15 @@ impl Service { /// the routing table. async fn bucket_maintenance_poll_topics( kbuckets: impl Iterator)>, - ) -> Option { + ) -> Option { // Drain applied pending entries from the routing table. let mut update_kbuckets_futures = Vec::new(); for (topic_hash, topic_kbuckets) in kbuckets { update_kbuckets_futures.push(future::poll_fn(move |_cx| { if let Some(entry) = (*topic_kbuckets).take_applied_pending() { - let event = Discv5Event::NodeInsertedTopic { - node_id: entry.inserted.into_preimage(), - replaced: entry.evicted.map(|n| n.key.into_preimage()), - topic_hash: *topic_hash, - }; - return Poll::Ready(event); + let node_id = entry.inserted.into_preimage(); + let replaced = entry.evicted.map(|n| n.key.into_preimage()); + return Poll::Ready(format!("Node {} has been inserted into kbuckets of topic {}. 
Replaced: {:?}", node_id, topic_hash, replaced)); } Poll::Pending })); From b37e1b14031bbff96752a6d63073158d0db3b75c Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 19:48:41 +0200 Subject: [PATCH 390/391] fixup! Only log events for topics to maintain backwards compatibility --- src/service.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/service.rs b/src/service.rs index a6f271aee..be0923dee 100644 --- a/src/service.rs +++ b/src/service.rs @@ -2671,7 +2671,10 @@ impl Service { if let Some(entry) = (*topic_kbuckets).take_applied_pending() { let node_id = entry.inserted.into_preimage(); let replaced = entry.evicted.map(|n| n.key.into_preimage()); - return Poll::Ready(format!("Node {} has been inserted into kbuckets of topic {}. Replaced: {:?}", node_id, topic_hash, replaced)); + return Poll::Ready(format!( + "Node {} has been inserted into kbuckets of topic {}. Replaced: {:?}", + node_id, topic_hash, replaced + )); } Poll::Pending })); From 69cd50774cfa187986dbf6fcb0371848669aa2b6 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 9 Sep 2022 19:49:21 +0200 Subject: [PATCH 391/391] fixup! 
Only log events for topics to maintain backwards compatibility --- examples/find_nodes.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/examples/find_nodes.rs b/examples/find_nodes.rs index 8e7026e3d..0e060ffea 100644 --- a/examples/find_nodes.rs +++ b/examples/find_nodes.rs @@ -191,10 +191,8 @@ async fn main() { } match discv5_ev { Discv5Event::Discovered(enr) => info!("Enr discovered {}", enr), - Discv5Event::DiscoveredPeerTopic(enr, topic_hash) => info!("Enr discovered {} for topic {}", enr, topic_hash), Discv5Event::EnrAdded { enr, replaced: _ } => info!("Enr added {}", enr), Discv5Event::NodeInserted { node_id, replaced: _ } => info!("Node inserted {}", node_id), - Discv5Event::NodeInsertedTopic { node_id, replaced: _, topic_hash } => info!("Node inserted {} in topic hash {} kbucket", node_id, topic_hash), Discv5Event::SessionEstablished(enr, _) => info!("Session established {}", enr), Discv5Event::SocketUpdated(addr) => info!("Socket updated {}", addr), Discv5Event::TalkRequest(_) => info!("Talk request received"),