diff --git a/rust/Cargo.toml b/rust/Cargo.toml index f3b7ecc233..e48ccdcf2d 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -10,7 +10,7 @@ members = [ "cbork-cddl-parser", "cbork-utils", "catalyst-voting", - "catalyst-types", + "catalyst-types", "immutable-ledger", "vote-tx-v1", "vote-tx-v2", @@ -60,3 +60,4 @@ string_slice = "deny" unchecked_duration_subtraction = "deny" unreachable = "deny" missing_docs_in_private_items = "deny" +arithmetic_side_effects = "deny" diff --git a/rust/Earthfile b/rust/Earthfile index 230941a0f7..f3f55f21de 100644 --- a/rust/Earthfile +++ b/rust/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.2.27 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.2.28 AS rust-ci COPY_SRC: FUNCTION diff --git a/rust/c509-certificate/src/attributes/mod.rs b/rust/c509-certificate/src/attributes/mod.rs index d56ee25a24..53818edce1 100644 --- a/rust/c509-certificate/src/attributes/mod.rs +++ b/rust/c509-certificate/src/attributes/mod.rs @@ -62,7 +62,12 @@ impl Encode<()> for Attributes { )); } // The attribute type should be included in array too - encode_array_len(e, "Attributes", self.0.len() as u64 * 2)?; + let Some(len) = (self.0.len() as u64).checked_mul(2) else { + return Err(minicbor::encode::Error::message( + "Attributes length overflow", + )); + }; + encode_array_len(e, "Attributes", len)?; for attribute in &self.0 { attribute.encode(e, ctx)?; } diff --git a/rust/c509-certificate/src/extensions/extension/mod.rs b/rust/c509-certificate/src/extensions/extension/mod.rs index 1d7c2b7c76..c442d7ccad 100644 --- a/rust/c509-certificate/src/extensions/extension/mod.rs +++ b/rust/c509-certificate/src/extensions/extension/mod.rs @@ -110,7 +110,11 @@ impl Encode<()> for Extension { { // Determine encoded OID value based on critical flag let encoded_oid = if self.critical { - -mapped_oid + mapped_oid.checked_neg().ok_or_else(|| { + minicbor::encode::Error::message(format!( + 
"Invalid OID value (will overflow during negation): {mapped_oid}" + )) + })? } else { mapped_oid }; diff --git a/rust/c509-certificate/src/extensions/mod.rs b/rust/c509-certificate/src/extensions/mod.rs index a93eff2ca0..5f1a99d579 100644 --- a/rust/c509-certificate/src/extensions/mod.rs +++ b/rust/c509-certificate/src/extensions/mod.rs @@ -70,7 +70,13 @@ impl Encode<()> for Extensions { if self.0.len() == 1 && extension.registered_oid().c509_oid().oid() == &KEY_USAGE_OID { match extension.value() { ExtensionValue::Int(value) => { - let ku_value = if extension.critical() { -value } else { *value }; + let ku_value = if extension.critical() { + value + .checked_neg() + .ok_or_else(|| minicbor::encode::Error::message(format!("Invalid key usage value (will overflow during negation): {value}")))? + } else { + *value + }; encode_helper(e, "Extensions KeyUsage", ctx, &ku_value)?; return Ok(()); }, diff --git a/rust/c509-certificate/src/general_names/mod.rs b/rust/c509-certificate/src/general_names/mod.rs index 86f6e3488a..951808b9a3 100644 --- a/rust/c509-certificate/src/general_names/mod.rs +++ b/rust/c509-certificate/src/general_names/mod.rs @@ -59,7 +59,12 @@ impl Encode<()> for GeneralNames { )); } // The general name type should be included in array too - encode_array_len(e, "General Names", self.0.len() as u64 * 2)?; + let Some(len) = (self.0.len() as u64).checked_mul(2) else { + return Err(minicbor::encode::Error::message( + "General Names length overflow", + )); + }; + encode_array_len(e, "General Names", len)?; for gn in &self.0 { gn.encode(e, ctx)?; } diff --git a/rust/c509-certificate/src/name/mod.rs b/rust/c509-certificate/src/name/mod.rs index e385926f7d..3e52e07b2c 100644 --- a/rust/c509-certificate/src/name/mod.rs +++ b/rust/c509-certificate/src/name/mod.rs @@ -106,14 +106,24 @@ impl Encode<()> for NameValue { encode_cn_value(e, cn_value)?; } else { - encode_array_len(e, "Attributes", attrs.len() as u64 * 2)?; + let Some(len) = (attrs.len() as 
u64).checked_mul(2) else { + return Err(minicbor::encode::Error::message( + "Attribute length overflow", + )); + }; + encode_array_len(e, "Attributes", len)?; for attribute in attrs { attribute.encode(e, ctx)?; } } } else { // If is okay if the attributes is empty - encode_array_len(e, "Attributes", attrs.len() as u64 * 2)?; + let Some(len) = (attrs.len() as u64).checked_mul(2) else { + return Err(minicbor::encode::Error::message( + "Attribute length overflow", + )); + }; + encode_array_len(e, "Attributes", len)?; for attribute in attrs { attribute.encode(e, ctx)?; } diff --git a/rust/cardano-blockchain-types/src/auxdata/scripts.rs b/rust/cardano-blockchain-types/src/auxdata/scripts.rs index 8109020c7e..a681a2e24a 100644 --- a/rust/cardano-blockchain-types/src/auxdata/scripts.rs +++ b/rust/cardano-blockchain-types/src/auxdata/scripts.rs @@ -100,7 +100,7 @@ impl TryFrom for ScriptType { match value { 0 => Err(anyhow!("Invalid script type: {}", value)), 1 => Ok(Self::Native), - _ => Ok(Self::Plutus(value - 1)), + _ => Ok(Self::Plutus(value.saturating_sub(1))), } } } diff --git a/rust/cardano-blockchain-types/src/fork.rs b/rust/cardano-blockchain-types/src/fork.rs index 6c1f57c4c4..1c72f704ae 100644 --- a/rust/cardano-blockchain-types/src/fork.rs +++ b/rust/cardano-blockchain-types/src/fork.rs @@ -71,7 +71,7 @@ impl fmt::Display for Fork { 0 => write!(f, "IMMUTABLE"), 1 => write!(f, "BACKFILL"), // For live forks: 2 maps to LIVE:1, 3 maps to LIVE:2 etc. 
- 2..=u64::MAX => write!(f, "LIVE:{}", self.0 - 1), + 2..=u64::MAX => write!(f, "LIVE:{}", self.0.saturating_sub(1)), } } } diff --git a/rust/cardano-blockchain-types/src/metadata/cip36/key_registration.rs b/rust/cardano-blockchain-types/src/metadata/cip36/key_registration.rs index 31dd52fc06..7dfed5ec47 100644 --- a/rust/cardano-blockchain-types/src/metadata/cip36/key_registration.rs +++ b/rust/cardano-blockchain-types/src/metadata/cip36/key_registration.rs @@ -150,7 +150,11 @@ fn check_is_key_exist( if found_keys.contains(key) { err_report.duplicate_field( format!("{key:?}").as_str(), - format!("Redundant key found in item {} in RBAC map", index + 1).as_str(), + format!( + "Redundant key found in item {} in RBAC map", + index.saturating_add(1) + ) + .as_str(), format!("CIP36 Key Registration {key:?}").as_str(), ); return true; diff --git a/rust/cardano-blockchain-types/src/multi_era_block_data.rs b/rust/cardano-blockchain-types/src/multi_era_block_data.rs index a1508950b3..6e92cdc01d 100644 --- a/rust/cardano-blockchain-types/src/multi_era_block_data.rs +++ b/rust/cardano-blockchain-types/src/multi_era_block_data.rs @@ -487,8 +487,10 @@ pub(crate) mod tests { let pallas_block = pallas::ledger::traverse::MultiEraBlock::decode(test_block.raw.as_slice())?; - let previous_point = - Point::new((pallas_block.slot() - 1).into(), vec![0; 32].try_into()?); + let previous_point = Point::new( + (pallas_block.slot().checked_sub(1).unwrap()).into(), + vec![0; 32].try_into()?, + ); let block = MultiEraBlock::new( Network::Preprod, @@ -511,7 +513,7 @@ pub(crate) mod tests { pallas::ledger::traverse::MultiEraBlock::decode(test_block.raw.as_slice())?; let previous_point = Point::new( - (pallas_block.slot() - 1).into(), + (pallas_block.slot().checked_sub(1).unwrap()).into(), pallas_block .header() .previous_hash() @@ -540,7 +542,7 @@ pub(crate) mod tests { let prev_point = pallas::ledger::traverse::MultiEraBlock::decode(block.as_slice()) .map(|block| { Point::new( - (block.slot() 
- 1).into(), + (block.slot().saturating_sub(1)).into(), block .header() .previous_hash() diff --git a/rust/cardano-blockchain-types/src/network.rs b/rust/cardano-blockchain-types/src/network.rs index 536fdc0b16..8bc12a9ec7 100644 --- a/rust/cardano-blockchain-types/src/network.rs +++ b/rust/cardano-blockchain-types/src/network.rs @@ -190,23 +190,23 @@ impl Network { let (elapsed_slots, era_known_slot) = if time < shelley_start_time { // Byron era - let time_diff = time - byron_start_time; - ( - time_diff.num_seconds() / i64::from(genesis.byron_slot_length), - genesis.byron_known_slot, - ) + let time_diff = time.signed_duration_since(byron_start_time); + let elapsed_slots = time_diff + .num_seconds() + .checked_div(i64::from(genesis.byron_slot_length))?; + (elapsed_slots, genesis.byron_known_slot) } else { // Shelley era - let time_diff = time - shelley_start_time; - ( - time_diff.num_seconds() / i64::from(genesis.shelley_slot_length), - genesis.shelley_known_slot, - ) + let time_diff = time.signed_duration_since(shelley_start_time); + let elapsed_slots = time_diff + .num_seconds() + .checked_div(i64::from(genesis.shelley_slot_length))?; + (elapsed_slots, genesis.shelley_known_slot) }; let era_known_slot = i64::try_from(era_known_slot).ok()?; - - Some(Slot::from_saturating(elapsed_slots + era_known_slot)) + let slot = elapsed_slots.checked_add(era_known_slot)?; + Some(Slot::from_saturating(slot)) } } diff --git a/rust/cardano-chain-follower/examples/follow_chains.rs b/rust/cardano-chain-follower/examples/follow_chains.rs index a1aef7be96..b3390be4ff 100644 --- a/rust/cardano-chain-follower/examples/follow_chains.rs +++ b/rust/cardano-chain-follower/examples/follow_chains.rs @@ -96,7 +96,11 @@ async fn start_sync_for(network: &Network, matches: ArgMatches) -> Result<(), Bo let mithril_dl_workers = format!("{}", dl_config.workers); if let Some(chunk_size) = matches.get_one::("mithril-sync-chunk-size") { - dl_config = dl_config.with_chunk_size(*chunk_size as usize * 
1024 * 1024); + let chunk_size = (*chunk_size as usize) + .checked_mul(1024) + .and_then(|v| v.checked_mul(1024)) + .ok_or_else(|| "Chunk size overflow")?; + dl_config = dl_config.with_chunk_size(chunk_size); } let mithril_dl_chunk_size = format!("{} MBytes", dl_config.chunk_size / (1024 * 1024)); @@ -176,7 +180,7 @@ async fn follow_for(network: Network, matches: ArgMatches) { let mut largest_aux_size: usize = 0; while let Some(chain_update) = follower.next().await { - updates += 1; + updates = updates.saturating_add(1); if chain_update.tip { reached_tip = true; @@ -196,7 +200,7 @@ async fn follow_for(network: Network, matches: ArgMatches) { info!( chain = network.to_string(), "Chain Update {}:{}", - updates - 1, + updates.saturating_sub(1), last_update ); } diff --git a/rust/cardano-chain-follower/src/chain_sync.rs b/rust/cardano-chain-follower/src/chain_sync.rs index ec5bb1e558..a130eb3b02 100644 --- a/rust/cardano-chain-follower/src/chain_sync.rs +++ b/rust/cardano-chain-follower/src/chain_sync.rs @@ -63,7 +63,7 @@ async fn retry_connect( match peer { Ok(peer) => return Ok(peer), Err(err) => { - retries -= 1; + retries = retries.saturating_sub(1); if retries == 0 { return Err(err); } @@ -72,7 +72,7 @@ async fn retry_connect( } }, Err(error) => { - retries -= 1; + retries = retries.saturating_sub(1); if retries == 0 { return Err(pallas::network::facades::Error::ConnectFailure( tokio::io::Error::new( @@ -193,6 +193,7 @@ async fn process_rollback( let head_slot = previous_point.slot_or_default(); debug!("Head slot: {head_slot:?}"); debug!("Rollback slot: {rollback_slot:?}"); + #[allow(clippy::arithmetic_side_effects)] let slot_rollback_size = head_slot - rollback_slot; // We actually do the work here... 
diff --git a/rust/cardano-chain-follower/src/chain_sync_live_chains.rs b/rust/cardano-chain-follower/src/chain_sync_live_chains.rs index 7bbd5233be..bd2e2f42e2 100644 --- a/rust/cardano-chain-follower/src/chain_sync_live_chains.rs +++ b/rust/cardano-chain-follower/src/chain_sync_live_chains.rs @@ -80,7 +80,7 @@ impl ProtectedLiveChainBlockList { chain.get(point)? } else if advance < 0 { // This is a fuzzy lookup backwards. - advance += 1; + advance = advance.saturating_add(1); chain.upper_bound(Bound::Excluded(point))? } else { // This is a fuzzy lookup forwards. @@ -89,13 +89,13 @@ impl ProtectedLiveChainBlockList { // If we are stepping backwards, look backwards. while advance < 0 { - advance += 1; + advance = advance.saturating_add(1); this = this.prev()?; } // If we are stepping forwards, look forwards. while advance > 0 { - advance -= 1; + advance = advance.saturating_sub(1); this = this.next()?; } @@ -234,7 +234,7 @@ impl ProtectedLiveChainBlockList { // We search backwards because a rollback is more likely in the newest blocks than // the oldest. while let Some(popped) = live_chain.pop_back() { - rollback_size += 1; + rollback_size = rollback_size.saturating_add(1); if previous_point.strict_eq(&popped.value().previous()) { // We are now contiguous, so stop purging. break; @@ -352,6 +352,8 @@ impl ProtectedLiveChainBlockList { let mut previous_point = entry.value().point(); // Loop until we exhaust probe slots, OR we would step past genesis. + // This is ok because `Slot` implements saturating subtraction. + #[allow(clippy::arithmetic_side_effects)] while slot_age < reference_slot { let ref_point = Point::fuzzy(reference_slot - slot_age); let Some(entry) = chain.lower_bound(Bound::Included(&ref_point)) else { @@ -391,7 +393,7 @@ impl ProtectedLiveChainBlockList { // Search backwards for a fork smaller than or equal to the one we know. 
while this_block.fork() > fork { - rollback_depth += 1; + rollback_depth = rollback_depth.saturating_add(1); entry = match entry.prev() { Some(entry) => entry, None => return None, diff --git a/rust/cardano-chain-follower/src/follow.rs b/rust/cardano-chain-follower/src/follow.rs index 1dc94e619e..772eaa3051 100644 --- a/rust/cardano-chain-follower/src/follow.rs +++ b/rust/cardano-chain-follower/src/follow.rs @@ -374,7 +374,7 @@ mod tests { .expect("cannot decode block"); let previous_point = Point::new( - (pallas_block.slot() - 1).into(), + (pallas_block.slot().checked_sub(1).unwrap()).into(), pallas_block .header() .previous_hash() diff --git a/rust/cardano-chain-follower/src/mithril_snapshot_iterator.rs b/rust/cardano-chain-follower/src/mithril_snapshot_iterator.rs index 9877a34dab..c67de2f319 100644 --- a/rust/cardano-chain-follower/src/mithril_snapshot_iterator.rs +++ b/rust/cardano-chain-follower/src/mithril_snapshot_iterator.rs @@ -56,6 +56,7 @@ pub(crate) fn probe_point(point: &Point, distance: u64) -> Point { const STEP_BACK_ORIGIN: u64 = 0; // Now that we have the tip, step back about 4 block intervals from tip, and do a fuzzy // iteration to find the exact two blocks at the end of the immutable chain. 
+ #[allow(clippy::arithmetic_side_effects)] let step_back_search = point.slot_or_default() - distance.into(); // We stepped back to the origin, so just return Origin @@ -143,7 +144,7 @@ impl MithrilSnapshotIterator { return iterator; } - backwards_search *= 2; + backwards_search = backwards_search.saturating_mul(2); } } diff --git a/rust/cardano-chain-follower/src/mithril_snapshot_sync.rs b/rust/cardano-chain-follower/src/mithril_snapshot_sync.rs index e77d88b5d7..b0026ed7a2 100644 --- a/rust/cardano-chain-follower/src/mithril_snapshot_sync.rs +++ b/rust/cardano-chain-follower/src/mithril_snapshot_sync.rs @@ -129,7 +129,9 @@ fn calculate_sleep_duration( let mut next_sleep = MINIMUM_MITHRIL_UPDATE_CHECK_INTERVAL; // How long between snapshots, - let mut snapshot_interval = (latest_snapshot.created_at - previous_snapshot.created_at) + let mut snapshot_interval = latest_snapshot + .created_at + .signed_duration_since(previous_snapshot.created_at) .max(MAXIMUM_MITHRIL_UPDATE_CHECK_INTERVAL); // We should never be negative, but we CAN be zero if there was no chronologically @@ -139,11 +141,18 @@ fn calculate_sleep_duration( snapshot_interval = EXPECTED_MITHRIL_UPDATE_CHECK_INTERVAL; } - let next_expected_snapshot = latest_snapshot.created_at + snapshot_interval; + let next_expected_snapshot = latest_snapshot + .created_at + .checked_add_signed(snapshot_interval) + .unwrap_or_else(|| { + // Realistically this should never happen. + error!("latest_snapshot.created_at + snapshot_interval overflow"); + latest_snapshot.created_at + }); if next_expected_snapshot > now { // We are behind schedule. Sleep until the next expected snapshot should be published. - next_sleep = next_expected_snapshot - now; + next_sleep = next_expected_snapshot.signed_duration_since(now); } next_sleep @@ -359,7 +368,7 @@ async fn get_latest_validated_mithril_snapshot( // IF the mithril data we have is NOT the current latest (or the immediately previous), it // may as well be invalid. 
- if latest_mithril < actual_latest.beacon.immutable_file_number - 1 { + if latest_mithril < actual_latest.beacon.immutable_file_number.saturating_sub(1) { return None; } diff --git a/rust/cardano-chain-follower/src/mithril_turbo_downloader.rs b/rust/cardano-chain-follower/src/mithril_turbo_downloader.rs index 0b1671c386..88f41a1d18 100644 --- a/rust/cardano-chain-follower/src/mithril_turbo_downloader.rs +++ b/rust/cardano-chain-follower/src/mithril_turbo_downloader.rs @@ -359,7 +359,9 @@ impl SnapshotDownloader for MithrilTurboDownloader { self.inner.cfg.chain, Some(self.inner.ext_size.load(Ordering::SeqCst)), self.inner.dedup_size.load(Ordering::SeqCst), - tot_files - (chg_files + new_files), + tot_files + .saturating_sub(chg_files) + .saturating_sub(new_files), chg_files, new_files, ); diff --git a/rust/cardano-chain-follower/src/stats/mod.rs b/rust/cardano-chain-follower/src/stats/mod.rs index 75d9d08457..de97374c23 100644 --- a/rust/cardano-chain-follower/src/stats/mod.rs +++ b/rust/cardano-chain-follower/src/stats/mod.rs @@ -153,9 +153,9 @@ pub(crate) fn stats_invalid_block(chain: Network, immutable: bool) { }; if immutable { - chain_stats.mithril.invalid_blocks += 1; + chain_stats.mithril.invalid_blocks = chain_stats.mithril.invalid_blocks.saturating_add(1); } else { - chain_stats.live.invalid_blocks += 1; + chain_stats.live.invalid_blocks = chain_stats.live.invalid_blocks.saturating_add(1); } } @@ -174,7 +174,7 @@ pub(crate) fn new_live_block( return; }; - chain_stats.live.new_blocks += 1; + chain_stats.live.new_blocks = chain_stats.live.new_blocks.saturating_add(1); chain_stats.live.blocks = total_live_blocks; chain_stats.live.head_slot = head_slot; chain_stats.live.tip = tip_slot; @@ -195,7 +195,7 @@ pub(crate) fn new_mithril_update( return; }; - chain_stats.mithril.updates += 1; + chain_stats.mithril.updates = chain_stats.mithril.updates.saturating_add(1); chain_stats.mithril.tip = mithril_tip; chain_stats.live.blocks = total_live_blocks; 
chain_stats.live.tip = tip_slot; @@ -217,7 +217,7 @@ pub(crate) fn backfill_started(chain: Network) { // If we start another backfill, then that means the previous backfill failed, so record // it. if chain_stats.live.backfill_start.is_some() { - chain_stats.live.backfill_failures += 1; + chain_stats.live.backfill_failures = chain_stats.live.backfill_failures.saturating_add(1); chain_stats.live.backfill_failure_time = chain_stats.live.backfill_start; } @@ -255,7 +255,7 @@ pub(crate) fn peer_connected(chain: Network, active: bool, peer_address: &str) { }; if active { - chain_stats.live.reconnects += 1; + chain_stats.live.reconnects = chain_stats.live.reconnects.saturating_add(1); chain_stats.live.last_connect = Utc::now(); chain_stats.live.last_connected_peer = peer_address.to_string(); } else { @@ -335,11 +335,14 @@ pub(crate) fn mithril_dl_finished(chain: Network, dl_size: Option) { if let Some(dl_size) = dl_size { chain_stats.mithril.dl_end = Utc::now(); chain_stats.mithril.dl_size = dl_size; - let last_dl_duration = chain_stats.mithril.dl_end - chain_stats.mithril.dl_start; + let last_dl_duration = chain_stats + .mithril + .dl_end + .signed_duration_since(chain_stats.mithril.dl_start); chain_stats.mithril.last_dl_duration = last_dl_duration.num_seconds().clamp(0, i64::MAX) as u64; } else { - chain_stats.mithril.dl_failures += 1; + chain_stats.mithril.dl_failures = chain_stats.mithril.dl_failures.saturating_add(1); } } @@ -384,7 +387,8 @@ pub(crate) fn mithril_extract_finished( chain_stats.mithril.changed = changed_files; chain_stats.mithril.new = new_files; } else { - chain_stats.mithril.extract_failures += 1; + chain_stats.mithril.extract_failures = + chain_stats.mithril.extract_failures.saturating_add(1); } } @@ -414,7 +418,10 @@ pub(crate) fn mithril_validation_state(chain: Network, mithril_state: MithrilVal match mithril_state { MithrilValidationState::Start => chain_stats.mithril.validate_start = Utc::now(), - MithrilValidationState::Failed => 
chain_stats.mithril.validate_failures += 1, + MithrilValidationState::Failed => { + chain_stats.mithril.validate_failures = + chain_stats.mithril.validate_failures.saturating_add(1); + }, MithrilValidationState::Finish => chain_stats.mithril.validate_end = Utc::now(), } } @@ -449,15 +456,30 @@ pub(crate) fn mithril_sync_failure(chain: Network, failure: MithrilSyncFailures) match failure { MithrilSyncFailures::DownloadOrValidation => { - chain_stats.mithril.download_or_validation_failed += 1; + chain_stats.mithril.download_or_validation_failed = chain_stats + .mithril + .download_or_validation_failed + .saturating_add(1); + }, + MithrilSyncFailures::FailedToGetTip => { + chain_stats.mithril.failed_to_get_tip = + chain_stats.mithril.failed_to_get_tip.saturating_add(1); + }, + MithrilSyncFailures::TipDidNotAdvance => { + chain_stats.mithril.tip_did_not_advance = + chain_stats.mithril.tip_did_not_advance.saturating_add(1); }, - MithrilSyncFailures::FailedToGetTip => chain_stats.mithril.failed_to_get_tip += 1, - MithrilSyncFailures::TipDidNotAdvance => chain_stats.mithril.tip_did_not_advance += 1, MithrilSyncFailures::TipFailedToSendToUpdater => { - chain_stats.mithril.tip_failed_to_send_to_updater += 1; + chain_stats.mithril.tip_failed_to_send_to_updater = chain_stats + .mithril + .tip_failed_to_send_to_updater + .saturating_add(1); }, MithrilSyncFailures::FailedToActivateNewSnapshot => { - chain_stats.mithril.failed_to_activate_new_snapshot += 1; + chain_stats.mithril.failed_to_activate_new_snapshot = chain_stats + .mithril + .failed_to_activate_new_snapshot + .saturating_add(1); }, } } diff --git a/rust/cardano-chain-follower/src/stats/rollback.rs b/rust/cardano-chain-follower/src/stats/rollback.rs index e6b73d090d..09ceabb542 100644 --- a/rust/cardano-chain-follower/src/stats/rollback.rs +++ b/rust/cardano-chain-follower/src/stats/rollback.rs @@ -135,7 +135,7 @@ pub(crate) fn rollback(chain: Network, rollback: RollbackType, depth: u64) { None => Rollback { depth, 
count: 0 }, }; - value.count += 1; + value.count = value.count.saturating_add(1); let _unused = rollbacks.insert(depth, value); } diff --git a/rust/cardano-chain-follower/src/stats/thread/mod.rs b/rust/cardano-chain-follower/src/stats/thread/mod.rs index 5892e47abb..e409e516f9 100644 --- a/rust/cardano-chain-follower/src/stats/thread/mod.rs +++ b/rust/cardano-chain-follower/src/stats/thread/mod.rs @@ -16,6 +16,7 @@ use serde::{ ser::{SerializeStruct, Serializer}, Serialize, }; +use tracing::error; /// Thread statistics. #[derive(Debug, Default, Clone, Serialize)] @@ -89,15 +90,14 @@ impl InnerThreadStat { if let Ok(latest_cpu_time) = self.latest_cpu_time.lock() { // Calculate elapsed time (current - previous) - let elapsed = if current_time > *latest_cpu_time { - current_time - *latest_cpu_time - } else { - Duration::ZERO - }; - // If the elapsed time is non-negative, update total_cpu_time - if elapsed > Duration::ZERO { + if let Some(elapsed) = current_time.checked_sub(*latest_cpu_time) { + // If the elapsed time is non-negative, update total_cpu_time if let Ok(mut total_cpu_time) = self.total_cpu_time.lock() { - *total_cpu_time += elapsed; + if let Some(sum) = total_cpu_time.checked_add(elapsed) { + *total_cpu_time = sum; + } else { + error!("Total CPU time overflow"); + } } } } diff --git a/rust/cardano-chain-follower/src/turbo_downloader/mod.rs b/rust/cardano-chain-follower/src/turbo_downloader/mod.rs index 4adab16d90..f32155942a 100644 --- a/rust/cardano-chain-follower/src/turbo_downloader/mod.rs +++ b/rust/cardano-chain-follower/src/turbo_downloader/mod.rs @@ -18,7 +18,7 @@ use std::{ time::Duration, }; -use anyhow::{bail, Context, Result}; +use anyhow::{anyhow, bail, Context, Result}; use cardano_blockchain_types::Network; use catalyst_types::conversion::from_saturating; use crossbeam_channel::{Receiver, RecvError}; @@ -77,7 +77,7 @@ impl BalancingResolver { } /// Resolve the given URL with the configured resolver. 
- fn resolve(&self, url: &str, worker: usize) -> std::io::Result> { + fn resolve(&self, url: &str, worker: usize) -> std::io::Result> { // debug!("Resolving: {url} for {worker}"); let addresses = if let Some(addresses) = self.cache.get(url) { addresses @@ -96,16 +96,25 @@ impl BalancingResolver { ) })?; - let mut all_addresses: Vec = Vec::new(); + let mut all_addresses: Vec = Vec::new(); for addr in self.resolver.lookup_ip(host.to_string())?.iter() { - all_addresses.push(std::net::SocketAddr::new(addr, port)); + all_addresses.push(SocketAddr::new(addr, port)); } let addresses = Arc::new(all_addresses); self.cache.insert(url.to_string(), addresses.clone()); addresses }; - let worker_addresses = worker % addresses.len(); + let worker_addresses = worker.checked_rem(addresses.len()).ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "Unexpected index: worker = {}, addresses len = {}", + worker, + addresses.len() + ), + ) + })?; // Safe because we bound the index with the length of `addresses`. #[allow(clippy::indexing_slicing)] Ok(vec![addresses[worker_addresses]]) @@ -180,7 +189,7 @@ impl DlConfig { } /// Resolve DNS addresses using Hickory Resolver - fn resolve(url: &str, worker: usize) -> std::io::Result> { + fn resolve(url: &str, worker: usize) -> std::io::Result> { let Some(resolver) = RESOLVER.get() else { return Err(std::io::Error::new( std::io::ErrorKind::Other, @@ -298,21 +307,21 @@ impl ParallelDownloadProcessorInner { /// Get start offset of a chunk. fn chunk_start(&self, chunk: usize) -> usize { - self.cfg.chunk_size * chunk + self.cfg.chunk_size.saturating_mul(chunk) } /// Get inclusive end offset of a chunk. 
fn chunk_end(&self, chunk: usize) -> usize { let start = self.chunk_start(chunk); - if start + self.cfg.chunk_size >= self.file_size { - self.file_size - 1 + if start.saturating_add(self.cfg.chunk_size) >= self.file_size { + self.file_size.saturating_sub(1) } else { - start + self.cfg.chunk_size - 1 + start.saturating_add(self.cfg.chunk_size).saturating_sub(1) } } /// Sends a GET request to download a chunk of the file at the specified range - fn get_range(&self, agent: &ureq::Agent, chunk: usize) -> anyhow::Result>> { + fn get_range(&self, agent: &ureq::Agent, chunk: usize) -> Result>> { let range_start = self.chunk_start(chunk); let range_end_inclusive = self.chunk_end(chunk); let range_header = format!("bytes={range_start}-{range_end_inclusive}"); @@ -331,7 +340,9 @@ impl ParallelDownloadProcessorInner { ) } - let range_size = range_end_inclusive - range_start + 1; + let range_size = range_end_inclusive + .saturating_sub(range_start) + .saturating_add(1); let mut bytes: Vec = Vec::with_capacity(range_size); let bytes_read = get_range_response @@ -349,7 +360,7 @@ impl ParallelDownloadProcessorInner { /// Queue Chunk to processor. /// /// Reorders chunks and sends to the consumer. - fn reorder_queue(&self, chunk: DlChunk) -> anyhow::Result<()> { + fn reorder_queue(&self, chunk: DlChunk) -> Result<()> { self.reorder_queue.insert(chunk.chunk_num, chunk); self.new_chunk_queue_tx.send(Some(()))?; Ok(()) @@ -368,7 +379,7 @@ impl ParallelDownloadProcessor { /// /// Can Fail IF there is no HTTP client provided or the URL does not support getting /// the content length. 
- pub(crate) async fn new(url: &str, mut cfg: DlConfig, chain: Network) -> anyhow::Result { + pub(crate) async fn new(url: &str, mut cfg: DlConfig, chain: Network) -> Result { if cfg.chunk_size < MIN_CHUNK_SIZE { bail!( "Download chunk size must be at least {} bytes", @@ -396,8 +407,12 @@ impl ParallelDownloadProcessor { cfg: cfg.clone(), file_size, last_chunk, - reorder_queue: DashMap::with_capacity((cfg.workers * cfg.queue_ahead) + 1), - work_queue: DashMap::with_capacity(cfg.workers + 1), + reorder_queue: DashMap::with_capacity( + cfg.workers + .saturating_mul(cfg.queue_ahead) + .saturating_add(1), + ), + work_queue: DashMap::with_capacity(cfg.workers.saturating_add(1)), new_chunk_queue_rx: new_chunk_queue.1, new_chunk_queue_tx: new_chunk_queue.0, bytes_downloaded, @@ -413,7 +428,7 @@ impl ParallelDownloadProcessor { /// Starts the worker tasks, they will not start doing any work until `download` is /// called, which happens immediately after they are started. - fn start_workers(&self, chain: Network) -> anyhow::Result<()> { + fn start_workers(&self, chain: Network) -> Result<()> { for worker in 0..self.0.cfg.workers { // The channel is unbounded, because work distribution is controlled to be at most // `work_queue` deep per worker. And we don't want anything unexpected to @@ -467,10 +482,14 @@ impl ParallelDownloadProcessor { // Add a small delay to the first chunks for each worker. // So that the leading chunks are more likely to finish downloading first. if next_chunk > 0 && next_chunk < params.cfg.workers { - let delay = Duration::from_millis(next_chunk as u64 * 2); - thread::sleep(delay); + if let Some(delay) = (next_chunk as u64).checked_mul(2) { + thread::sleep(Duration::from_millis(delay)); + } else { + // This should never happen. 
+ error!("Next chunk delay overflow"); + } } - let mut retries = 0; + let mut retries = 0u8; let mut block; // debug!("Worker {worker_id} DL chunk {next_chunk}"); loop { @@ -486,15 +505,15 @@ impl ParallelDownloadProcessor { if block.is_some() || retries > 3 { break; } - retries += 1; + retries = retries.saturating_add(1); } // debug!("Worker {worker_id} DL chunk done {next_chunk}: {retries}"); if let Some(ref block) = block { if let Some(dl_stat) = params.bytes_downloaded.get(worker_id) { let this_bytes_downloaded = from_saturating(block.len()); - let _last_bytes_downloaded = dl_stat - .fetch_add(this_bytes_downloaded, std::sync::atomic::Ordering::SeqCst); + let _last_bytes_downloaded = + dl_stat.fetch_add(this_bytes_downloaded, Ordering::SeqCst); // debug!("Worker {worker_id} DL chunk {next_chunk}: // {last_bytes_downloaded} + {this_bytes_downloaded} = {}", // last_bytes_downloaded+this_bytes_downloaded); @@ -518,7 +537,10 @@ impl ParallelDownloadProcessor { /// Send a work order to a worker. fn send_work_order(&self, this_worker: usize, order: DlWorkOrder) -> Result { - let next_worker = (this_worker + 1) % self.0.cfg.workers; + let next_worker = this_worker + .checked_add(1) + .and_then(|w| w.checked_rem(self.0.cfg.workers)) + .ok_or_else(|| anyhow!("Unable to calculate next worker: this_worker = {this_worker}, num workers = {}", self.0.cfg.workers))?; if order < self.0.last_chunk { // let params = self.0.clone(); if let Some(worker_queue) = self.0.work_queue.get(&this_worker) { @@ -540,10 +562,10 @@ impl ParallelDownloadProcessor { /// Starts Downloading the file using parallel connections. /// /// Should only be called once on self. - fn download(&self) -> anyhow::Result<()> { + fn download(&self) -> Result<()> { let params = self.0.clone(); // Pre fill the work queue with orders. 
- let max_pre_orders = params.cfg.queue_ahead * params.cfg.workers; + let max_pre_orders = params.cfg.queue_ahead.saturating_mul(params.cfg.workers); let pre_orders = max_pre_orders.min(params.last_chunk); let mut this_worker: usize = 0; @@ -634,15 +656,23 @@ impl ParallelDownloadProcessor { }; // Send whats leftover or new. - let bytes_left = left_over_bytes.len() - offset; - let bytes_to_copy = bytes_left.min(buf.len()); - let Some(sub_buf) = left_over_bytes.get(offset..offset + bytes_to_copy) else { - error!("Slicing Sub Buffer failed"); - return Err(std::io::Error::new( + let bytes_left = left_over_bytes.len().checked_sub(offset).ok_or_else(|| { + std::io::Error::new( std::io::ErrorKind::Other, - "Slicing Sub Buffer failed", - )); - }; + format!( + "Invalid left over bytes value: {}, offset = {}", + left_over_bytes.len(), + offset + ), + ) + })?; + let bytes_to_copy = bytes_left.min(buf.len()); + let sub_buf = offset + .checked_add(bytes_to_copy) + .and_then(|upper_bound| left_over_bytes.get(offset..upper_bound)) + .ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::Other, "Slicing Sub Buffer failed") + })?; if let Err(error) = memx::memcpy(buf, sub_buf) { error!(error=?error, "memx::memcpy failed"); return Err(std::io::Error::new( @@ -652,8 +682,8 @@ impl ParallelDownloadProcessor { } // Save whats leftover back inside the mutex, if there is anything. 
- if offset + bytes_to_copy != left_over_bytes.len() { - *left_over_buffer = Some((left_over_bytes, offset + bytes_to_copy)); + if offset.saturating_add(bytes_to_copy) != left_over_bytes.len() { + *left_over_buffer = Some((left_over_bytes, offset.saturating_add(bytes_to_copy))); } Ok(bytes_to_copy) @@ -679,7 +709,7 @@ impl Drop for ParallelDownloadProcessor { } } -impl std::io::Read for ParallelDownloadProcessor { +impl Read for ParallelDownloadProcessor { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { let result = self.inner_read(buf); match result { @@ -699,7 +729,7 @@ impl std::io::Read for ParallelDownloadProcessor { /// /// This exists because the `Probe` call made by Mithril is Async, and this makes /// interfacing to that easier. -async fn get_content_length_async(url: &str) -> anyhow::Result { +async fn get_content_length_async(url: &str) -> Result { let url = url.to_owned(); match tokio::task::spawn_blocking(move || get_content_length(&url)).await { Ok(result) => result, @@ -712,7 +742,7 @@ async fn get_content_length_async(url: &str) -> anyhow::Result { /// Send a HEAD request to obtain the length of the file we want to download (necessary /// for calculating the offsets of the chunks) -fn get_content_length(url: &str) -> anyhow::Result { +fn get_content_length(url: &str) -> Result { let response = ureq::head(url).call()?; if response.status() != StatusCode::OK { diff --git a/rust/catalyst-types/src/id_uri/mod.rs b/rust/catalyst-types/src/id_uri/mod.rs index af664f80b5..eb48425ebd 100644 --- a/rust/catalyst-types/src/id_uri/mod.rs +++ b/rust/catalyst-types/src/id_uri/mod.rs @@ -414,8 +414,12 @@ impl IdUri { pub fn is_nonce_in_range(&self, past: Duration, future: Duration) -> bool { if let Some(nonce) = self.nonce { let now = Utc::now(); - let start_time = now - past; - let end_time = now + future; + let Some(start_time) = now.checked_sub_signed(past) else { + return false; + }; + let Some(end_time) = now.checked_add_signed(future) else { + return 
false; + }; (start_time..=end_time).contains(&nonce) } else { // No nonce defined, so we say that this fails. diff --git a/rust/catalyst-types/src/mmap_file.rs b/rust/catalyst-types/src/mmap_file.rs index b36a10bfd0..0595853e6c 100644 --- a/rust/catalyst-types/src/mmap_file.rs +++ b/rust/catalyst-types/src/mmap_file.rs @@ -82,8 +82,8 @@ impl MemMapFileStat { /// Update the global stats when a file is created. fn update_create_stat(size: u64) { if let Ok(mut stat) = MEMMAP_FILE_STATS.write() { - stat.file_count += 1; - stat.total_size += size; + stat.file_count = stat.file_count.saturating_add(1); + stat.total_size = stat.total_size.saturating_add(size); } else { error!( "RwLock write poisoned, failed to update created memory-mapped file statistics." @@ -94,8 +94,8 @@ impl MemMapFileStat { /// Update the global stats when a file is dropped. fn update_drop_stat(size: u64) { if let Ok(mut stat) = MEMMAP_FILE_STATS.write() { - stat.drop_count += 1; - stat.drop_size += size; + stat.drop_count = stat.drop_count.saturating_add(1); + stat.drop_size = stat.drop_size.saturating_add(size); } else { error!( "RwLock write poisoned, failed to update dropped memory-mapped file statistics." @@ -106,7 +106,7 @@ impl MemMapFileStat { /// Update the global error count when an error occurs. fn update_err_stat() { if let Ok(mut stat) = MEMMAP_FILE_STATS.write() { - stat.error_count += 1; + stat.error_count = stat.error_count.saturating_add(1); } else { error!("RwLock write poisoned, failed to update error memory-mapped file statistics."); } diff --git a/rust/catalyst-voting/src/lib.rs b/rust/catalyst-voting/src/lib.rs index b165d78f1c..f91b6a2228 100644 --- a/rust/catalyst-voting/src/lib.rs +++ b/rust/catalyst-voting/src/lib.rs @@ -1,5 +1,7 @@ //! Voting primitives which are used among Catalyst ecosystem. 
+#![allow(clippy::arithmetic_side_effects)] + pub mod crypto; mod utils; pub mod vote_protocol; diff --git a/rust/clippy.toml b/rust/clippy.toml index 7bada5473b..caa289b27b 100644 --- a/rust/clippy.toml +++ b/rust/clippy.toml @@ -1,3 +1,4 @@ allow-unwrap-in-tests = true allow-expect-in-tests = true allow-panic-in-tests = true +arithmetic-side-effects-allowed = ["num_bigint::BigInt"] diff --git a/rust/immutable-ledger/src/serialize.rs b/rust/immutable-ledger/src/serialize.rs index 49ae3fb4e6..53fb9cb906 100644 --- a/rust/immutable-ledger/src/serialize.rs +++ b/rust/immutable-ledger/src/serialize.rs @@ -2,7 +2,7 @@ //! Block structure -use anyhow::{bail, Ok}; +use anyhow::{anyhow, bail, Ok}; use blake2b_simd::{self, Params}; use uuid::Uuid; @@ -204,9 +204,16 @@ impl Block { // genesis and final block). Genesis block MUST have 0 value. Final block MUST hash be // incremented by 1 from the previous block height and changed the sign to negative. // E.g. previous block height is 9 and the Final block height is -10. 
- if self.block_header.height != previous_block.block_header.height + 1 { - return Err(anyhow::anyhow!( - "Module: Immutable ledger, Message: height validation failed: {:?} {:?}", + let Some(block_height) = previous_block.block_header.height.checked_add(1) else { + return Err(anyhow!( + "Module: Immutable ledger, Message: block height overflow: {}", + previous_block.block_header.height + )); + }; + + if self.block_header.height != block_height { + return Err(anyhow!( + "Module: Immutable ledger, Message: height validation failed: {:?} {:?}", self.block_header, previous_block.block_header )); diff --git a/rust/rbac-registration/src/utils/decode_helper.rs b/rust/rbac-registration/src/utils/decode_helper.rs index 9f0632ae74..9125604384 100644 --- a/rust/rbac-registration/src/utils/decode_helper.rs +++ b/rust/rbac-registration/src/utils/decode_helper.rs @@ -12,7 +12,11 @@ pub fn report_duplicated_key( if found_keys.contains(key) { report.duplicate_field( format!("{key:?}").as_str(), - format!("Redundant key found in item {} in RBAC map", index + 1).as_str(), + format!( + "Redundant key found in item {} in RBAC map", + index.saturating_add(1) + ) + .as_str(), context, ); return true; diff --git a/rust/vote-tx-v1/src/decoding.rs b/rust/vote-tx-v1/src/decoding.rs index 6d97a9b5c7..2f33c2d7d1 100644 --- a/rust/vote-tx-v1/src/decoding.rs +++ b/rust/vote-tx-v1/src/decoding.rs @@ -247,7 +247,7 @@ mod tests { let mut reader = bytes.as_slice(); let size = read_be_u32(&mut reader).unwrap(); - assert_eq!(size as usize, bytes.len() - 4); + assert_eq!(size as usize, bytes.len().checked_sub(4).unwrap()); let padding_tag = read_be_u8(&mut reader).unwrap(); assert_eq!(padding_tag, PADDING_TAG); @@ -324,7 +324,7 @@ mod tests { let mut reader = bytes.as_slice(); let size = read_be_u32(&mut reader).unwrap(); - assert_eq!(size as usize, bytes.len() - 4); + assert_eq!(size as usize, bytes.len().checked_sub(4).unwrap()); let padding_tag = read_be_u8(&mut reader).unwrap(); 
assert_eq!(padding_tag, PADDING_TAG);