feat: clippy
Samuel Dare committed Jun 24, 2024
1 parent abc7798 commit b1c26b3
Showing 3 changed files with 49 additions and 62 deletions.
28 changes: 15 additions & 13 deletions pallets/subtensor/src/epoch.rs
@@ -32,7 +32,7 @@ impl<T: Config> Pallet<T> {
// Inactive mask.
let inactive: Vec<bool> = last_update
.iter()
- .map(|updated| *updated + activity_cutoff < current_block)
+ .map(|updated| updated.saturating_add(activity_cutoff) < current_block)
.collect();
log::trace!("Inactive:\n{:?}\n", inactive.clone());
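
Note on this hunk: under clippy's `arithmetic_side_effects` lint, a bare `+` on block numbers can panic in debug builds and silently wrap in release. A minimal standalone sketch of the difference, assuming `u64` block numbers (the concrete type is not shown in this diff):

    fn main() {
        let last_update: u64 = u64::MAX - 5;
        let activity_cutoff: u64 = 100;
        // `last_update + activity_cutoff` would overflow: panic in debug,
        // wrap to a small value in release (misclassifying the neuron).
        // `saturating_add` clamps at u64::MAX, so the comparison stays sound.
        let inactive = last_update.saturating_add(activity_cutoff) < u64::MAX;
        assert!(!inactive);
    }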

@@ -193,7 +193,7 @@ impl<T: Config> Pallet<T> {
let combined_emission: Vec<I32F32> = incentive
.iter()
.zip(dividends.clone())
- .map(|(ii, di)| ii + di)
+ .map(|(ii, di)| ii.saturating_add(di))
.collect();
let emission_sum: I32F32 = combined_emission.iter().sum();
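
The same saturating pattern, element-wise over two fixed-point vectors; a small sketch with made-up values (chosen to be exactly representable in binary so the assert is exact):

    use substrate_fixed::types::I32F32;

    fn main() {
        let incentive = vec![I32F32::from_num(0.5), I32F32::from_num(0.25)];
        let dividends = vec![I32F32::from_num(0.25), I32F32::from_num(0.5)];
        // Near I32F32::MAX the sum clamps instead of panicking, keeping
        // the epoch hot path free of arithmetic side effects.
        let combined: Vec<I32F32> = incentive
            .iter()
            .zip(dividends)
            .map(|(ii, di)| ii.saturating_add(di))
            .collect();
        assert_eq!(combined[0], I32F32::from_num(0.75));
    }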

@@ -223,7 +223,7 @@ impl<T: Config> Pallet<T> {

let server_emission: Vec<I96F32> = normalized_server_emission
.iter()
- .map(|se: &I32F32| I96F32::from_num(*se) * float_rao_emission)
+ .map(|se: &I32F32| I96F32::from_num(*se).saturating_mul(float_rao_emission))
.collect();
let server_emission: Vec<u64> = server_emission
.iter()
@@ -232,7 +232,7 @@

let validator_emission: Vec<I96F32> = normalized_validator_emission
.iter()
- .map(|ve: &I32F32| I96F32::from_num(*ve) * float_rao_emission)
+ .map(|ve: &I32F32| I96F32::from_num(*ve).saturating_mul(float_rao_emission))
.collect();
let validator_emission: Vec<u64> = validator_emission
.iter()
@@ -242,7 +242,7 @@
// Used only to track combined emission in the storage.
let combined_emission: Vec<I96F32> = normalized_combined_emission
.iter()
- .map(|ce: &I32F32| I96F32::from_num(*ce) * float_rao_emission)
+ .map(|ce: &I32F32| I96F32::from_num(*ce).saturating_mul(float_rao_emission))
.collect();
let combined_emission: Vec<u64> = combined_emission
.iter()
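
Why the widening to `I96F32` plus `saturating_mul` in the three hunks above: multiplying a normalized share by the total rao emission can exceed `I32F32`'s range, and plain `*` on fixed-point types panics on overflow. A sketch with hypothetical numbers:

    use substrate_fixed::types::{I32F32, I96F32};

    fn main() {
        let share: I32F32 = I32F32::from_num(0.25); // a normalized emission entry
        let float_rao_emission = I96F32::from_num(1_000_000_000u64);
        // saturating_mul clamps at the I96F32 bounds rather than panicking.
        let emission = I96F32::from_num(share).saturating_mul(float_rao_emission);
        assert_eq!(emission.to_num::<u64>(), 250_000_000);
    }
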
@@ -371,7 +371,7 @@ impl<T: Config> Pallet<T> {
// Inactive mask.
let inactive: Vec<bool> = last_update
.iter()
- .map(|updated| *updated + activity_cutoff < current_block)
+ .map(|updated| updated.saturating_add(activity_cutoff) < current_block)
.collect();
log::trace!("Inactive: {:?}", inactive.clone());

@@ -551,7 +551,7 @@ impl<T: Config> Pallet<T> {
let combined_emission: Vec<I32F32> = incentive
.iter()
.zip(dividends.clone())
- .map(|(ii, di)| ii + di)
+ .map(|(ii, di)| ii.saturating_add(di))
.collect();
let emission_sum: I32F32 = combined_emission.iter().sum();

@@ -581,7 +581,7 @@ impl<T: Config> Pallet<T> {

let server_emission: Vec<I96F32> = normalized_server_emission
.iter()
- .map(|se: &I32F32| I96F32::from_num(*se) * float_rao_emission)
+ .map(|se: &I32F32| I96F32::from_num(*se).saturating_mul(float_rao_emission))
.collect();
let server_emission: Vec<u64> = server_emission
.iter()
@@ -590,7 +590,7 @@

let validator_emission: Vec<I96F32> = normalized_validator_emission
.iter()
- .map(|ve: &I32F32| I96F32::from_num(*ve) * float_rao_emission)
+ .map(|ve: &I32F32| I96F32::from_num(*ve).saturating_mul(float_rao_emission))
.collect();
let validator_emission: Vec<u64> = validator_emission
.iter()
@@ -600,7 +600,7 @@
// Only used to track emission in storage.
let combined_emission: Vec<I96F32> = normalized_combined_emission
.iter()
- .map(|ce: &I32F32| I96F32::from_num(*ce) * float_rao_emission)
+ .map(|ce: &I32F32| I96F32::from_num(*ce).saturating_mul(float_rao_emission))
.collect();
let combined_emission: Vec<u64> = combined_emission
.iter()
@@ -706,7 +706,7 @@ impl<T: Config> Pallet<T> {
I32F32::from_num(Self::get_rho(netuid))
}
pub fn get_float_kappa(netuid: u16) -> I32F32 {
- I32F32::from_num(Self::get_kappa(netuid)) / I32F32::from_num(u16::MAX)
+ I32F32::from_num(Self::get_kappa(netuid)).saturating_div(I32F32::from_num(u16::MAX))
}

pub fn get_normalized_stake(netuid: u16) -> Vec<I32F32> {
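
`get_float_kappa` maps a `u16` stored on chain into `[0, 1]`. `saturating_div` behaves like `/` for in-range results but cannot overflow-panic (it still panics on a zero divisor, which `u16::MAX` rules out). A quick stand-alone check with a hypothetical on-chain value:

    use substrate_fixed::types::I32F32;

    fn main() {
        let kappa: u16 = 32_768; // hypothetical stored value, ~0.5 after scaling
        let float_kappa =
            I32F32::from_num(kappa).saturating_div(I32F32::from_num(u16::MAX));
        assert!(float_kappa > I32F32::from_num(0.49));
        assert!(float_kappa < I32F32::from_num(0.51));
    }
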
@@ -855,8 +855,10 @@ impl<T: Config> Pallet<T> {

// Calculate the intercept 'b' of the logistic function.
// b = ln((1 / alpha_low - 1)) + a * consensus_low
- let b = safe_ln((I32F32::from_num(1.0) / alpha_low).saturating_sub(I32F32::from_num(1.0)))
- .saturating_add(a.saturating_mul(consensus_low));
+ let b = safe_ln(
+ (I32F32::from_num(1.0).saturating_div(alpha_low)).saturating_sub(I32F32::from_num(1.0)),
+ )
+ .saturating_add(a.saturating_mul(consensus_low));
log::trace!("b: {:?}", b);

// Return the calculated slope 'a' and intercept 'b'.
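
For reference, the intercept formula from the comment, b = ln(1/alpha_low - 1) + a * consensus_low, checked with plain `f64` and made-up inputs (the pallet computes the same thing in `I32F32` via its own `safe_ln` helper):

    fn main() {
        let (alpha_low, a, consensus_low) = (0.1_f64, 10.0_f64, 0.3_f64);
        let b = (1.0 / alpha_low - 1.0).ln() + a * consensus_low;
        println!("b = {b:.4}"); // ln(9) + 3 ≈ 5.1972
    }
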
74 changes: 28 additions & 46 deletions pallets/subtensor/src/math.rs
@@ -1148,36 +1148,6 @@ pub fn sparse_threshold(w: &[Vec<(u16, I32F32)>], threshold: I32F32) -> Vec<Vec<
.collect()
}

- // /// Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values.
- // #[allow(dead_code)]
- // pub fn mat_ema_alpha_vec_sparse(
- //     new: &Vec<Vec<(u16, I32F32)>>,
- //     old: &Vec<Vec<(u16, I32F32)>>,
- //     alpha: &Vec<I32F32>,
- // ) -> Vec<Vec<(u16, I32F32)>> {
- //     assert!(new.len() == old.len());
- //     let n = new.len(); // assume square matrix, rows=cols
- //     let zero: I32F32 = I32F32::from_num(0.0);
- //     let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];
- //     for i in 0..new.len() {
- //         let mut row: Vec<I32F32> = vec![zero; n];
- //         for (j, value) in new[i].iter() {
- //             let alpha_val: I32F32 = alpha[*j as usize];
- //             row[*j as usize] += alpha_val * value;
- //         }
- //         for (j, value) in old[i].iter() {
- //             let one_minus_alpha: I32F32 = I32F32::from_num(1.0) - alpha[*j as usize];
- //             row[*j as usize] += one_minus_alpha * value;
- //         }
- //         for (j, value) in row.iter().enumerate() {
- //             if *value > zero {
- //                 result[i].push((j as u16, *value))
- //             }
- //         }
- //     }
- //     result
- // }

/// Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values.
#[allow(dead_code)]
pub fn mat_ema_alpha_vec_sparse(
@@ -1192,36 +1162,39 @@ pub fn mat_ema_alpha_vec_sparse(
let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];

// Iterate over each row of the matrices.
- for i in 0..new.len() {
+ for (i, (new_row, old_row)) in new.iter().zip(old).enumerate() {
// Initialize a row of zeros for the result matrix.
let mut row: Vec<I32F32> = vec![zero; n];

// Process the new matrix values.
- for (j, value) in new[i].iter() {
+ for (j, value) in new_row.iter() {
// Retrieve the alpha value for the current column.
- let alpha_val: I32F32 = alpha[*j as usize];
+ let alpha_val: I32F32 = alpha.get(*j as usize).copied().unwrap_or(zero);
// Compute the EMA component for the new value using saturating multiplication.
- row[*j as usize] = alpha_val.saturating_mul(*value);
+ if let Some(row_val) = row.get_mut(*j as usize) {
+ *row_val = alpha_val.saturating_mul(*value);
+ }
log::trace!(
"new[{}][{}] * alpha[{}] = {} * {} = {}",
i,
j,
j,
value,
alpha_val,
- row[*j as usize]
+ row.get(*j as usize).unwrap_or(&zero)
);
}

// Process the old matrix values.
- for (j, value) in old[i].iter() {
+ for (j, value) in old_row.iter() {
// Retrieve the alpha value for the current column.
- let alpha_val: I32F32 = alpha[*j as usize];
+ let alpha_val: I32F32 = alpha.get(*j as usize).copied().unwrap_or(zero);
// Calculate the complement of the alpha value using saturating subtraction.
let one_minus_alpha: I32F32 = I32F32::from_num(1.0).saturating_sub(alpha_val);
// Compute the EMA component for the old value and add it to the row using saturating operations.
- row[*j as usize] =
- row[*j as usize].saturating_add(one_minus_alpha.saturating_mul(*value));
+ if let Some(row_val) = row.get_mut(*j as usize) {
+ *row_val = row_val.saturating_add(one_minus_alpha.saturating_mul(*value));
+ }
log::trace!(
"old[{}][{}] * (1 - alpha[{}]) = {} * {} = {}",
i,
@@ -1236,15 +1209,18 @@ pub fn mat_ema_alpha_vec_sparse(
// Collect the non-zero values into the result matrix.
for (j, value) in row.iter().enumerate() {
if *value > zero {
- result[i].push((j as u16, *value));
- log::trace!("result[{}][{}] = {}", i, j, value);
+ if let Some(result_row) = result.get_mut(i) {
+ result_row.push((j as u16, *value));
+ log::trace!("result[{}][{}] = {}", i, j, value);
+ }
}
}
}

// Return the computed EMA sparse matrix.
result
}
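
The rewrite above swaps every panicking index (`alpha[j]`, `row[j]`, `result[i]`) for `get`/`get_mut`, and replaces the index-based outer loop with `iter().zip(old).enumerate()`. A compact standalone sketch of the lookup pattern in isolation:

    use substrate_fixed::types::I32F32;

    fn main() {
        let zero = I32F32::from_num(0.0);
        let alpha = [I32F32::from_num(0.8)];
        let mut row = vec![zero; 1];
        let j = 5usize; // deliberately out of bounds
        // `alpha[j]` would panic; `.get` degrades the miss to a default...
        let alpha_val = alpha.get(j).copied().unwrap_or(zero);
        // ...and `.get_mut` turns the write into a no-op.
        if let Some(slot) = row.get_mut(j) {
            *slot = alpha_val;
        }
        assert_eq!(row, vec![zero; 1]); // untouched, and no panic
    }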

/// Return matrix exponential moving average: `alpha_j * a_ij + one_minus_alpha_j * b_ij`.
/// `alpha_` is the EMA coefficient passed as a vector per column.
#[allow(dead_code)]
@@ -1254,13 +1230,13 @@ pub fn mat_ema_alpha_vec(
alpha: &[I32F32],
) -> Vec<Vec<I32F32>> {
// Check if the new matrix is empty or its first row is empty.
- if new.is_empty() || new[0].is_empty() {
+ if new.is_empty() || new.first().map_or(true, |row| row.is_empty()) {
return vec![vec![]; 1];
}

// Ensure the dimensions of the new and old matrices match.
assert!(new.len() == old.len());
- assert!(new[0].len() == alpha.len());
+ assert!(new.first().map_or(0, |row| row.len()) == alpha.len());

// Initialize the result matrix with zeros, having the same dimensions as the new matrix.
let mut result: Vec<Vec<I32F32>> =
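
`new[0]` on a possibly-empty matrix is itself a panic site, so the emptiness check becomes `first().map_or(...)`; a tiny illustration:

    fn main() {
        let empty: Vec<Vec<u32>> = vec![];
        // `empty[0].is_empty()` would panic before the check could return.
        let degenerate = empty.is_empty() || empty.first().map_or(true, |row| row.is_empty());
        assert!(degenerate);
    }
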
@@ -1277,9 +1253,15 @@
let one_minus_alpha = I32F32::from_num(1.0).saturating_sub(alpha_val);

// Compute the EMA for the current element using saturating operations.
- result[i][j] = alpha_val
- .saturating_mul(new_row[j])
- .saturating_add(one_minus_alpha.saturating_mul(old_row[j]));
+ if let (Some(new_val), Some(old_val), Some(result_val)) = (
+ new_row.get(j),
+ old_row.get(j),
+ result.get_mut(i).and_then(|row| row.get_mut(j)),
+ ) {
+ *result_val = alpha_val
+ .saturating_mul(*new_val)
+ .saturating_add(one_minus_alpha.saturating_mul(*old_val));
+ }
}
}
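
The three-way `if let (Some(..), Some(..), Some(..))` gates the write on all bounds checks at once: a single out-of-range index makes the update a no-op rather than a panic (the asserts above make that branch unreachable in practice). In miniature:

    fn main() {
        let new_row = [10_i64, 20];
        let old_row = [1_i64, 2];
        let mut out = [0_i64; 2];
        let j = 1;
        if let (Some(n), Some(o), Some(slot)) =
            (new_row.get(j), old_row.get(j), out.get_mut(j))
        {
            *slot = n.saturating_add(*o);
        }
        assert_eq!(out, [0, 22]);
    }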

9 changes: 6 additions & 3 deletions pallets/subtensor/tests/math.rs
@@ -1,6 +1,9 @@
- #![allow(clippy::unwrap_used)]
- #![allow(clippy::panic)]
- #![allow(clippy::indexing_slicing)]
+ #![allow(
+ clippy::unwrap_used,
+ clippy::panic,
+ clippy::indexing_slicing,
+ clippy::arithmetic_side_effects
+ )]
use substrate_fixed::types::{I32F32, I64F64};

use pallet_subtensor::math::*;
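
The test file keeps its permissive lints but folds them into one inner attribute and adds `arithmetic_side_effects`, since tests want the loud failure: a panic there is a failing test, not a halted chain. The shape of the pattern, sketched in an arbitrary test module:

    #![allow(
        clippy::unwrap_used,
        clippy::panic,
        clippy::indexing_slicing,
        clippy::arithmetic_side_effects
    )]

    #[test]
    fn indexing_and_bare_arithmetic_are_fine_here() {
        let v = vec![1_u64, 2, 3];
        // These would trip the denied lints in pallet code; in tests the
        // potential panic is exactly the failure signal we want.
        assert_eq!(v[0] + v[1], 3);
    }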