diff --git a/pallets/subtensor/src/epoch.rs b/pallets/subtensor/src/epoch.rs
index 22331d206..e90f4b73d 100644
--- a/pallets/subtensor/src/epoch.rs
+++ b/pallets/subtensor/src/epoch.rs
@@ -32,7 +32,7 @@ impl Pallet {
         // Inactive mask.
         let inactive: Vec<bool> = last_update
             .iter()
-            .map(|updated| *updated + activity_cutoff < current_block)
+            .map(|updated| updated.saturating_add(activity_cutoff) < current_block)
             .collect();
         log::trace!("Inactive:\n{:?}\n", inactive.clone());
@@ -193,7 +193,7 @@ impl Pallet {
         let combined_emission: Vec<I32F32> = incentive
             .iter()
             .zip(dividends.clone())
-            .map(|(ii, di)| ii + di)
+            .map(|(ii, di)| ii.saturating_add(di))
             .collect();
         let emission_sum: I32F32 = combined_emission.iter().sum();
@@ -223,7 +223,7 @@ impl Pallet {
         let server_emission: Vec<I96F32> = normalized_server_emission
             .iter()
-            .map(|se: &I32F32| I96F32::from_num(*se) * float_rao_emission)
+            .map(|se: &I32F32| I96F32::from_num(*se).saturating_mul(float_rao_emission))
             .collect();
         let server_emission: Vec<u64> = server_emission
             .iter()
@@ -232,7 +232,7 @@ impl Pallet {
         let validator_emission: Vec<I96F32> = normalized_validator_emission
             .iter()
-            .map(|ve: &I32F32| I96F32::from_num(*ve) * float_rao_emission)
+            .map(|ve: &I32F32| I96F32::from_num(*ve).saturating_mul(float_rao_emission))
             .collect();
         let validator_emission: Vec<u64> = validator_emission
             .iter()
@@ -242,7 +242,7 @@ impl Pallet {
         // Used only to track combined emission in the storage.
         let combined_emission: Vec<I96F32> = normalized_combined_emission
             .iter()
-            .map(|ce: &I32F32| I96F32::from_num(*ce) * float_rao_emission)
+            .map(|ce: &I32F32| I96F32::from_num(*ce).saturating_mul(float_rao_emission))
             .collect();
         let combined_emission: Vec<u64> = combined_emission
             .iter()
@@ -371,7 +371,7 @@ impl Pallet {
         // Inactive mask.
         let inactive: Vec<bool> = last_update
             .iter()
-            .map(|updated| *updated + activity_cutoff < current_block)
+            .map(|updated| updated.saturating_add(activity_cutoff) < current_block)
             .collect();
         log::trace!("Inactive: {:?}", inactive.clone());
@@ -551,7 +551,7 @@ impl Pallet {
         let combined_emission: Vec<I32F32> = incentive
             .iter()
             .zip(dividends.clone())
-            .map(|(ii, di)| ii + di)
+            .map(|(ii, di)| ii.saturating_add(di))
             .collect();
         let emission_sum: I32F32 = combined_emission.iter().sum();
@@ -581,7 +581,7 @@ impl Pallet {
         let server_emission: Vec<I96F32> = normalized_server_emission
             .iter()
-            .map(|se: &I32F32| I96F32::from_num(*se) * float_rao_emission)
+            .map(|se: &I32F32| I96F32::from_num(*se).saturating_mul(float_rao_emission))
             .collect();
         let server_emission: Vec<u64> = server_emission
             .iter()
@@ -590,7 +590,7 @@ impl Pallet {
         let validator_emission: Vec<I96F32> = normalized_validator_emission
             .iter()
-            .map(|ve: &I32F32| I96F32::from_num(*ve) * float_rao_emission)
+            .map(|ve: &I32F32| I96F32::from_num(*ve).saturating_mul(float_rao_emission))
             .collect();
         let validator_emission: Vec<u64> = validator_emission
             .iter()
@@ -600,7 +600,7 @@ impl Pallet {
         // Only used to track emission in storage.
         let combined_emission: Vec<I96F32> = normalized_combined_emission
             .iter()
-            .map(|ce: &I32F32| I96F32::from_num(*ce) * float_rao_emission)
+            .map(|ce: &I32F32| I96F32::from_num(*ce).saturating_mul(float_rao_emission))
             .collect();
         let combined_emission: Vec<u64> = combined_emission
             .iter()
@@ -706,7 +706,7 @@ impl Pallet {
         I32F32::from_num(Self::get_rho(netuid))
     }
     pub fn get_float_kappa(netuid: u16) -> I32F32 {
-        I32F32::from_num(Self::get_kappa(netuid)) / I32F32::from_num(u16::MAX)
+        I32F32::from_num(Self::get_kappa(netuid)).saturating_div(I32F32::from_num(u16::MAX))
     }
     pub fn get_normalized_stake(netuid: u16) -> Vec<I32F32> {
@@ -855,8 +855,10 @@ impl Pallet {
         // Calculate the intercept 'b' of the logistic function.
         // b = ln((1 / alpha_low - 1)) + a * consensus_low
-        let b = safe_ln((I32F32::from_num(1.0) / alpha_low).saturating_sub(I32F32::from_num(1.0)))
-            .saturating_add(a.saturating_mul(consensus_low));
+        let b = safe_ln(
+            (I32F32::from_num(1.0).saturating_div(alpha_low)).saturating_sub(I32F32::from_num(1.0)),
+        )
+        .saturating_add(a.saturating_mul(consensus_low));
         log::trace!("b: {:?}", b);

         // Return the calculated slope 'a' and intercept 'b'.
diff --git a/pallets/subtensor/src/math.rs b/pallets/subtensor/src/math.rs
index 88078821e..8b6e76f2c 100644
--- a/pallets/subtensor/src/math.rs
+++ b/pallets/subtensor/src/math.rs
@@ -1148,36 +1148,6 @@ pub fn sparse_threshold(w: &[Vec<(u16, I32F32)>], threshold: I32F32) -> Vec<Vec<(u16, I32F32)>> {
-// pub fn mat_ema_alpha_vec_sparse(
-//     new: &Vec<Vec<(u16, I32F32)>>,
-//     old: &Vec<Vec<(u16, I32F32)>>,
-//     alpha: &Vec<I32F32>,
-// ) -> Vec<Vec<(u16, I32F32)>> {
-//     assert!(new.len() == old.len());
-//     let n = new.len(); // assume square matrix, rows=cols
-//     let zero: I32F32 = I32F32::from_num(0.0);
-//     let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];
-//     for i in 0..new.len() {
-//         let mut row: Vec<I32F32> = vec![zero; n];
-//         for (j, value) in new[i].iter() {
-//             let alpha_val: I32F32 = alpha[*j as usize];
-//             row[*j as usize] += alpha_val * value;
-//         }
-//         for (j, value) in old[i].iter() {
-//             let one_minus_alpha: I32F32 = I32F32::from_num(1.0) - alpha[*j as usize];
-//             row[*j as usize] += one_minus_alpha * value;
-//         }
-//         for (j, value) in row.iter().enumerate() {
-//             if *value > zero {
-//                 result[i].push((j as u16, *value))
-//             }
-//         }
-//     }
-//     result
-// }
-
 /// Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values.
 #[allow(dead_code)]
 pub fn mat_ema_alpha_vec_sparse(
@@ -1192,16 +1162,18 @@ pub fn mat_ema_alpha_vec_sparse(
     let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];

     // Iterate over each row of the matrices.
-    for i in 0..new.len() {
+    for (i, (new_row, old_row)) in new.iter().zip(old).enumerate() {
         // Initialize a row of zeros for the result matrix.
         let mut row: Vec<I32F32> = vec![zero; n];

         // Process the new matrix values.
-        for (j, value) in new[i].iter() {
+        for (j, value) in new_row.iter() {
             // Retrieve the alpha value for the current column.
-            let alpha_val: I32F32 = alpha[*j as usize];
+            let alpha_val: I32F32 = alpha.get(*j as usize).copied().unwrap_or(zero);

             // Compute the EMA component for the new value using saturating multiplication.
-            row[*j as usize] = alpha_val.saturating_mul(*value);
+            if let Some(row_val) = row.get_mut(*j as usize) {
+                *row_val = alpha_val.saturating_mul(*value);
+            }
             log::trace!(
                 "new[{}][{}] * alpha[{}] = {} * {} = {}",
                 i,
@@ -1209,19 +1181,20 @@
                 j,
                 value,
                 alpha_val,
-                row[*j as usize]
+                row.get(*j as usize).unwrap_or(&zero)
             );
         }

         // Process the old matrix values.
-        for (j, value) in old[i].iter() {
+        for (j, value) in old_row.iter() {
             // Retrieve the alpha value for the current column.
-            let alpha_val: I32F32 = alpha[*j as usize];
+            let alpha_val: I32F32 = alpha.get(*j as usize).copied().unwrap_or(zero);

             // Calculate the complement of the alpha value using saturating subtraction.
             let one_minus_alpha: I32F32 = I32F32::from_num(1.0).saturating_sub(alpha_val);

             // Compute the EMA component for the old value and add it to the row using saturating operations.
-            row[*j as usize] =
-                row[*j as usize].saturating_add(one_minus_alpha.saturating_mul(*value));
+            if let Some(row_val) = row.get_mut(*j as usize) {
+                *row_val = row_val.saturating_add(one_minus_alpha.saturating_mul(*value));
+            }
             log::trace!(
                 "old[{}][{}] * (1 - alpha[{}]) = {} * {} = {}",
                 i,
@@ -1236,8 +1209,10 @@
         // Collect the non-zero values into the result matrix.
         for (j, value) in row.iter().enumerate() {
             if *value > zero {
-                result[i].push((j as u16, *value));
-                log::trace!("result[{}][{}] = {}", i, j, value);
+                if let Some(result_row) = result.get_mut(i) {
+                    result_row.push((j as u16, *value));
+                    log::trace!("result[{}][{}] = {}", i, j, value);
+                }
             }
         }
     }
@@ -1245,6 +1220,7 @@
     // Return the computed EMA sparse matrix.
     result
 }
+
 /// Return matrix exponential moving average: `alpha_j * a_ij + one_minus_alpha_j * b_ij`.
 /// `alpha_` is the EMA coefficient passed as a vector per column.
 #[allow(dead_code)]
@@ -1254,13 +1230,13 @@ pub fn mat_ema_alpha_vec(
     alpha: &[I32F32],
 ) -> Vec<Vec<I32F32>> {
     // Check if the new matrix is empty or its first row is empty.
-    if new.is_empty() || new[0].is_empty() {
+    if new.is_empty() || new.first().map_or(true, |row| row.is_empty()) {
         return vec![vec![]; 1];
     }

     // Ensure the dimensions of the new and old matrices match.
     assert!(new.len() == old.len());
-    assert!(new[0].len() == alpha.len());
+    assert!(new.first().map_or(0, |row| row.len()) == alpha.len());

     // Initialize the result matrix with zeros, having the same dimensions as the new matrix.
     let mut result: Vec<Vec<I32F32>> =
@@ -1277,9 +1253,15 @@
             let one_minus_alpha = I32F32::from_num(1.0).saturating_sub(alpha_val);

             // Compute the EMA for the current element using saturating operations.
-            result[i][j] = alpha_val
-                .saturating_mul(new_row[j])
-                .saturating_add(one_minus_alpha.saturating_mul(old_row[j]));
+            if let (Some(new_val), Some(old_val), Some(result_val)) = (
+                new_row.get(j),
+                old_row.get(j),
+                result.get_mut(i).and_then(|row| row.get_mut(j)),
+            ) {
+                *result_val = alpha_val
+                    .saturating_mul(*new_val)
+                    .saturating_add(one_minus_alpha.saturating_mul(*old_val));
+            }
         }
     }
diff --git a/pallets/subtensor/tests/math.rs b/pallets/subtensor/tests/math.rs
index f48ffee1a..bff4628df 100644
--- a/pallets/subtensor/tests/math.rs
+++ b/pallets/subtensor/tests/math.rs
@@ -1,6 +1,9 @@
-#![allow(clippy::unwrap_used)]
-#![allow(clippy::panic)]
-#![allow(clippy::indexing_slicing)]
+#![allow(
+    clippy::unwrap_used,
+    clippy::panic,
+    clippy::indexing_slicing,
+    clippy::arithmetic_side_effects
+)]
 use substrate_fixed::types::{I32F32, I64F64};
 use pallet_subtensor::math::*;
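The pattern applied throughout the patch is the same: panicking or overflow-prone constructs (`+`, `*`, `/`, `[]` indexing) on `substrate_fixed` values are replaced with `saturating_add`/`saturating_mul`/`saturating_div` and with `get`/`get_mut` lookups that skip out-of-range indices. As a minimal standalone sketch (not part of the patch; the helper name `ema_row` is hypothetical), the sparse-EMA row update reduces to something like the following, assuming only `substrate_fixed::types::I32F32`:

```rust
use substrate_fixed::types::I32F32;

// Hypothetical helper mirroring the patched sparse-EMA row update:
// bounds-checked lookups instead of direct indexing, saturating
// fixed-point arithmetic instead of raw operators.
fn ema_row(
    new_row: &[(u16, I32F32)],
    old_row: &[(u16, I32F32)],
    alpha: &[I32F32],
    n: usize,
) -> Vec<I32F32> {
    let zero = I32F32::from_num(0.0);
    let mut row = vec![zero; n];
    for (j, value) in new_row {
        // A missing alpha entry falls back to zero rather than panicking.
        let a = alpha.get(*j as usize).copied().unwrap_or(zero);
        if let Some(slot) = row.get_mut(*j as usize) {
            *slot = a.saturating_mul(*value);
        }
    }
    for (j, value) in old_row {
        let a = alpha.get(*j as usize).copied().unwrap_or(zero);
        let one_minus_a = I32F32::from_num(1.0).saturating_sub(a);
        if let Some(slot) = row.get_mut(*j as usize) {
            *slot = slot.saturating_add(one_minus_a.saturating_mul(*value));
        }
    }
    row
}
```

The behavioural trade-off is the one the diff makes in `mat_ema_alpha_vec_sparse`: an out-of-bounds column or missing `alpha` entry now contributes zero (or is skipped) instead of aborting the epoch with a panic.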