From 5f7c08fd0c46f462c993a66d2a500ba17e41f4d9 Mon Sep 17 00:00:00 2001
From: stana-ethernal
Date: Fri, 20 Oct 2023 09:15:14 +0200
Subject: [PATCH] Drop and restart validators - property test

---
 consensus/polybft/proposer_calculator.go | 16 +++--
 e2e-polybft/property/property_test.go    | 80 +++++++++++++++++++++++-
 2 files changed, 88 insertions(+), 8 deletions(-)

diff --git a/consensus/polybft/proposer_calculator.go b/consensus/polybft/proposer_calculator.go
index 2ec25fa32a..bf17625798 100644
--- a/consensus/polybft/proposer_calculator.go
+++ b/consensus/polybft/proposer_calculator.go
@@ -25,6 +25,11 @@ type PrioritizedValidator struct {
 	ProposerPriority *big.Int
 }
 
+func (pv PrioritizedValidator) String() string {
+	return fmt.Sprintf("[%v, voting power %v, priority %v]", pv.Metadata.Address.String(),
+		pv.Metadata.VotingPower, pv.ProposerPriority)
+}
+
 // ProposerSnapshot represents snapshot of one proposer calculation
 type ProposerSnapshot struct {
 	Height     uint64
@@ -211,9 +216,7 @@ func (pc *ProposerCalculator) GetSnapshot() (*ProposerSnapshot, bool) {
 // PostBlock is called on every insert of finalized block (either from consensus or syncer)
 // It will update priorities and save the updated snapshot to db
 func (pc *ProposerCalculator) PostBlock(req *PostBlockRequest) error {
-	blockNumber := req.FullBlock.Block.Number()
-
-	return pc.update(blockNumber, req.DBTx)
+	return pc.update(req.FullBlock.Block.Number(), req.DBTx)
 }
 
 func (pc *ProposerCalculator) update(blockNumber uint64, dbTx *bolt.Tx) error {
@@ -229,15 +232,16 @@ func (pc *ProposerCalculator) update(blockNumber uint64, dbTx *bolt.Tx) error {
 			return err
 		}
 
-		pc.logger.Debug("Proposers snapshot has been updated", "current block", blockNumber+1,
-			"validators count", len(pc.snapshot.Validators))
+		pc.logger.Debug("Proposer snapshot has been updated",
+			"block", height, "validators", pc.snapshot.Validators)
 	}
 
 	if err := pc.state.ProposerSnapshotStore.writeProposerSnapshot(pc.snapshot, dbTx); err != nil {
 		return fmt.Errorf("cannot save proposers snapshot for block %d: %w", blockNumber, err)
 	}
 
-	pc.logger.Debug("Update proposers snapshot finished", "target block", blockNumber)
+	pc.logger.Info("Proposer snapshot update finished",
+		"target block", blockNumber+1, "validators", len(pc.snapshot.Validators))
 
 	return nil
 }
diff --git a/e2e-polybft/property/property_test.go b/e2e-polybft/property/property_test.go
index d43a179993..f33358360f 100644
--- a/e2e-polybft/property/property_test.go
+++ b/e2e-polybft/property/property_test.go
@@ -2,13 +2,14 @@ package property
 
 import (
 	"fmt"
-	"math"
 	"math/big"
 	"path/filepath"
+	"sync"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
+	"github.com/umbracle/ethgo"
 	"pgregory.net/rapid"
 
 	"github.com/0xPolygon/polygon-edge/e2e-polybft/framework"
@@ -20,7 +21,7 @@ func TestProperty_DifferentVotingPower(t *testing.T) {
 
 	const (
 		blockTime = time.Second * 6
-		maxStake  = math.MaxUint64
+		maxStake  = 20
 	)
 
 	rapid.Check(t, func(tt *rapid.T) {
@@ -55,3 +56,78 @@ func TestProperty_DifferentVotingPower(t *testing.T) {
 		require.NoError(t, cluster.WaitForBlock(numBlocks, blockTime*time.Duration(numBlocks)))
 	})
 }
+
+func TestProperty_DropValidators(t *testing.T) {
+	t.Parallel()
+
+	const (
+		blockTime = time.Second * 4
+	)
+
+	rapid.Check(t, func(tt *rapid.T) {
+		var (
+			numNodes  = rapid.Uint64Range(5, 8).Draw(tt, "number of cluster nodes")
+			epochSize = rapid.OneOf(rapid.Just(4), rapid.Just(10)).Draw(tt, "epoch size")
+		)
+
+		cluster := framework.NewPropertyTestCluster(t, int(numNodes),
+			framework.WithEpochSize(epochSize),
+			framework.WithSecretsCallback(func(addresses []types.Address, config *framework.TestClusterConfig) {
+				for range addresses {
+					config.StakeAmounts = append(config.StakeAmounts, big.NewInt(20))
+				}
+			}))
+		defer cluster.Stop()
+
+		t.Logf("Test %v, run with %d nodes, epoch size: %d",
+			filepath.Base(cluster.Config.LogsDir), numNodes, epochSize)
+
+		cluster.WaitForReady(t)
+
+		// stop the first validator; block production should continue
+		cluster.Servers[0].Stop()
+		activeValidator := cluster.Servers[numNodes-1]
+		currentBlock, err := activeValidator.JSONRPC().Eth().GetBlockByNumber(ethgo.Latest, false)
+		require.NoError(t, err)
+		require.NoError(t, cluster.WaitForBlock(currentBlock.Number+1, 2*blockTime))
+
+		// drop all validator nodes, leaving one node alive
+		numNodesToDrop := int(numNodes - 1)
+
+		var wg sync.WaitGroup
+		// drop the bulk of the nodes from the cluster concurrently
+		for i := 1; i < numNodesToDrop; i++ {
+			node := cluster.Servers[i]
+
+			wg.Add(1)
+
+			go func(node *framework.TestServer) {
+				defer wg.Done()
+				node.Stop()
+			}(node)
+		}
+
+		wg.Wait()
+
+		// check that block production is stopped
+		currentBlock, err = activeValidator.JSONRPC().Eth().GetBlockByNumber(ethgo.Latest, false)
+		require.NoError(t, err)
+		oldBlockNumber := currentBlock.Number
+		time.Sleep(2 * blockTime)
+		currentBlock, err = activeValidator.JSONRPC().Eth().GetBlockByNumber(ethgo.Latest, false)
+		require.NoError(t, err)
+		require.Equal(t, oldBlockNumber, currentBlock.Number)
+
+		// start the dropped nodes again
+		for i := 0; i < numNodesToDrop; i++ {
+			node := cluster.Servers[i]
+			node.Start()
+		}
+
+		time.Sleep(2 * blockTime)
+		currentBlock, err = activeValidator.JSONRPC().Eth().GetBlockByNumber(ethgo.Latest, false)
+		require.NoError(t, err)
+		// check that block production has restarted
+		require.True(t, oldBlockNumber < currentBlock.Number)
+	})
+}
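
Why the halt in the middle of the test is the expected outcome: every validator is staked equally (big.NewInt(20)), so once all but one of the numNodes validators are stopped, the surviving node holds only 1/numNodes of the voting power, well under the >2/3 supermajority an IBFT-style chain needs to finalize blocks. A minimal, self-contained sketch of that arithmetic (illustrative only; hasQuorum is a hypothetical helper, not an API from this repository):

	package main

	import (
		"fmt"
		"math/big"
	)

	// hasQuorum reports whether the live voting power strictly exceeds
	// two thirds of the total stake, i.e. 3*live > 2*total.
	func hasQuorum(live, total *big.Int) bool {
		lhs := new(big.Int).Mul(big.NewInt(3), live)
		rhs := new(big.Int).Mul(big.NewInt(2), total)
		return lhs.Cmp(rhs) > 0
	}

	func main() {
		stake := big.NewInt(20) // the equal stake the test assigns to every node
		for n := int64(5); n <= 8; n++ { // the range numNodes is drawn from
			total := new(big.Int).Mul(stake, big.NewInt(n))
			fmt.Printf("n=%d: one node alive -> quorum %v, all alive -> quorum %v\n",
				n, hasQuorum(stake, total), hasQuorum(total, total))
		}
	}

For every n in 5..8 the single survivor fails the quorum check, which is why the test can assert that the block number stays frozen until the dropped nodes are started again.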
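
The String() method added to PrioritizedValidator is what makes the new "validators" log field readable: when the slice ends up formatted through fmt's %v verb (as hclog-style loggers typically do), fmt invokes each element's Stringer. A rough standalone approximation of the resulting output, using simplified stand-in types rather than the actual polybft structs:

	package main

	import (
		"fmt"
		"math/big"
	)

	// PrioritizedValidator is a simplified stand-in: an address plus
	// voting power and the rotating proposer priority.
	type PrioritizedValidator struct {
		Address          string
		VotingPower      *big.Int
		ProposerPriority *big.Int
	}

	// String mirrors the "[<address>, voting power <vp>, priority <p>]"
	// layout introduced by the patch.
	func (pv PrioritizedValidator) String() string {
		return fmt.Sprintf("[%v, voting power %v, priority %v]",
			pv.Address, pv.VotingPower, pv.ProposerPriority)
	}

	func main() {
		vals := []PrioritizedValidator{
			{"0x1111", big.NewInt(20), big.NewInt(15)},
			{"0x2222", big.NewInt(20), big.NewInt(-5)},
		}
		// %v walks the slice and calls each element's String method:
		// validators [[0x1111, voting power 20, priority 15] [0x2222, voting power 20, priority -5]]
		fmt.Printf("validators %v\n", vals)
	}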