Merge pull request #216 from meshplus/feat/tick-timeout-in-raft
feat(raft): add tick timeout in raft config
Commit: 6f6d21fb01
@@ -1,4 +1,5 @@
 [raft]
+tick_timeout = "0.1s"      # TickTimeout is the internal logical clock for the Node by a single tick. Election timeouts and heartbeat timeouts are in units of ticks.
 election_tick = 10         # ElectionTick is the number of Node.Tick invocations that must pass between elections.
 heartbeat_tick = 1         # HeartbeatTick is the number of Node.Tick invocations that must pass between heartbeats.
 max_size_per_msg = 1048576 # 1024*1024, MaxSizePerMsg limits the max size of each append message.

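The new tick_timeout sets the wall-clock length of a single raft tick, while election_tick and heartbeat_tick keep expressing their timeouts as counts of ticks. A minimal sketch of the resulting arithmetic, using the values from the hunk above (the effectiveTimeouts helper is illustrative only, not code from this PR):

package main

import (
	"fmt"
	"time"
)

// effectiveTimeouts converts tick-based raft settings into wall-clock durations.
// tickTimeout is the period of the logical clock; election/heartbeat are tick counts.
func effectiveTimeouts(tickTimeout time.Duration, electionTick, heartbeatTick int) (election, heartbeat time.Duration) {
	return time.Duration(electionTick) * tickTimeout, time.Duration(heartbeatTick) * tickTimeout
}

func main() {
	tick, _ := time.ParseDuration("0.1s") // tick_timeout from the config above
	e, h := effectiveTimeouts(tick, 10, 1) // election_tick, heartbeat_tick
	fmt.Println(e, h)                      // prints: 1s 100ms
}

With the defaults shown, a follower waits roughly 1s of missed heartbeats before starting an election, and the leader heartbeats every 100ms.
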
@@ -14,16 +14,17 @@ type RAFTConfig struct {
 }
 
 type MempoolConfig struct {
-	BatchSize uint64 `mapstructure:"batch_size"`
-	PoolSize  uint64 `mapstructure:"pool_size"`
+	BatchSize   uint64 `mapstructure:"batch_size"`
+	PoolSize    uint64 `mapstructure:"pool_size"`
 	TxSliceSize uint64 `mapstructure:"tx_slice_size"`
 
-	BatchTick    time.Duration `mapstructure:"batch_tick"`
-	FetchTimeout time.Duration `mapstructure:"fetch_timeout"`
+	BatchTick      time.Duration `mapstructure:"batch_tick"`
+	FetchTimeout   time.Duration `mapstructure:"fetch_timeout"`
 	TxSliceTimeout time.Duration `mapstructure:"tx_slice_timeout"`
 }
 
 type RAFT struct {
+	TickTimeout                time.Duration `mapstructure:"tick_timeout"`
 	ElectionTick               int           `mapstructure:"election_tick"`
 	HeartbeatTick              int           `mapstructure:"heartbeat_tick"`
 	MaxSizePerMsg              uint64        `mapstructure:"max_size_per_msg"`

@@ -46,10 +47,10 @@ func defaultRaftConfig() raft.Config {
 	}
 }
 
-func generateRaftConfig(id uint64, repoRoot string, logger logrus.FieldLogger, ram MemoryStorage) (*raft.Config, error) {
+func generateRaftConfig(id uint64, repoRoot string, logger logrus.FieldLogger, ram MemoryStorage) (*raft.Config, time.Duration, error) {
 	readConfig, err := readConfig(repoRoot)
 	if err != nil {
-		return &raft.Config{}, nil
+		return &raft.Config{}, 100 * time.Millisecond, nil
 	}
 	defaultConfig := defaultRaftConfig()
 	defaultConfig.ID = id

@@ -70,7 +71,7 @@ func generateRaftConfig(id uint64, repoRoot string, logger logrus.FieldLogger, r
 	defaultConfig.PreVote = readConfig.RAFT.PreVote
 	defaultConfig.CheckQuorum = readConfig.RAFT.CheckQuorum
 	defaultConfig.DisableProposalForwarding = readConfig.RAFT.DisableProposalForwarding
-	return &defaultConfig, nil
+	return &defaultConfig, readConfig.RAFT.TickTimeout, nil
 }
 
 func generateMempoolConfig(repoRoot string) (*MempoolConfig, error) {

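How the quoted duration strings end up as time.Duration values in these structs: the repo's readConfig presumably goes through viper (the mapstructure tags point that way), and viper's Unmarshal applies mapstructure's string-to-duration hook by default, so "0.1s" decodes straight into TickTimeout. A self-contained sketch under that assumption; the config struct below is trimmed to the fields relevant here:

package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/spf13/viper"
)

// RAFT mirrors the duration- and tick-related fields touched in this PR.
type RAFT struct {
	TickTimeout   time.Duration `mapstructure:"tick_timeout"`
	ElectionTick  int           `mapstructure:"election_tick"`
	HeartbeatTick int           `mapstructure:"heartbeat_tick"`
}

type config struct {
	RAFT RAFT `mapstructure:"raft"`
}

func main() {
	raw := []byte(`
[raft]
tick_timeout = "0.1s"
election_tick = 10
heartbeat_tick = 1
`)

	v := viper.New()
	v.SetConfigType("toml")
	if err := v.ReadConfig(bytes.NewBuffer(raw)); err != nil {
		panic(err)
	}

	var c config
	// viper.Unmarshal uses mapstructure with StringToTimeDurationHookFunc by default,
	// so the quoted "0.1s" is decoded directly into a time.Duration field.
	if err := v.Unmarshal(&c); err != nil {
		panic(err)
	}
	fmt.Println(c.RAFT.TickTimeout) // prints: 100ms
}
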
@@ -37,6 +37,7 @@ type Node struct {
 	confState raftpb.ConfState // raft requires ConfState to be persisted within snapshot
 	commitC   chan *pb.Block   // the hash commit channel
 	errorC    chan<- error     // errors from raft session
+	tickTimeout time.Duration  // tick timeout
 
 	raftStorage *RaftStorage    // the raft backend storage system
 	storage     storage.Storage // db

@@ -55,6 +56,7 @@ type Node struct {
 	readyCache sync.Map        // ready cache
 	ctx        context.Context // context
 	haltC      chan struct{}   // exit signal
+
 }
 
 // NewNode new raft node

@@ -128,7 +130,7 @@ func NewNode(opts ...order.Option) (order.Order, error) {
 // Start or restart raft node
 func (n *Node) Start() error {
 	n.blockAppliedIndex.Store(n.mempool.GetChainHeight(), n.loadAppliedIndex())
-	rc, err := generateRaftConfig(n.id, n.repoRoot, n.logger, n.raftStorage.ram)
+	rc, tickTimeout, err := generateRaftConfig(n.id, n.repoRoot, n.logger, n.raftStorage.ram)
 	if err != nil {
 		return fmt.Errorf("generate raft config: %w", err)
 	}

@@ -137,6 +139,7 @@ func (n *Node) Start() error {
 	} else {
 		n.node = raft.StartNode(rc, n.peers)
 	}
+	n.tickTimeout = tickTimeout
 
 	go n.run()
 	n.mempool.Start()

@@ -254,7 +257,7 @@ func (n *Node) run() {
 	n.confState = snap.Metadata.ConfState
 	n.snapshotIndex = snap.Metadata.Index
 	n.appliedIndex = snap.Metadata.Index
-	ticker := time.NewTicker(100 * time.Millisecond)
+	ticker := time.NewTicker(n.tickTimeout)
 	defer ticker.Stop()
 
 	// handle input request

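The ticker in run() is what drives etcd raft's logical clock: each firing is expected to produce one Tick() call on the raft node, and elections/heartbeats expire after election_tick and heartbeat_tick such calls. A stripped-down sketch of that loop; raftNode and fakeNode below are stand-ins for illustration, not bitxhub's or etcd's actual types:

package main

import (
	"context"
	"fmt"
	"time"
)

// raftNode is a stand-in for the Tick-driven side of a raft node.
type raftNode interface {
	Tick()
}

type fakeNode struct{ ticks int }

func (f *fakeNode) Tick() { f.ticks++ }

// run advances the raft logical clock once per tickTimeout until ctx is done,
// mirroring the ticker loop changed in this PR.
func run(ctx context.Context, n raftNode, tickTimeout time.Duration) {
	ticker := time.NewTicker(tickTimeout)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			n.Tick() // one logical tick; election/heartbeat timeouts are counted in these
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	n := &fakeNode{}
	ctx, cancel := context.WithTimeout(context.Background(), 550*time.Millisecond)
	defer cancel()
	run(ctx, n, 100*time.Millisecond) // tick_timeout = "0.1s"
	fmt.Println(n.ticks)              // roughly 5 ticks
}
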
@@ -1,4 +1,5 @@
 [raft]
+tick_timeout = "0.1s"      # TickTimeout is the internal logical clock for the Node by a single tick. Election timeouts and heartbeat timeouts are in units of ticks.
 election_tick = 10         # ElectionTick is the number of Node.Tick invocations that must pass between elections.
 heartbeat_tick = 1         # HeartbeatTick is the number of Node.Tick invocations that must pass between heartbeats.
 max_size_per_msg = 1048576 # 1024*1024, MaxSizePerMsg limits the max size of each append message.

@@ -6,7 +7,13 @@ max_inflight_msgs = 500 # MaxInflightMsgs limits the max number of in-
 check_quorum = true                # Leader steps down when quorum is not active for an electionTimeout.
 pre_vote = true                    # PreVote prevents a reconnected node from disturbing the network.
 disable_proposal_forwarding = true # This prevents blocks from being accidentally proposed by followers.
-[raft.tx_pool]
-pack_size = 500      # How many transactions should the primary pack.
-pool_size = 50000    # How many transactions the txPool can store in total.
-block_tick = "500ms" # Block packaging time period.
+
+[raft.mempool]
+batch_size = 200          # How many transactions should the primary pack.
+pool_size = 50000         # How many transactions the txPool can store in total.
+tx_slice_size = 10        # How many transactions should the node broadcast at once.
+
+batch_tick = "0.3s"       # Block packaging time period.
+tx_slice_timeout = "0.1s" # Node broadcasts cached transactions once this timeout expires, even if set_size isn't reached yet.
+fetch_timeout = "3s"      # How long to wait for fetching missing transactions to finish.
+

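The tx_slice_size / tx_slice_timeout pair describes a size-or-timeout broadcast policy: flush a slice of cached transactions when it reaches tx_slice_size, or when tx_slice_timeout fires with anything pending. The sketch below only illustrates that documented policy; it is not bitxhub's mempool implementation, and sliceBroadcaster is a made-up name:

package main

import (
	"fmt"
	"time"
)

// sliceBroadcaster flushes pending transactions either when the slice reaches
// sliceSize or when sliceTimeout fires with anything cached, mirroring the
// behaviour described by the tx_slice_size and tx_slice_timeout comments.
func sliceBroadcaster(txs <-chan string, sliceSize int, sliceTimeout time.Duration, broadcast func([]string)) {
	pending := make([]string, 0, sliceSize)
	timer := time.NewTimer(sliceTimeout)
	defer timer.Stop()
	for {
		select {
		case tx, ok := <-txs:
			if !ok {
				if len(pending) > 0 {
					broadcast(pending) // flush leftovers on shutdown
				}
				return
			}
			pending = append(pending, tx)
			if len(pending) >= sliceSize {
				broadcast(pending) // size threshold reached
				pending = pending[:0]
			}
		case <-timer.C:
			if len(pending) > 0 {
				broadcast(pending) // timeout reached with cached transactions
				pending = pending[:0]
			}
			timer.Reset(sliceTimeout)
		}
	}
}

func main() {
	txs := make(chan string, 16)
	go func() {
		for i := 0; i < 12; i++ {
			txs <- fmt.Sprintf("tx-%d", i)
		}
		close(txs)
	}()
	sliceBroadcaster(txs, 10, 100*time.Millisecond, func(s []string) {
		fmt.Println("broadcast", len(s), "txs")
	})
}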