package mempool

import (
	"math"
	"sync"
	"time"

	"github.com/google/btree"
	"github.com/meshplus/bitxhub-model/pb"
	raftproto "github.com/meshplus/bitxhub/pkg/order/etcdraft/proto"
	"github.com/sirupsen/logrus"
)
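
// mempoolImpl is the in-memory transaction pool behind the order module.
// It tracks transactions per account and assembles batches of ready
// transactions for consensus.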
type mempoolImpl struct {
	localID     uint64
	batchSize   uint64
	txSliceSize uint64
	batchSeqNo  uint64 // track the sequence number of block
	poolSize    uint64
	logger      logrus.FieldLogger
	txStore     *transactionStore // store all transactions info
}
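
// newMempoolImpl constructs a mempool from the given Config, falling back to the
// package defaults when batch size, pool size or tx slice size are left unset.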
func newMempoolImpl(config *Config) *mempoolImpl {
	mpi := &mempoolImpl{
		localID:     config.ID,
		batchSeqNo:  config.ChainHeight,
		logger:      config.Logger,
		txSliceSize: config.TxSliceSize,
	}
	mpi.txStore = newTransactionStore()
	if config.BatchSize == 0 {
		mpi.batchSize = DefaultBatchSize
	} else {
		mpi.batchSize = config.BatchSize
	}
	if config.PoolSize == 0 {
		mpi.poolSize = DefaultPoolSize
	} else {
		mpi.poolSize = config.PoolSize
	}
	if config.TxSliceSize == 0 {
		mpi.txSliceSize = DefaultTxSetSize
	} else {
		mpi.txSliceSize = config.TxSliceSize
	}
	mpi.logger.Infof("MemPool batch size = %d", mpi.batchSize)
	mpi.logger.Infof("MemPool tx slice size = %d", mpi.txSliceSize)
	mpi.logger.Infof("MemPool batch seqNo = %d", mpi.batchSeqNo)
	mpi.logger.Infof("MemPool pool size = %d", mpi.poolSize)
	return mpi
}
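
// ProcessTransactions validates the incoming transactions (nonce and duplicate-hash
// checks), inserts the valid ones into the pool, and, on the leader, returns a
// RequestBatch once enough non-batched priority transactions have accumulated;
// otherwise it returns nil.
//
// A minimal, hypothetical call from the order plugin might look like:
//
//	if batch := mpi.ProcessTransactions(txs, isLeader, true); batch != nil {
//		// hand the batch over to consensus
//	}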
func (mpi *mempoolImpl) ProcessTransactions(txs []*pb.Transaction, isLeader, isLocal bool) *raftproto.RequestBatch {
	validTxs := make(map[string][]*pb.Transaction)
	for _, tx := range txs {
		// check the sequence number of the tx
		txAccount := tx.Account()
		currentSeqNo := mpi.txStore.nonceCache.getPendingNonce(txAccount)
		if tx.Nonce < currentSeqNo {
			mpi.logger.Warningf("Account %s: tx nonce %d is lower than the required nonce %d, discard it", txAccount, tx.Nonce, currentSeqNo)
			continue
		}
		// check whether the hash of this tx has already been seen
		txHash := tx.TransactionHash.String()
		if txPointer := mpi.txStore.txHashMap[txHash]; txPointer != nil {
			mpi.logger.Warningf("Tx [account: %s, nonce: %d, hash: %s] already received", txAccount, tx.Nonce, txHash)
			continue
		}
		_, ok := validTxs[txAccount]
		if !ok {
			validTxs[txAccount] = make([]*pb.Transaction, 0)
		}
		validTxs[txAccount] = append(validTxs[txAccount], tx)
	}
	// insert all new valid transactions into the pool, grouped by account
	dirtyAccounts := mpi.txStore.insertTxs(validTxs, isLocal)
	// promote the ready transactions of the dirty accounts
	mpi.processDirtyAccount(dirtyAccounts)
	// the leader generates a batch once enough non-batched priority txs have accumulated
	if isLeader && mpi.txStore.priorityNonBatchSize >= mpi.batchSize {
		batch, err := mpi.generateBlock()
		if err != nil {
			mpi.logger.Errorf("Generate batch failed")
			return nil
		}
		return batch
	}
	return nil
}
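
// processDirtyAccount scans the accounts touched by newly inserted transactions,
// moves their consecutive ready txs into priorityIndex, and parks the out-of-order
// ones in parkingLotIndex until their missing predecessors arrive.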
func (mpi *mempoolImpl) processDirtyAccount(dirtyAccounts map[string]bool) {
	for account := range dirtyAccounts {
		if list, ok := mpi.txStore.allTxs[account]; ok {
			// search for related sequential txs in allTxs
			// and add these txs into priorityIndex and parkingLotIndex
			pendingNonce := mpi.txStore.nonceCache.getPendingNonce(account)
			readyTxs, nonReadyTxs, nextDemandNonce := list.filterReady(pendingNonce)
			mpi.txStore.nonceCache.setPendingNonce(account, nextDemandNonce)
			// insert ready txs into priorityIndex.
			for _, tx := range readyTxs {
				if !mpi.txStore.priorityIndex.data.Has(makeTimeoutKey(account, tx)) {
					mpi.txStore.priorityIndex.insertByTimeoutKey(account, tx)
				}
			}
			mpi.txStore.updateEarliestTimestamp()
			mpi.txStore.priorityNonBatchSize = mpi.txStore.priorityNonBatchSize + uint64(len(readyTxs))
			// insert non-ready txs into parkingLotIndex.
			for _, tx := range nonReadyTxs {
				mpi.txStore.parkingLotIndex.insertByOrderedQueueKey(account, tx)
			}
		}
	}
}
// generateBlock fetches the next batch of transactions for consensus.
// batchedTxs holds txs that were already sent to consensus but not yet committed;
// mempool must filter such txs out.
func (mpi *mempoolImpl) generateBlock() (*raftproto.RequestBatch, error) {
	// A tx with a lower timestamp is observed first by the priority index iterator.
	// If the first tx seen for an account does not carry that account's required nonce,
	// it is parked in skippedTxs until its predecessors are selected.
	mpi.logger.Debugf("Length of non-batched transactions: %d", mpi.txStore.priorityNonBatchSize)
	var batchSize uint64
	if poolLen := mpi.txStore.priorityNonBatchSize; poolLen > mpi.batchSize {
		batchSize = mpi.batchSize
	} else {
		batchSize = mpi.txStore.priorityNonBatchSize
	}
	skippedTxs := make(map[orderedIndexKey]bool)
	result := make([]orderedIndexKey, 0, mpi.batchSize)
	mpi.txStore.priorityIndex.data.Ascend(func(a btree.Item) bool {
		tx := a.(*orderedTimeoutKey)
		// if the tx already exists in batchedTxs, ignore it
		if _, ok := mpi.txStore.batchedTxs[orderedIndexKey{tx.account, tx.nonce}]; ok {
			return true
		}
		txSeq := tx.nonce
		commitNonce := mpi.txStore.nonceCache.getCommitNonce(tx.account)
		var seenPrevious bool
		if txSeq >= 1 {
			_, seenPrevious = mpi.txStore.batchedTxs[orderedIndexKey{account: tx.account, nonce: txSeq - 1}]
		}
		// development-time trace for nonce 3
		if txSeq == 3 {
			mpi.logger.Infof("seenPrevious %v and commitNonce is %d", seenPrevious, commitNonce)
			mpi.logger.Infof("batched txs is %v", mpi.txStore.batchedTxs)
		}
		// include the transaction if it's "next" for the given account or
		// we've already sent its ancestor to consensus
		if seenPrevious || (txSeq == commitNonce) {
			ptr := orderedIndexKey{account: tx.account, nonce: txSeq}
			mpi.txStore.batchedTxs[ptr] = true
			result = append(result, ptr)
			if uint64(len(result)) == batchSize {
				return false
			}
			// check if we can now include some txs that were skipped before for this account
			skippedTxn := orderedIndexKey{account: tx.account, nonce: txSeq + 1}
			for {
				if _, ok := skippedTxs[skippedTxn]; !ok {
					break
				}
				mpi.txStore.batchedTxs[skippedTxn] = true
				result = append(result, skippedTxn)
				if uint64(len(result)) == batchSize {
					return false
				}
				skippedTxn.nonce++
			}
		} else {
			skippedTxs[orderedIndexKey{tx.account, txSeq}] = true
		}
		return true
	})
	if len(result) == 0 && mpi.txStore.priorityNonBatchSize > 0 {
		mpi.logger.Error("===== NOTE!!! Leader generated a batch with 0 txs")
		mpi.txStore.priorityNonBatchSize = 0
		return nil, nil
	}
	// convert transaction pointers to real values
	txList := make([]*pb.Transaction, len(result))
	for i, v := range result {
		rawTransaction := mpi.txStore.getTxByOrderKey(v.account, v.nonce)
		txList[i] = rawTransaction
	}
	mpi.batchSeqNo++
	batchSeqNo := mpi.batchSeqNo
	batch := &raftproto.RequestBatch{
		TxList: txList,
		Height: batchSeqNo,
	}
	if mpi.txStore.priorityNonBatchSize >= uint64(len(txList)) {
		mpi.txStore.priorityNonBatchSize = mpi.txStore.priorityNonBatchSize - uint64(len(txList))
	}
	mpi.logger.Debugf("Leader generated a batch with %d txs, whose height is %d, and there are now %d pending txs.", len(txList), batchSeqNo, mpi.txStore.priorityNonBatchSize)
	return batch, nil
}
// processCommitTransactions removes committed transactions from the pool
// and advances the cached commit/pending nonces of the affected accounts.
func (mpi *mempoolImpl) processCommitTransactions(state *ChainState) {
	dirtyAccounts := make(map[string]bool)
	// record accounts whose pendingNonce is bumped up to the new commit nonce
	updateAccounts := make(map[string]uint64)
	// update the cached commit nonce for each committed transaction's account
	for _, txHash := range state.TxHashList {
		strHash := txHash.String()
		txPointer, ok := mpi.txStore.txHashMap[strHash]
		if !ok {
			mpi.logger.Warningf("Remove transaction %s failed: can't find it in txHashMap", strHash)
			continue
		}
		preCommitNonce := mpi.txStore.nonceCache.getCommitNonce(txPointer.account)
		newCommitNonce := txPointer.nonce + 1
		if preCommitNonce < newCommitNonce {
			mpi.txStore.nonceCache.setCommitNonce(txPointer.account, newCommitNonce)
			// Note: for a restarted node, bump pendingNonce up to commitNonce as well.
			pendingNonce := mpi.txStore.nonceCache.getPendingNonce(txPointer.account)
			if pendingNonce < newCommitNonce {
				updateAccounts[txPointer.account] = newCommitNonce
				mpi.txStore.nonceCache.setPendingNonce(txPointer.account, newCommitNonce)
			}
		}
		delete(mpi.txStore.txHashMap, strHash)
		delete(mpi.txStore.batchedTxs, *txPointer)
		dirtyAccounts[txPointer.account] = true
	}
	// clean related tx info in the caches
	for account := range dirtyAccounts {
		commitNonce := mpi.txStore.nonceCache.getCommitNonce(account)
		if list, ok := mpi.txStore.allTxs[account]; ok {
			// remove all txs with a smaller sequence number for this account.
			removedTxs := list.forward(commitNonce)
			// remove index entries whose nonce is smaller than commitNonce.
			var wg sync.WaitGroup
			wg.Add(4)
			go func(ready map[string][]*pb.Transaction) {
				defer wg.Done()
				list.index.removeBySortedNonceKey(removedTxs)
			}(removedTxs)
			go func(ready map[string][]*pb.Transaction) {
				defer wg.Done()
				mpi.txStore.priorityIndex.removeByTimeoutKey(removedTxs)
			}(removedTxs)
			go func(ready map[string][]*pb.Transaction) {
				defer wg.Done()
				mpi.txStore.ttlIndex.removeByTtlKey(removedTxs)
				mpi.txStore.updateEarliestTimestamp()
			}(removedTxs)
			go func(ready map[string][]*pb.Transaction) {
				defer wg.Done()
				mpi.txStore.parkingLotIndex.removeByOrderedQueueKey(removedTxs)
			}(removedTxs)
			wg.Wait()
		}
	}
	readyNum := uint64(mpi.txStore.priorityIndex.size())
	// set priorityNonBatchSize to min(nonBatchedTxs, readyNum).
	if mpi.txStore.priorityNonBatchSize > readyNum {
		mpi.txStore.priorityNonBatchSize = readyNum
	}
	for account, pendingNonce := range updateAccounts {
		mpi.logger.Debugf("Account %s updates its pendingNonce to %d by commitNonce", account, pendingNonce)
	}
	mpi.logger.Debugf("Replica %d removed committed txs from mempool, and there are now %d non-batched txs, "+
		"priority len: %d, parkingLot len: %d, batchedTx len: %d, txHashMap len: %d", mpi.localID, mpi.txStore.priorityNonBatchSize,
		mpi.txStore.priorityIndex.size(), mpi.txStore.parkingLotIndex.size(), len(mpi.txStore.batchedTxs), len(mpi.txStore.txHashMap))
}
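
// GetTimeoutTransactions collects every transaction whose time in the pool exceeds
// rebroadcastDuration, refreshes their TTL timestamps, and returns them sharded
// into slices of at most txSliceSize so they can be rebroadcast.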
func (mpi *mempoolImpl) GetTimeoutTransactions(rebroadcastDuration time.Duration) [][]*pb.Transaction {
	// any tx that has lived in the pool longer than rebroadcastDuration should be rebroadcast
	mpi.logger.Debugf("Start gathering timeout txs, ttl index len is %d", mpi.txStore.ttlIndex.index.Len())
	currentTime := time.Now().UnixNano()
	if currentTime < mpi.txStore.earliestTimestamp+rebroadcastDuration.Nanoseconds() {
		// if even the earliest tx has not exceeded the timeout limit, none has timed out
		return [][]*pb.Transaction{}
	}
	timeoutItems := make([]*orderedTimeoutKey, 0)
	mpi.txStore.ttlIndex.index.Ascend(func(i btree.Item) bool {
		item := i.(*orderedTimeoutKey)
		if item.timestamp > math.MaxInt64 {
			// TODO(tyx): if this tx has been rebroadcast many times and exceeded a final limit,
			// it is expired and will be removed from mempool
			return true
		}
		// if this tx has not exceeded the rebroadcast duration, break the iteration
		timeoutTime := item.timestamp + rebroadcastDuration.Nanoseconds()
		_, ok := mpi.txStore.allTxs[item.account]
		if !ok || currentTime < timeoutTime {
			return false
		}
		timeoutItems = append(timeoutItems, item)
		return true
	})
	for _, item := range timeoutItems {
		// update the liveTime of the timeout txs
		item.timestamp = currentTime
		mpi.txStore.ttlIndex.items[makeAccountNonceKey(item.account, item.nonce)] = currentTime
	}
	// shard the tx list into fixed-size slices in case it is too large to broadcast at once
	return mpi.shardTxList(timeoutItems, mpi.txSliceSize)
}
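
// shardTxList converts the timed-out index keys back into transactions and splits
// them into slices of at most batchLen entries, so no single rebroadcast message
// grows unboundedly large.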
func (mpi *mempoolImpl) shardTxList(timeoutItems []*orderedTimeoutKey, batchLen uint64) [][]*pb.Transaction {
	begin := uint64(0)
	end := uint64(len(timeoutItems)) - 1
	totalLen := uint64(len(timeoutItems))

	// split the timeout txs into slices of at most batchLen in case totalLen is too large
	batchNums := totalLen / batchLen
	if totalLen%batchLen != 0 {
		batchNums++
	}
	shardedLists := make([][]*pb.Transaction, 0, batchNums)
	for i := uint64(0); i < batchNums; i++ {
		actualLen := batchLen
		if end-begin+1 < batchLen {
			actualLen = end - begin + 1
		}

		shardedList := make([]*pb.Transaction, actualLen)
		for j := uint64(0); j < batchLen && begin <= end; j++ {
			txMap := mpi.txStore.allTxs[timeoutItems[begin].account]
			shardedList[j] = txMap.items[timeoutItems[begin].nonce].tx
			begin++
		}
		shardedLists = append(shardedLists, shardedList)
	}
	return shardedLists
}