refactor: move blockfile to kit lib
This commit is contained in:
parent
e52b4ae2c9
commit
9bac16d161
4
go.mod
4
go.mod
|
@ -26,7 +26,7 @@ require (
|
|||
github.com/libp2p/go-libp2p-core v0.5.6
|
||||
github.com/magiconair/properties v1.8.4
|
||||
github.com/meshplus/bitxhub-core v0.1.0-rc1.0.20201125025329-ac1187099a88
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201027090548-41dfc41037af
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201127072239-fddea8940bae
|
||||
github.com/meshplus/bitxhub-model v1.1.2-0.20201118055706-510eb971b4c6
|
||||
github.com/meshplus/go-lightp2p v0.0.0-20201102131103-3fa9723c2c7c
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
|
@ -34,7 +34,7 @@ require (
|
|||
github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.5.0
|
||||
github.com/prometheus/tsdb v0.7.1
|
||||
github.com/prometheus/tsdb v0.10.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.5.2 // indirect
|
||||
github.com/rs/cors v1.7.0
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
|
|
23
go.sum
23
go.sum
|
@ -68,14 +68,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
|||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitxhub/bitxid v0.1.1-0.20201119084850-60b90cb575c9 h1:Le7yaoTA3ZHaX41jDFfKuOuUDQsW17sAbVZ5esy4Oe0=
|
||||
github.com/bitxhub/bitxid v0.1.1-0.20201119084850-60b90cb575c9/go.mod h1:pbaFyrTyUldNCpjqv0VB1c84Cl7Al9kz8uP6nobCMbM=
|
||||
github.com/bitxhub/did-method-registry v0.0.0-20201125025610-feb20d69f302 h1:WYzucBBrNhOqxpZefi/I9v5uK7KfmEgUc0JjKkBqLYE=
|
||||
github.com/bitxhub/did-method-registry v0.0.0-20201125025610-feb20d69f302/go.mod h1:SGLGP1nTsl+T4yMMdpGQ4S4AYte25/RrGgcZq8bfJr4=
|
||||
github.com/bitxhub/parallel-executor v0.0.0-20201027053703-4bec95aa1cda h1:DC0jE84bDM7qEBrYi8Aj0zzhDaRml26jMgTxxgUluKQ=
|
||||
github.com/bitxhub/parallel-executor v0.0.0-20201027053703-4bec95aa1cda/go.mod h1:CmlkYunXK4gQEhDBp/WQ7qJt2IqlXAeYhQ0yHSf5r3s=
|
||||
github.com/bitxhub/service-mng v0.0.0-20201125031105-f345beee1b42 h1:SlNVTvy11VSzlfSrpEbHO8/IdplKmtqb/qIV8Kljt/g=
|
||||
github.com/bitxhub/service-mng v0.0.0-20201125031105-f345beee1b42/go.mod h1:vK8mZD0cGM9p4AvvYjxUGzi8uQ6evb8Vt8FoNjF398w=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
|
||||
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
|
||||
|
@ -225,6 +217,7 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x
|
|||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
|
@ -632,6 +625,8 @@ github.com/meshplus/bitxhub-core v0.1.0-rc1.0.20201125025329-ac1187099a88/go.mod
|
|||
github.com/meshplus/bitxhub-kit v1.0.0-rc1/go.mod h1:ra/AhOkPvpElI+wXrB9G6DjdcrdxFU3vMwA5MYKr9D0=
|
||||
github.com/meshplus/bitxhub-kit v1.0.0/go.mod h1:7cWyhXWZfrQ3+EaxkRoXfuiG3Y5R9DXYJomeZKkETW8=
|
||||
github.com/meshplus/bitxhub-kit v1.0.1/go.mod h1:r4l4iqn0RPJreb/OmoYKfjCjQJrXpZX++6Qc31VG/1k=
|
||||
github.com/meshplus/bitxhub-kit v1.1.1 h1:vkPO88oA3+Kpc0N8lIgfj/U52KBuI+633hPbMYt1xm8=
|
||||
github.com/meshplus/bitxhub-kit v1.1.1/go.mod h1:r4l4iqn0RPJreb/OmoYKfjCjQJrXpZX++6Qc31VG/1k=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201021105954-468d0a9d7957 h1:1a3wYo2HQw9/yg5LfAPJ1En90pPbMwRlaVssxOLG97w=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201021105954-468d0a9d7957/go.mod h1:r4l4iqn0RPJreb/OmoYKfjCjQJrXpZX++6Qc31VG/1k=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201023030558-9f36554d5d5d/go.mod h1:r4l4iqn0RPJreb/OmoYKfjCjQJrXpZX++6Qc31VG/1k=
|
||||
|
@ -639,6 +634,10 @@ github.com/meshplus/bitxhub-kit v1.1.2-0.20201023073721-052e6b89ea39 h1:zgkQfnws
|
|||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201023073721-052e6b89ea39/go.mod h1:r4l4iqn0RPJreb/OmoYKfjCjQJrXpZX++6Qc31VG/1k=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201027090548-41dfc41037af h1:konLVrtVND+WXYlliNtsbXQTZRU1WDfYOdxcMs08iLM=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201027090548-41dfc41037af/go.mod h1:r4l4iqn0RPJreb/OmoYKfjCjQJrXpZX++6Qc31VG/1k=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201126161339-2bc105d8dab5 h1:VTLtDd4v6lFqNcNCHMojguMyKaoMzmhdW2ryEYT5u14=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201126161339-2bc105d8dab5/go.mod h1:KR7ZlXhII9n0Bu8viaZTScvXCYn0MCQnYlsTvHPp0XA=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201127072239-fddea8940bae h1:G9ENjV078RtGauPVUvpPn2c83h9uXQvvzdQpOwdSVUM=
|
||||
github.com/meshplus/bitxhub-kit v1.1.2-0.20201127072239-fddea8940bae/go.mod h1:KR7ZlXhII9n0Bu8viaZTScvXCYn0MCQnYlsTvHPp0XA=
|
||||
github.com/meshplus/bitxhub-model v1.0.0-rc3/go.mod h1:ZCctQIYTlE3vJ8Lhkrgs9bWwNA+Dw4JzojOSIzLVU6E=
|
||||
github.com/meshplus/bitxhub-model v1.0.0-rc3/go.mod h1:ZCctQIYTlE3vJ8Lhkrgs9bWwNA+Dw4JzojOSIzLVU6E=
|
||||
github.com/meshplus/bitxhub-model v1.1.2-0.20201021152621-0b3c17c54b23 h1:ys+2VjPrt6nr5xEVgRsVxowipkF425IOcI5HV53M5bA=
|
||||
|
@ -802,6 +801,8 @@ github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+G
|
|||
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
|
||||
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
|
@ -861,6 +862,7 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9
|
|||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
|
@ -873,6 +875,7 @@ github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8
|
|||
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
|
@ -1006,6 +1009,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
|
|||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
|
@ -1037,6 +1041,7 @@ golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/
|
|||
golang.org/x/net v0.0.0-20200528225125-3c3fba18258b/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
|
||||
|
@ -1050,6 +1055,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1134,6 +1140,7 @@ golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200221224223-e1da425f72fd h1:hHkvGJK23seRCflePJnVa9IMv8fsuavSCWKd11kDQFs=
|
||||
golang.org/x/tools v0.0.0-20200221224223-e1da425f72fd/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20201118030313-598b068a9102 h1:kr6Ik/EJgxdTSLX+rSiDounHdHWMBu9Ks/ghr2hWNpo=
|
||||
golang.org/x/tools v0.0.0-20201118030313-598b068a9102/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
"github.com/common-nighthawk/go-figure"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/meshplus/bitxhub-kit/storage/blockfile"
|
||||
"github.com/meshplus/bitxhub-kit/storage/leveldb"
|
||||
"github.com/meshplus/bitxhub-kit/types"
|
||||
_ "github.com/meshplus/bitxhub/imports"
|
||||
|
@ -19,7 +20,6 @@ import (
|
|||
"github.com/meshplus/bitxhub/internal/repo"
|
||||
"github.com/meshplus/bitxhub/internal/router"
|
||||
"github.com/meshplus/bitxhub/internal/storages"
|
||||
"github.com/meshplus/bitxhub/internal/storages/blockfile"
|
||||
"github.com/meshplus/bitxhub/pkg/order"
|
||||
"github.com/meshplus/bitxhub/pkg/order/etcdraft"
|
||||
"github.com/meshplus/bitxhub/pkg/peermgr"
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/meshplus/bitxhub-kit/crypto"
|
||||
"github.com/meshplus/bitxhub-kit/crypto/asym"
|
||||
"github.com/meshplus/bitxhub-kit/log"
|
||||
"github.com/meshplus/bitxhub-kit/storage/blockfile"
|
||||
"github.com/meshplus/bitxhub-kit/storage/leveldb"
|
||||
"github.com/meshplus/bitxhub-kit/types"
|
||||
"github.com/meshplus/bitxhub-model/constant"
|
||||
|
@ -23,7 +24,6 @@ import (
|
|||
"github.com/meshplus/bitxhub/internal/ledger/mock_ledger"
|
||||
"github.com/meshplus/bitxhub/internal/model/events"
|
||||
"github.com/meshplus/bitxhub/internal/repo"
|
||||
"github.com/meshplus/bitxhub/internal/storages/blockfile"
|
||||
"github.com/meshplus/bitxhub/pkg/cert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
|
@ -8,9 +8,9 @@ import (
|
|||
"github.com/meshplus/bitxhub-kit/bytesutil"
|
||||
"github.com/meshplus/bitxhub-kit/hexutil"
|
||||
"github.com/meshplus/bitxhub-kit/log"
|
||||
"github.com/meshplus/bitxhub-kit/storage/blockfile"
|
||||
"github.com/meshplus/bitxhub-kit/storage/leveldb"
|
||||
"github.com/meshplus/bitxhub-kit/types"
|
||||
"github.com/meshplus/bitxhub/internal/storages/blockfile"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
|
|
@ -7,9 +7,9 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/meshplus/bitxhub-kit/storage"
|
||||
"github.com/meshplus/bitxhub-kit/storage/blockfile"
|
||||
"github.com/meshplus/bitxhub-kit/types"
|
||||
"github.com/meshplus/bitxhub-model/pb"
|
||||
"github.com/meshplus/bitxhub/internal/storages/blockfile"
|
||||
)
|
||||
|
||||
// PutBlock put block into store
|
||||
|
|
|
@ -6,10 +6,10 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/meshplus/bitxhub-kit/storage"
|
||||
"github.com/meshplus/bitxhub-kit/storage/blockfile"
|
||||
"github.com/meshplus/bitxhub-kit/types"
|
||||
"github.com/meshplus/bitxhub-model/pb"
|
||||
"github.com/meshplus/bitxhub/internal/repo"
|
||||
"github.com/meshplus/bitxhub/internal/storages/blockfile"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
|
@ -13,11 +13,11 @@ import (
|
|||
"github.com/meshplus/bitxhub-kit/crypto"
|
||||
"github.com/meshplus/bitxhub-kit/log"
|
||||
"github.com/meshplus/bitxhub-kit/storage"
|
||||
"github.com/meshplus/bitxhub-kit/storage/blockfile"
|
||||
"github.com/meshplus/bitxhub-kit/storage/leveldb"
|
||||
"github.com/meshplus/bitxhub-kit/types"
|
||||
"github.com/meshplus/bitxhub-model/pb"
|
||||
"github.com/meshplus/bitxhub/internal/repo"
|
||||
"github.com/meshplus/bitxhub/internal/storages/blockfile"
|
||||
"github.com/meshplus/bitxhub/pkg/cert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
|
@ -1,181 +0,0 @@
|
|||
package blockfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/prometheus/tsdb/fileutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type BlockFile struct {
|
||||
blocks uint64 // Number of blocks
|
||||
|
||||
tables map[string]*BlockTable // Data tables for stroring blocks
|
||||
instanceLock fileutil.Releaser // File-system lock to prevent double opens
|
||||
|
||||
logger logrus.FieldLogger
|
||||
closeOnce sync.Once
|
||||
}
|
||||
|
||||
func NewBlockFile(repoRoot string, logger logrus.FieldLogger) (*BlockFile, error) {
|
||||
if info, err := os.Lstat(repoRoot); !os.IsNotExist(err) {
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
logger.WithField("path", repoRoot).Error("Symbolic link is not supported")
|
||||
return nil, fmt.Errorf("symbolic link datadir is not supported")
|
||||
}
|
||||
}
|
||||
lock, _, err := fileutil.Flock(filepath.Join(repoRoot, "FLOCK"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockfile := &BlockFile{
|
||||
tables: make(map[string]*BlockTable),
|
||||
instanceLock: lock,
|
||||
logger: logger,
|
||||
}
|
||||
for name := range BlockFileSchema {
|
||||
table, err := newTable(repoRoot, name, 2*1000*1000*1000, logger)
|
||||
if err != nil {
|
||||
for _, table := range blockfile.tables {
|
||||
table.Close()
|
||||
}
|
||||
_ = lock.Release()
|
||||
return nil, err
|
||||
}
|
||||
blockfile.tables[name] = table
|
||||
}
|
||||
if err := blockfile.repair(); err != nil {
|
||||
for _, table := range blockfile.tables {
|
||||
table.Close()
|
||||
}
|
||||
_ = lock.Release()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return blockfile, nil
|
||||
}
|
||||
|
||||
func (bf *BlockFile) Blocks() (uint64, error) {
|
||||
return atomic.LoadUint64(&bf.blocks), nil
|
||||
}
|
||||
|
||||
func (bf *BlockFile) Get(kind string, number uint64) ([]byte, error) {
|
||||
if table := bf.tables[kind]; table != nil {
|
||||
return table.Retrieve(number - 1)
|
||||
}
|
||||
return nil, fmt.Errorf("unknown table")
|
||||
}
|
||||
|
||||
func (bf *BlockFile) AppendBlock(number uint64, hash, body, receipts, transactions, interchainMetas []byte) (err error) {
|
||||
if atomic.LoadUint64(&bf.blocks) != number {
|
||||
return fmt.Errorf("the append operation is out-order")
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
rerr := bf.repair()
|
||||
if rerr != nil {
|
||||
bf.logger.WithField("err", err).Errorf("Failed to repair blockfile")
|
||||
}
|
||||
bf.logger.WithFields(logrus.Fields{
|
||||
"number": number,
|
||||
"err": err,
|
||||
}).Info("Append block failed")
|
||||
}
|
||||
}()
|
||||
if err := bf.tables[BlockFileHashTable].Append(bf.blocks, hash); err != nil {
|
||||
bf.logger.WithFields(logrus.Fields{
|
||||
"number": bf.blocks,
|
||||
"hash": hash,
|
||||
"err": err,
|
||||
}).Error("Failed to append block hash")
|
||||
return err
|
||||
}
|
||||
if err := bf.tables[BlockFileBodiesTable].Append(bf.blocks, body); err != nil {
|
||||
bf.logger.WithFields(logrus.Fields{
|
||||
"number": bf.blocks,
|
||||
"hash": hash,
|
||||
"err": err,
|
||||
}).Error("Failed to append block body")
|
||||
return err
|
||||
}
|
||||
if err := bf.tables[BlockFileTXsTable].Append(bf.blocks, transactions); err != nil {
|
||||
bf.logger.WithFields(logrus.Fields{
|
||||
"number": bf.blocks,
|
||||
"hash": hash,
|
||||
"err": err,
|
||||
}).Error("Failed to append block transactions")
|
||||
return err
|
||||
}
|
||||
if err := bf.tables[BlockFileReceiptTable].Append(bf.blocks, receipts); err != nil {
|
||||
bf.logger.WithFields(logrus.Fields{
|
||||
"number": bf.blocks,
|
||||
"hash": hash,
|
||||
"err": err,
|
||||
}).Error("Failed to append block receipt")
|
||||
return err
|
||||
}
|
||||
if err := bf.tables[BlockFileInterchainTable].Append(bf.blocks, interchainMetas); err != nil {
|
||||
bf.logger.WithFields(logrus.Fields{
|
||||
"number": bf.blocks,
|
||||
"hash": hash,
|
||||
"err": err,
|
||||
}).Error("Failed to append block interchain metas")
|
||||
return err
|
||||
}
|
||||
atomic.AddUint64(&bf.blocks, 1) // Only modify atomically
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bf *BlockFile) TruncateBlocks(items uint64) error {
|
||||
if atomic.LoadUint64(&bf.blocks) <= items {
|
||||
return nil
|
||||
}
|
||||
for _, table := range bf.tables {
|
||||
if err := table.truncate(items); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
atomic.StoreUint64(&bf.blocks, items)
|
||||
return nil
|
||||
}
|
||||
|
||||
// repair truncates all data tables to the same length.
|
||||
func (bf *BlockFile) repair() error {
|
||||
min := uint64(math.MaxUint64)
|
||||
for _, table := range bf.tables {
|
||||
items := atomic.LoadUint64(&table.items)
|
||||
if min > items {
|
||||
min = items
|
||||
}
|
||||
}
|
||||
for _, table := range bf.tables {
|
||||
if err := table.truncate(min); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
atomic.StoreUint64(&bf.blocks, min)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bf *BlockFile) Close() error {
|
||||
var errs []error
|
||||
bf.closeOnce.Do(func() {
|
||||
for _, table := range bf.tables {
|
||||
if err := table.Close(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
if err := bf.instanceLock.Release(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
})
|
||||
if errs != nil {
|
||||
return fmt.Errorf("%v", errs)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,475 +0,0 @@
|
|||
package blockfile
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type BlockTable struct {
|
||||
items uint64
|
||||
|
||||
name string
|
||||
path string
|
||||
maxFileSize uint32 // Max file size for data-files
|
||||
|
||||
head *os.File // File descriptor for the data head of the table
|
||||
index *os.File // File description
|
||||
files map[uint32]*os.File // open files
|
||||
headId uint32 // number of the currently active head file
|
||||
tailId uint32 // number of the earliest file
|
||||
|
||||
headBytes uint32 // Number of bytes written to the head file
|
||||
itemOffset uint32 // Offset (number of discarded items)
|
||||
|
||||
logger logrus.FieldLogger
|
||||
lock sync.RWMutex // Mutex protecting the data file descriptors
|
||||
}
|
||||
|
||||
type indexEntry struct {
|
||||
filenum uint32 // stored as uint16 ( 2 bytes)
|
||||
offset uint32 // stored as uint32 ( 4 bytes)
|
||||
}
|
||||
|
||||
const indexEntrySize = 6
|
||||
|
||||
// unmarshallBinary deserializes binary b into the rawIndex entry.
|
||||
func (i *indexEntry) unmarshalBinary(b []byte) error {
|
||||
i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
|
||||
i.offset = binary.BigEndian.Uint32(b[2:6])
|
||||
return nil
|
||||
}
|
||||
|
||||
// marshallBinary serializes the rawIndex entry into binary.
|
||||
func (i *indexEntry) marshallBinary() []byte {
|
||||
b := make([]byte, indexEntrySize)
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(i.filenum))
|
||||
binary.BigEndian.PutUint32(b[2:6], i.offset)
|
||||
return b
|
||||
}
|
||||
|
||||
func newTable(path string, name string, maxFilesize uint32, logger logrus.FieldLogger) (*BlockTable, error) {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
idxName := fmt.Sprintf("%s.ridx", name)
|
||||
offsets, err := openBlockFileForAppend(filepath.Join(path, idxName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
table := &BlockTable{
|
||||
index: offsets,
|
||||
files: make(map[uint32]*os.File),
|
||||
name: name,
|
||||
path: path,
|
||||
maxFileSize: maxFilesize,
|
||||
logger: logger,
|
||||
}
|
||||
if err := table.repair(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return table, nil
|
||||
}
|
||||
|
||||
func (b *BlockTable) repair() error {
|
||||
buffer := make([]byte, indexEntrySize)
|
||||
|
||||
stat, err := b.index.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if stat.Size() == 0 {
|
||||
if _, err := b.index.Write(buffer); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if remainder := stat.Size() % indexEntrySize; remainder != 0 {
|
||||
err := truncateBlockFile(b.index, stat.Size()-remainder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if stat, err = b.index.Stat(); err != nil {
|
||||
return err
|
||||
}
|
||||
offsetsSize := stat.Size()
|
||||
|
||||
// Open the head file
|
||||
var (
|
||||
firstIndex indexEntry
|
||||
lastIndex indexEntry
|
||||
contentSize int64
|
||||
contentExp int64
|
||||
)
|
||||
// Read index zero, determine what file is the earliest
|
||||
// and what item offset to use
|
||||
_, err = b.index.ReadAt(buffer, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = firstIndex.unmarshalBinary(buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.tailId = firstIndex.filenum
|
||||
b.itemOffset = firstIndex.offset
|
||||
|
||||
_, err = b.index.ReadAt(buffer, offsetsSize-indexEntrySize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = lastIndex.unmarshalBinary(buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.head, err = b.openFile(lastIndex.filenum, openBlockFileForAppend)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if stat, err = b.head.Stat(); err != nil {
|
||||
return err
|
||||
}
|
||||
contentSize = stat.Size()
|
||||
|
||||
// Keep truncating both files until they come in sync
|
||||
contentExp = int64(lastIndex.offset)
|
||||
|
||||
for contentExp != contentSize {
|
||||
b.logger.WithFields(logrus.Fields{
|
||||
"indexed": contentExp,
|
||||
"stored": contentSize,
|
||||
}).Warn("Truncating dangling head")
|
||||
if contentExp < contentSize {
|
||||
if err := truncateBlockFile(b.head, contentExp); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if contentExp > contentSize {
|
||||
b.logger.WithFields(logrus.Fields{
|
||||
"indexed": contentExp,
|
||||
"stored": contentSize,
|
||||
}).Warn("Truncating dangling indexes")
|
||||
offsetsSize -= indexEntrySize
|
||||
_, err = b.index.ReadAt(buffer, offsetsSize-indexEntrySize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var newLastIndex indexEntry
|
||||
err = newLastIndex.unmarshalBinary(buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// We might have slipped back into an earlier head-file here
|
||||
if newLastIndex.filenum != lastIndex.filenum {
|
||||
// Release earlier opened file
|
||||
b.releaseFile(lastIndex.filenum)
|
||||
if b.head, err = b.openFile(newLastIndex.filenum, openBlockFileForAppend); err != nil {
|
||||
return err
|
||||
}
|
||||
if stat, err = b.head.Stat(); err != nil {
|
||||
// TODO, anything more we can do here?
|
||||
// A data file has gone missing...
|
||||
return err
|
||||
}
|
||||
contentSize = stat.Size()
|
||||
}
|
||||
lastIndex = newLastIndex
|
||||
contentExp = int64(lastIndex.offset)
|
||||
}
|
||||
}
|
||||
// Ensure all reparation changes have been written to disk
|
||||
if err := b.index.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.head.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Update the item and byte counters and return
|
||||
b.items = uint64(b.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
|
||||
b.headBytes = uint32(contentSize)
|
||||
b.headId = lastIndex.filenum
|
||||
|
||||
// Close opened files and preopen all files
|
||||
if err := b.preopen(); err != nil {
|
||||
return err
|
||||
}
|
||||
b.logger.WithFields(logrus.Fields{
|
||||
"items": b.items,
|
||||
"size": b.headBytes,
|
||||
}).Debug("Chain freezer table opened")
|
||||
return nil
|
||||
}
|
||||
|
||||
// truncate discards any recent data above the provided threshold number.
|
||||
func (b *BlockTable) truncate(items uint64) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
existing := atomic.LoadUint64(&b.items)
|
||||
if existing <= items {
|
||||
return nil
|
||||
}
|
||||
|
||||
b.logger.WithFields(logrus.Fields{
|
||||
"items": existing,
|
||||
"limit": items,
|
||||
}).Warn("Truncating block file")
|
||||
if err := truncateBlockFile(b.index, int64(items+1)*indexEntrySize); err != nil {
|
||||
return err
|
||||
}
|
||||
// Calculate the new expected size of the data file and truncate it
|
||||
buffer := make([]byte, indexEntrySize)
|
||||
if _, err := b.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
|
||||
return err
|
||||
}
|
||||
var expected indexEntry
|
||||
err := expected.unmarshalBinary(buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We might need to truncate back to older files
|
||||
if expected.filenum != b.headId {
|
||||
// If already open for reading, force-reopen for writing
|
||||
b.releaseFile(expected.filenum)
|
||||
newHead, err := b.openFile(expected.filenum, openBlockFileForAppend)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Release any files _after the current head -- both the previous head
|
||||
// and any files which may have been opened for reading
|
||||
b.releaseFilesAfter(expected.filenum, true)
|
||||
// Set back the historic head
|
||||
b.head = newHead
|
||||
atomic.StoreUint32(&b.headId, expected.filenum)
|
||||
}
|
||||
if err := truncateBlockFile(b.head, int64(expected.offset)); err != nil {
|
||||
return err
|
||||
}
|
||||
// All data files truncated, set internal counters and return
|
||||
atomic.StoreUint64(&b.items, items)
|
||||
atomic.StoreUint32(&b.headBytes, expected.offset)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BlockTable) Retrieve(item uint64) ([]byte, error) {
|
||||
b.lock.RLock()
|
||||
|
||||
if b.index == nil || b.head == nil {
|
||||
b.lock.RUnlock()
|
||||
return nil, fmt.Errorf("closed")
|
||||
}
|
||||
if atomic.LoadUint64(&b.items) <= item {
|
||||
b.lock.RUnlock()
|
||||
return nil, fmt.Errorf("out of bounds")
|
||||
}
|
||||
if uint64(b.itemOffset) > item {
|
||||
b.lock.RUnlock()
|
||||
return nil, fmt.Errorf("out of bounds")
|
||||
}
|
||||
startOffset, endOffset, filenum, err := b.getBounds(item - uint64(b.itemOffset))
|
||||
if err != nil {
|
||||
b.lock.RUnlock()
|
||||
return nil, err
|
||||
}
|
||||
dataFile, exist := b.files[filenum]
|
||||
if !exist {
|
||||
b.lock.RUnlock()
|
||||
return nil, fmt.Errorf("missing data file %d", filenum)
|
||||
}
|
||||
blob := make([]byte, endOffset-startOffset)
|
||||
if _, err := dataFile.ReadAt(blob, int64(startOffset)); err != nil {
|
||||
b.lock.RUnlock()
|
||||
return nil, err
|
||||
}
|
||||
b.lock.RUnlock()
|
||||
|
||||
return blob, nil
|
||||
}
|
||||
|
||||
func (b *BlockTable) Append(item uint64, blob []byte) error {
|
||||
b.lock.RLock()
|
||||
if b.index == nil || b.head == nil {
|
||||
b.lock.RUnlock()
|
||||
return fmt.Errorf("closed")
|
||||
}
|
||||
if atomic.LoadUint64(&b.items) != item {
|
||||
b.lock.RUnlock()
|
||||
return fmt.Errorf("appending unexpected item: want %d, have %d", b.items, item)
|
||||
}
|
||||
bLen := uint32(len(blob))
|
||||
if b.headBytes+bLen < bLen ||
|
||||
b.headBytes+bLen > b.maxFileSize {
|
||||
b.lock.RUnlock()
|
||||
b.lock.Lock()
|
||||
nextID := atomic.LoadUint32(&b.headId) + 1
|
||||
// We open the next file in truncated mode -- if this file already
|
||||
// exists, we need to start over from scratch on it
|
||||
newHead, err := b.openFile(nextID, openBlockFileTruncated)
|
||||
if err != nil {
|
||||
b.lock.Unlock()
|
||||
return err
|
||||
}
|
||||
// Close old file, and reopen in RDONLY mode
|
||||
b.releaseFile(b.headId)
|
||||
_, err = b.openFile(b.headId, openBlockFileForReadOnly)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Swap out the current head
|
||||
b.head = newHead
|
||||
atomic.StoreUint32(&b.headBytes, 0)
|
||||
atomic.StoreUint32(&b.headId, nextID)
|
||||
b.lock.Unlock()
|
||||
b.lock.RLock()
|
||||
}
|
||||
|
||||
defer b.lock.RUnlock()
|
||||
if _, err := b.head.Write(blob); err != nil {
|
||||
return err
|
||||
}
|
||||
newOffset := atomic.AddUint32(&b.headBytes, bLen)
|
||||
idx := indexEntry{
|
||||
filenum: atomic.LoadUint32(&b.headId),
|
||||
offset: newOffset,
|
||||
}
|
||||
// Write indexEntry
|
||||
_, _ = b.index.Write(idx.marshallBinary())
|
||||
|
||||
atomic.AddUint64(&b.items, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BlockTable) getBounds(item uint64) (uint32, uint32, uint32, error) {
|
||||
buffer := make([]byte, indexEntrySize)
|
||||
var startIdx, endIdx indexEntry
|
||||
if _, err := b.index.ReadAt(buffer, int64((item+1)*indexEntrySize)); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
if err := endIdx.unmarshalBinary(buffer); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
if item != 0 {
|
||||
if _, err := b.index.ReadAt(buffer, int64(item*indexEntrySize)); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
if err := startIdx.unmarshalBinary(buffer); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
} else {
|
||||
// the first reading
|
||||
return 0, endIdx.offset, endIdx.filenum, nil
|
||||
}
|
||||
if startIdx.filenum != endIdx.filenum {
|
||||
return 0, endIdx.offset, endIdx.filenum, nil
|
||||
}
|
||||
return startIdx.offset, endIdx.offset, endIdx.filenum, nil
|
||||
}
|
||||
|
||||
func (b *BlockTable) preopen() (err error) {
|
||||
b.releaseFilesAfter(0, false)
|
||||
|
||||
for i := b.tailId; i < b.headId; i++ {
|
||||
if _, err = b.openFile(i, openBlockFileForReadOnly); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b.head, err = b.openFile(b.headId, openBlockFileForAppend)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *BlockTable) openFile(num uint32, opener func(string) (*os.File, error)) (f *os.File, err error) {
|
||||
var exist bool
|
||||
if f, exist = b.files[num]; !exist {
|
||||
name := fmt.Sprintf("%s.%04d.rdat", b.name, num)
|
||||
f, err = opener(filepath.Join(b.path, name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.files[num] = f
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Close closes all opened files.
|
||||
func (b *BlockTable) Close() error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
var errs []error
|
||||
if err := b.index.Close(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
b.index = nil
|
||||
|
||||
for _, f := range b.files {
|
||||
if err := f.Close(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
b.head = nil
|
||||
|
||||
if errs != nil {
|
||||
return fmt.Errorf("%v", errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BlockTable) releaseFilesAfter(num uint32, remove bool) {
|
||||
for fnum, f := range b.files {
|
||||
if fnum > num {
|
||||
delete(b.files, fnum)
|
||||
f.Close()
|
||||
if remove {
|
||||
os.Remove(f.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BlockTable) releaseFile(num uint32) {
|
||||
if f, exist := b.files[num]; exist {
|
||||
delete(b.files, num)
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func truncateBlockFile(file *os.File, size int64) error {
|
||||
if err := file.Truncate(size); err != nil {
|
||||
return err
|
||||
}
|
||||
// Seek to end for append
|
||||
if _, err := file.Seek(0, io.SeekEnd); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func openBlockFileForAppend(filename string) (*os.File, error) {
|
||||
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Seek to end for append
|
||||
if _, err = file.Seek(0, io.SeekEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func openBlockFileTruncated(filename string) (*os.File, error) {
|
||||
return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
}
|
||||
|
||||
func openBlockFileForReadOnly(filename string) (*os.File, error) {
|
||||
return os.OpenFile(filename, os.O_RDONLY, 0644)
|
||||
}
|
|
@ -1,275 +0,0 @@
|
|||
package blockfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/meshplus/bitxhub-kit/log"
|
||||
"github.com/meshplus/bitxhub-kit/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// getChunk returns a size-byte slice with every byte set to b (mod 256).
func getChunk(size int, b int) []byte {
	data := make([]byte, size)
	for i := 0; i < size; i++ {
		data[i] = byte(b)
	}
	return data
}
|
||||
|
||||
func TestBlockFileBasics(t *testing.T) {
|
||||
f, err := NewBlockFile(os.TempDir(), log.NewWithModule("blockfile_test"))
|
||||
assert.Nil(t, err)
|
||||
defer f.Close()
|
||||
err = f.TruncateBlocks(uint64(0))
|
||||
assert.Nil(t, err)
|
||||
err = f.AppendBlock(uint64(0), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.Nil(t, err)
|
||||
num, err := f.Blocks()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(1), num)
|
||||
|
||||
_, err = f.Get(BlockFileHashTable, uint64(1))
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestBlockTableBasics(t *testing.T) {
|
||||
// set cutoff at 50 bytes
|
||||
f, err := newTable(os.TempDir(),
|
||||
fmt.Sprintf("unittest-%d", rand.Uint64()), 2*1000*1000*1000, log.NewWithModule("blockfile_test"))
|
||||
assert.Nil(t, err)
|
||||
defer f.Close()
|
||||
// Write 15 bytes 255 times, results in 85 files
|
||||
for x := 0; x < 255; x++ {
|
||||
data := getChunk(15, x)
|
||||
f.Append(uint64(x), data)
|
||||
}
|
||||
for y := 0; y < 255; y++ {
|
||||
exp := getChunk(15, y)
|
||||
got, err := f.Retrieve(uint64(y))
|
||||
assert.Nil(t, err)
|
||||
if !bytes.Equal(got, exp) {
|
||||
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
|
||||
}
|
||||
}
|
||||
// Check that we cannot read too far
|
||||
_, err = f.Retrieve(uint64(255))
|
||||
assert.Equal(t, fmt.Errorf("out of bounds"), err)
|
||||
}
|
||||
|
||||
func TestAppendBlocKCase1(t *testing.T) {
|
||||
f, err := NewBlockFile(os.TempDir(), log.NewWithModule("blockfile_test"))
|
||||
assert.Nil(t, err)
|
||||
defer f.Close()
|
||||
err = f.TruncateBlocks(uint64(0))
|
||||
assert.Nil(t, err)
|
||||
err = f.AppendBlock(uint64(0), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.Nil(t, err)
|
||||
f.tables[BlockFileHashTable].items = 3
|
||||
err = f.AppendBlock(uint64(1), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestAppendBlocKCase2(t *testing.T) {
|
||||
f, err := NewBlockFile(os.TempDir(), log.NewWithModule("blockfile_test"))
|
||||
assert.Nil(t, err)
|
||||
defer f.Close()
|
||||
err = f.TruncateBlocks(uint64(0))
|
||||
assert.Nil(t, err)
|
||||
err = f.AppendBlock(uint64(0), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.Nil(t, err)
|
||||
f.tables[BlockFileBodiesTable].items = 3
|
||||
err = f.AppendBlock(uint64(1), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestAppendBlocKCase3(t *testing.T) {
|
||||
f, err := NewBlockFile(os.TempDir(), log.NewWithModule("blockfile_test"))
|
||||
assert.Nil(t, err)
|
||||
defer f.Close()
|
||||
err = f.TruncateBlocks(uint64(0))
|
||||
assert.Nil(t, err)
|
||||
err = f.AppendBlock(uint64(0), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.Nil(t, err)
|
||||
f.tables[BlockFileInterchainTable].items = 3
|
||||
err = f.AppendBlock(uint64(1), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestAppendBlocKCase4(t *testing.T) {
|
||||
f, err := NewBlockFile(os.TempDir(), log.NewWithModule("blockfile_test"))
|
||||
assert.Nil(t, err)
|
||||
defer f.Close()
|
||||
err = f.TruncateBlocks(uint64(0))
|
||||
assert.Nil(t, err)
|
||||
err = f.AppendBlock(uint64(0), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.Nil(t, err)
|
||||
f.tables[BlockFileReceiptTable].items = 3
|
||||
err = f.AppendBlock(uint64(1), types.NewHash([]byte{1}).Bytes(), []byte("1"), []byte("1"), []byte("1"), []byte("1"))
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestBlockTableBasicsClosing(t *testing.T) {
|
||||
var (
|
||||
fname = fmt.Sprintf("basics-close-%d", rand.Uint64())
|
||||
logger = log.NewWithModule("blockfile_test")
|
||||
f *BlockTable
|
||||
err error
|
||||
)
|
||||
f, err = newTable(os.TempDir(), fname, 2*1000*1000*1000, logger)
|
||||
assert.Nil(t, err)
|
||||
// Write 15 bytes 255 times, results in 85 files
|
||||
for x := 0; x < 255; x++ {
|
||||
data := getChunk(15, x)
|
||||
f.Append(uint64(x), data)
|
||||
f.Close()
|
||||
f, err = newTable(os.TempDir(), fname, 2*1000*1000*1000, logger)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for y := 0; y < 255; y++ {
|
||||
exp := getChunk(15, y)
|
||||
got, err := f.Retrieve(uint64(y))
|
||||
assert.Nil(t, err)
|
||||
if !bytes.Equal(got, exp) {
|
||||
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
|
||||
}
|
||||
f.Close()
|
||||
f, err = newTable(os.TempDir(), fname, 2*1000*1000*1000, logger)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFreezerTruncate(t *testing.T) {
|
||||
fname := fmt.Sprintf("truncation-%d", rand.Uint64())
|
||||
logger := log.NewWithModule("blockfile_test")
|
||||
|
||||
{ // Fill table
|
||||
f, err := newTable(os.TempDir(), fname, 50, logger)
|
||||
assert.Nil(t, err)
|
||||
// Write 15 bytes 30 times
|
||||
for x := 0; x < 30; x++ {
|
||||
data := getChunk(15, x)
|
||||
f.Append(uint64(x), data)
|
||||
}
|
||||
// The last item should be there
|
||||
_, err = f.Retrieve(f.items - 1)
|
||||
assert.Nil(t, err)
|
||||
f.Close()
|
||||
}
|
||||
// Reopen, truncate
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, 50, logger)
|
||||
assert.Nil(t, err)
|
||||
defer f.Close()
|
||||
// for x := 0; x < 20; x++ {
|
||||
// f.truncate(uint64(30 - x - 1)) // 150 bytes
|
||||
// }
|
||||
f.truncate(10)
|
||||
if f.items != 10 {
|
||||
t.Fatalf("expected %d items, got %d", 10, f.items)
|
||||
}
|
||||
// 45, 45, 45, 15 -- bytes should be 15
|
||||
if f.headBytes != 15 {
|
||||
t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestFreezerReadAndTruncate(t *testing.T) {
|
||||
fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
|
||||
logger := log.NewWithModule("blockfile_test")
|
||||
{ // Fill table
|
||||
f, err := newTable(os.TempDir(), fname, 50, logger)
|
||||
assert.Nil(t, err)
|
||||
// Write 15 bytes 30 times
|
||||
for x := 0; x < 30; x++ {
|
||||
data := getChunk(15, x)
|
||||
f.Append(uint64(x), data)
|
||||
}
|
||||
// The last item should be there
|
||||
_, err = f.Retrieve(f.items - 1)
|
||||
assert.Nil(t, err)
|
||||
f.Close()
|
||||
}
|
||||
// Reopen and read all files
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, 50, logger)
|
||||
assert.Nil(t, err)
|
||||
if f.items != 30 {
|
||||
f.Close()
|
||||
t.Fatalf("expected %d items, got %d", 0, f.items)
|
||||
}
|
||||
for y := byte(0); y < 30; y++ {
|
||||
f.Retrieve(uint64(y))
|
||||
}
|
||||
// Now, truncate back to zero
|
||||
f.truncate(0)
|
||||
// Write the data again
|
||||
for x := 0; x < 30; x++ {
|
||||
data := getChunk(15, ^x)
|
||||
err := f.Append(uint64(x), data)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func TestFreezerRepairFirstFile(t *testing.T) {
|
||||
fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
|
||||
logger := log.NewWithModule("blockfile_test")
|
||||
{ // Fill table
|
||||
f, err := newTable(os.TempDir(), fname, 50, logger)
|
||||
assert.Nil(t, err)
|
||||
// Write 80 bytes, splitting out into two files
|
||||
f.Append(0, getChunk(40, 0xFF))
|
||||
f.Append(1, getChunk(40, 0xEE))
|
||||
// The last item should be there
|
||||
_, err = f.Retrieve(f.items - 1)
|
||||
assert.Nil(t, err)
|
||||
f.Close()
|
||||
}
|
||||
// Truncate the file in half
|
||||
fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
|
||||
{
|
||||
err := assertFileSize(fileToCrop, 40)
|
||||
assert.Nil(t, err)
|
||||
file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
|
||||
assert.Nil(t, err)
|
||||
file.Truncate(20)
|
||||
file.Close()
|
||||
}
|
||||
// Reopen
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, 50, logger)
|
||||
assert.Nil(t, err)
|
||||
if f.items != 1 {
|
||||
f.Close()
|
||||
t.Fatalf("expected %d items, got %d", 0, f.items)
|
||||
}
|
||||
// Write 40 bytes
|
||||
f.Append(1, getChunk(40, 0xDD))
|
||||
f.Close()
|
||||
// Should have been truncated down to zero and then 40 written
|
||||
err = assertFileSize(fileToCrop, 40)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func assertFileSize(f string, size int64) error {
|
||||
stat, err := os.Stat(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if stat.Size() != size {
|
||||
return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
package blockfile
|
||||
|
||||
const (
	// BlockFileHashTable indicates the name of the block hashes table.
	BlockFileHashTable = "hashes"

	// BlockFileBodiesTable indicates the name of the block bodies table.
	BlockFileBodiesTable = "bodies"

	// BlockFileTXsTable indicates the name of the block transactions table.
	BlockFileTXsTable = "transactions"

	// BlockFileReceiptTable indicates the name of the receipts table.
	BlockFileReceiptTable = "receipts"

	// BlockFileInterchainTable indicates the name of the interchain table.
	BlockFileInterchainTable = "interchain"
)
|
||||
|
||||
// BlockFileSchema lists every table a BlockFile maintains. All values are
// true — presumably the map is used as a set of valid table names; confirm
// against callers.
var BlockFileSchema = map[string]bool{
	BlockFileHashTable: true,
	BlockFileBodiesTable: true,
	BlockFileTXsTable: true,
	BlockFileReceiptTable: true,
	BlockFileInterchainTable: true,
}
|
|
@ -12,12 +12,12 @@ import (
|
|||
"github.com/meshplus/bitxhub-kit/crypto"
|
||||
"github.com/meshplus/bitxhub-kit/crypto/asym"
|
||||
"github.com/meshplus/bitxhub-kit/log"
|
||||
"github.com/meshplus/bitxhub-kit/storage/blockfile"
|
||||
"github.com/meshplus/bitxhub-kit/storage/leveldb"
|
||||
"github.com/meshplus/bitxhub-kit/types"
|
||||
"github.com/meshplus/bitxhub-model/pb"
|
||||
"github.com/meshplus/bitxhub/internal/ledger"
|
||||
"github.com/meshplus/bitxhub/internal/repo"
|
||||
"github.com/meshplus/bitxhub/internal/storages/blockfile"
|
||||
"github.com/meshplus/bitxhub/pkg/cert"
|
||||
"github.com/meshplus/bitxhub/pkg/vm"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
Loading…
Reference in New Issue