diff --git a/.dockerignore b/.dockerignore index 65e3ba2ed..5a71055bc 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1 +1,3 @@ test/ +bin/cql* +*.cover.out diff --git a/.gitignore b/.gitignore index 0ce9eb7e2..4ae821fc2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ *.trace +conf* +..bfg-report pprof.txt .dsn *.svg @@ -13,9 +15,10 @@ vendor/**/.gitignore node_*/ kayak_test *.conf +*.ldb *.db *.db-shm *.db-wal .DS_Store msgpack-20180824 diff --git a/.travis.yml b/.travis.yml index 676511089..93a905397 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,23 +7,19 @@ env: - REVIEWDOG_VERSION=0.9.11 language: go go: - - '1.11' + - '1.10.x' os: - linux - osx matrix: - allow_failures: - - go: 1.11 - exclude: - - go: tip fast_finish: true install: - go get github.com/wadey/gocovmerge - go get golang.org/x/lint/golint - mkdir -p ~/bin/ && export export PATH="~/bin/:$PATH" - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog + - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/${REVIEWDOG_VERSION}/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog before_script: - echo $TRAVIS - echo $TRAVIS_PULL_REQUEST @@ -34,9 +30,11 @@ before_script: - echo $TRAVIS_SECURE_ENV_VARS script: - bash build.sh - - go test -v -race -failfast -parallel 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out + - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ && cd - - - gocovmerge cover.out $(find cmd -name "*.cover.out") > coverage.txt && rm -f cover.out + - bash cleanupDB.sh || true + - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ && cd - + - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) - >- golint ./... 
| grep -v 'vendor/' | grep -v 'server/' | grep -v 'utils/' | reviewdog -f=golint -reporter=github-pr-review || true diff --git a/CHANGELOG.md b/CHANGELOG.md index bcca8da03..37674d155 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## [v0.0.4](https://github.com/CovenantSQL/CovenantSQL/tree/v0.0.4) (2018-11-08) + +[Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.0.3...v0.0.4) + +**Fixed bugs:** + +- Potential deadlock in testing [\#93](https://github.com/CovenantSQL/CovenantSQL/issues/93) + +**Closed issues:** + +- Where can I find covenantsql.io/covenantsql\_adapter [\#53](https://github.com/CovenantSQL/CovenantSQL/issues/53) + +**Merged pull requests:** + +- Fix loadChain failure, remove the lock in sync\(\) [\#114](https://github.com/CovenantSQL/CovenantSQL/pull/114) ([zeqing-guo](https://github.com/zeqing-guo)) +- Kayak performance improvement refactor [\#112](https://github.com/CovenantSQL/CovenantSQL/pull/112) ([xq262144](https://github.com/xq262144)) +- Fix index out of bound, refactor part of sqlchain code [\#110](https://github.com/CovenantSQL/CovenantSQL/pull/110) ([leventeliu](https://github.com/leventeliu)) +- Support lastInsertID/affectedRows in kayak [\#109](https://github.com/CovenantSQL/CovenantSQL/pull/109) ([xq262144](https://github.com/xq262144)) + ## [v0.0.3](https://github.com/CovenantSQL/CovenantSQL/tree/v0.0.3) (2018-11-04) [Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.0.2...v0.0.3) @@ -7,7 +26,6 @@ **Fixed bugs:** - Cannot receive tokens from testnet [\#84](https://github.com/CovenantSQL/CovenantSQL/issues/84) -- Potential deadlock in testing [\#93](https://github.com/CovenantSQL/CovenantSQL/issues/93) **Closed issues:** @@ -49,27 +67,6 @@ [Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/82811a8fcac65d74aefbb506450e4477ecdad048...v0.0.1) -**TestNet** - -1. Ready for CLI or SDK usage. For now, Linux & OSX supported only. -1. SQL Chain Explorer is ready. - -**TestNet Known Issues** - -1. Main Chain - 1. Allocation algorithm for BlockProducer and Miner is incomplete. - 1. Joining as BP or Miner is unsupported for now. _Fix@2018-10-12_ - 1. Forking Recovery algorithm is incomplete. -1. Connector - 1. [Java](https://github.com/CovenantSQL/covenant-connector) and [Golang Connector](https://github.com/CovenantSQL/CovenantSQL/tree/develop/client) is ready. - 1. ĐApp support for ETH or EOS is incomplete. - 1. Java connector protocol is based on RESTful HTTPS, change to Golang DH-RPC latter. -1. Database - 1. Cartesian product or big join caused OOM. _Fix@2018-10-12_ - 1. SQL Query filter is incomplete. _Fix@2018-10-12_ - 1. Forking Recovery algorithm is incomplete. - 1. Database for TestNet is World Open on [Explorer](https://explorer.dbhub.org). - **Closed issues:** - ThunderDB has been renamed to CovenantSQL [\#58](https://github.com/CovenantSQL/CovenantSQL/issues/58) diff --git a/Dockerfile b/Dockerfile index 2f046e8a4..626ca9619 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,6 +4,7 @@ FROM golang:1.11-stretch as builder WORKDIR /go/src/github.com/CovenantSQL/CovenantSQL COPY . . 
RUN CGO_ENABLED=1 GOOS=linux GOLDFLAGS="-linkmode external -extldflags -static" ./build.sh +RUN rm -f bin/*.test # Stage: runner FROM alpine:3.7 diff --git a/Gopkg.lock b/Gopkg.lock index 563485179..dca93369b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,11 +3,24 @@ [[projects]] branch = "master" - digest = "1:13749560a469f9e2eb39a2798d9aae94c77a751254d8a48f4ed954dfe9e664a7" + digest = "1:2fab77e19256b26d733b70e1f68cd965cb87e38e3a619fa960cf319aef743d53" + name = "bazil.org/fuse" + packages = [ + ".", + "fs", + "fs/fstestutil", + "fuseutil", + ] + pruneopts = "UT" + revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e" + +[[projects]] + branch = "master" + digest = "1:341f9de25b320f45124840fa084aa804a6659cfeae2ea93a45424fd73f9d7da5" name = "github.com/CovenantSQL/HashStablePack" packages = ["marshalhash"] pruneopts = "UT" - revision = "1627b606c496aeafb4f0693c1ee5cd935b85dd73" + revision = "f5d7cc3bf3356c85eadcb0f66007f2f2b7ee81bc" [[projects]] branch = "develop" @@ -19,7 +32,7 @@ [[projects]] branch = "master" - digest = "1:599cc68328d92a329d9ec7fd516ed5ac2abfaaafd5f824001adafcb0cf10fe49" + digest = "1:9552f97d556ffa281b8d5c23280aad519e9792a02222cded90467882ff675063" name = "github.com/CovenantSQL/sqlparser" packages = [ ".", @@ -29,7 +42,7 @@ "dependency/sqltypes", ] pruneopts = "UT" - revision = "fb543cee920387ed5e85a2f098eba2dcc12b13c7" + revision = "21a792a5b3b3ecddc9a836fb3da0468f0bd0434f" [[projects]] branch = "master" @@ -135,14 +148,6 @@ pruneopts = "UT" revision = "cbb64ac3d964b81592e64f957ad53df015803288" -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - [[projects]] digest = "1:72dc2b6056e7097f829260e4a2ff08d32fec6017df1982a66e110ab4128486f8" name = "github.com/dlclark/regexp2" @@ -338,14 +343,6 @@ revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - [[projects]] branch = "master" digest = "1:f696f304d2a14745859a153f1041b66e0e2cf150eff731beb6431e93e27ddc5c" @@ -443,12 +440,12 @@ source = "github.com/CovenantSQL/go-mysql" [[projects]] - digest = "1:3f53e9e4dfbb664cd62940c9c4b65a2171c66acd0b7621a1a6b8e78513525a52" + digest = "1:69b1cc331fca23d702bd72f860c6a647afd0aa9fcbc1d0659b1365e26546dd70" name = "github.com/sirupsen/logrus" packages = ["."] pruneopts = "UT" - revision = "ad15b42461921f1fb3529b058c6786c6a45d5162" - version = "v1.1.1" + revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95" + version = "v1.2.0" [[projects]] digest = "1:cc1c574c9cb5e99b123888c12b828e2d19224ab6c2244bda34647f230bf33243" @@ -474,25 +471,6 @@ revision = "9e8dc3f972df6c8fcc0375ef492c24d0bb204857" version = "1.6.3" -[[projects]] - digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "UT" - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - digest = "1:15a4a7e5afac3cea801fa24831fce3bf3b5bd3620cbf8355a07b7dbf06877883" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - ] - pruneopts = "UT" - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - 
[[projects]] branch = "master" digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c" @@ -606,14 +584,15 @@ "ssh/terminal", ] pruneopts = "UT" - revision = "45a5f77698d342a8c2ef8423abdf0ba6880b008a" + revision = "4d3f4d9ffa16a13f451c3b2999e9c49e9750bf06" [[projects]] branch = "master" - digest = "1:eceec1bdeb912f2aed8e5e9c8c81927649925095e1e4d5b85b331898fee06397" + digest = "1:67ee3460c086005f76f49c4d5f38a80ce11d68986de22979c75bf04e8372db9c" name = "golang.org/x/net" packages = [ "bpf", + "context", "html", "html/atom", "internal/iana", @@ -622,18 +601,18 @@ "ipv6", ] pruneopts = "UT" - revision = "c44066c5c816ec500d459a2a324a753f78531ae0" + revision = "b7e296877c6e5e01665044d943c8e6d10ce72a99" [[projects]] branch = "master" - digest = "1:d2605ed96ca0244a457fa8e6c37d5c370ce98ea09dcc21e5e07d967bfcb78878" + digest = "1:417d27a82efb8473554234a282be33d23b0d6adc121e636b55950f913ac071d6" name = "golang.org/x/sys" packages = [ "unix", "windows", ] pruneopts = "UT" - revision = "95b1ffbd15a57cc5abb3f04402b9e8ec0016a52c" + revision = "9b800f95dbbc54abff0acf7ee32d88ba4e328c89" [[projects]] digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" @@ -647,6 +626,9 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "bazil.org/fuse", + "bazil.org/fuse/fs", + "bazil.org/fuse/fs/fstestutil", "github.com/CovenantSQL/HashStablePack/marshalhash", "github.com/CovenantSQL/go-sqlite3-encrypt", "github.com/CovenantSQL/sqlparser", @@ -677,8 +659,8 @@ "github.com/siddontang/go-mysql/server", "github.com/sirupsen/logrus", "github.com/smartystreets/goconvey/convey", - "github.com/stretchr/testify/mock", "github.com/syndtr/goleveldb/leveldb", + "github.com/syndtr/goleveldb/leveldb/iterator", "github.com/syndtr/goleveldb/leveldb/opt", "github.com/syndtr/goleveldb/leveldb/util", "github.com/tchap/go-patricia/patricia", diff --git a/Makefile b/Makefile index 2adee6581..43e9dbbe4 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ default: build -IMAGE := covenantsql.io/covenantsql +IMAGE := covenantsql/covenantsql GIT_COMMIT ?= $(shell git rev-parse --short HEAD) GIT_DIRTY ?= $(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true) GIT_DESCRIBE ?= $(shell git describe --tags --always) @@ -38,4 +38,8 @@ start: logs: docker-compose logs -f --tail=10 -.PHONY: status build save start logs +push: + docker push $(IMAGE):$(VERSION) + docker push $(IMAGE):latest + +.PHONY: status build save start logs push diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index fd5348d4a..45b813e19 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -4,12 +4,14 @@ echo nameserver 1.1.1.1 > /etc/resolv.conf case "${COVENANT_ROLE}" in miner) - exec /app/cql-minerd -config "${COVENANT_CONF}" + exec /app/cql-minerd -config "${COVENANT_CONF}" "${@}" ;; blockproducer) - exec /app/cqld -config "${COVENANT_CONF}" + rm -f /app/node_*/chain.db + exec /app/cqld -config "${COVENANT_CONF}" "${@}" ;; observer) + rm -f /app/node_observer/observer.db exec /app/cql-observer -config "${COVENANT_CONF}" "${@}" ;; adapter) diff --git a/blockproducer/blockindex.go b/blockproducer/blockindex.go index a916b5902..95991f678 100644 --- a/blockproducer/blockindex.go +++ b/blockproducer/blockindex.go @@ -19,9 +19,11 @@ package blockproducer import ( "encoding/binary" "sync" + "time" "github.com/CovenantSQL/CovenantSQL/blockproducer/types" "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) type blockNode struct { @@ -31,9 +33,13 @@ type 
blockNode struct { count uint32 } -func newBlockNode(h uint32, block *types.Block, parent *blockNode) *blockNode { +func newBlockNode(chainInitTime time.Time, period time.Duration, block *types.Block, parent *blockNode) *blockNode { var count uint32 + h := uint32(block.Timestamp().Sub(chainInitTime) / period) + + log.Debugf("chain init time %s, block generation time %s, block height %d", chainInitTime.String(), block.Timestamp().String(), h) + if parent != nil { count = parent.count + 1 } else { diff --git a/blockproducer/blockindex_test.go b/blockproducer/blockindex_test.go index 67b08e32a..ea5069e29 100644 --- a/blockproducer/blockindex_test.go +++ b/blockproducer/blockindex_test.go @@ -20,16 +20,19 @@ import ( "encoding/binary" "reflect" "testing" + "time" "github.com/CovenantSQL/CovenantSQL/crypto/hash" ) func TestNewBlockNodeAndIndexKey(t *testing.T) { + chainInitTime := time.Now().UTC() + period := time.Second block, err := generateRandomBlock(hash.Hash{}, true) if err != nil { t.Fatalf("Unexcepted error: %v", err) } - parent := newBlockNode(0, block, nil) + parent := newBlockNode(chainInitTime, period, block, nil) if parent == nil { t.Fatal("unexpected result: nil") } else if parent.parent != nil { @@ -38,11 +41,13 @@ func TestNewBlockNodeAndIndexKey(t *testing.T) { t.Fatalf("unexpected height: %d", parent.height) } + time.Sleep(time.Second) + block2, err := generateRandomBlock(block.SignedHeader.BlockHash, false) if err != nil { t.Fatalf("Unexcepted error: %v", err) } - child := newBlockNode(1, block2, parent) + child := newBlockNode(chainInitTime, period, block2, parent) if child == nil { t.Fatal("unexpected result: nil") } else if child.parent != parent { @@ -63,11 +68,13 @@ func TestNewBlockNodeAndIndexKey(t *testing.T) { } func TestAncestor(t *testing.T) { + chainInitTime := time.Now() + period := time.Second block, err := generateRandomBlock(hash.Hash{}, true) if err != nil { t.Fatalf("Unexcepted error: %v", err) } - parent := newBlockNode(0, block, nil) + parent := newBlockNode(chainInitTime, period, block, nil) if parent == nil { t.Fatal("unexpected result: nil") } else if parent.parent != nil { @@ -76,11 +83,14 @@ func TestAncestor(t *testing.T) { t.Fatalf("unexpected height: %d", parent.height) } + time.Sleep(time.Second) + block2, err := generateRandomBlock(block.SignedHeader.BlockHash, false) if err != nil { t.Fatalf("Unexcepted error: %v", err) } - child := newBlockNode(1, block2, parent) + + child := newBlockNode(chainInitTime, period, block2, parent) if child == nil { t.Fatal("unexpected result: nil") } else if child.parent != parent { @@ -104,6 +114,9 @@ func TestAncestor(t *testing.T) { } func TestIndexBlock(t *testing.T) { + chainInitTime := time.Now() + period := time.Second + bi := newBlockIndex() if bi == nil { @@ -114,19 +127,23 @@ func TestIndexBlock(t *testing.T) { if err != nil { t.Fatalf("Unexcepted error: %v", err) } - bn0 := newBlockNode(0, block0, nil) + bn0 := newBlockNode(chainInitTime, period, block0, nil) + + time.Sleep(time.Second) block1, err := generateRandomBlock(hash.Hash{}, true) if err != nil { t.Fatalf("Unexcepted error: %v", err) } - bn1 := newBlockNode(1, block1, bn0) + bn1 := newBlockNode(chainInitTime, period, block1, bn0) + + time.Sleep(time.Second) block2, err := generateRandomBlock(hash.Hash{}, true) if err != nil { t.Fatalf("Unexcepted error: %v", err) } - bn2 := newBlockNode(2, block2, bn1) + bn2 := newBlockNode(chainInitTime, period, block2, bn1) bi.addBlock(bn0) bi.addBlock(bn1) diff --git a/blockproducer/chain.go 
b/blockproducer/chain.go index 59904afbd..bcb00161b 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -35,6 +35,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/coreos/bbolt" + "github.com/pkg/errors" ) var ( @@ -56,10 +57,9 @@ type Chain struct { rt *rt cl *rpc.Caller - blocksFromSelf chan *pt.Block - blocksFromRPC chan *pt.Block - pendingTxs chan pi.Transaction - stopCh chan struct{} + blocksFromRPC chan *pt.Block + pendingTxs chan pi.Transaction + stopCh chan struct{} } // NewChain creates a new blockchain. @@ -121,15 +121,14 @@ func NewChain(cfg *Config) (*Chain, error) { // create chain chain := &Chain{ - db: db, - ms: newMetaState(), - bi: newBlockIndex(), - rt: newRuntime(cfg, accountAddress), - cl: rpc.NewCaller(), - blocksFromSelf: make(chan *pt.Block), - blocksFromRPC: make(chan *pt.Block), - pendingTxs: make(chan pi.Transaction), - stopCh: make(chan struct{}), + db: db, + ms: newMetaState(), + bi: newBlockIndex(), + rt: newRuntime(cfg, accountAddress), + cl: rpc.NewCaller(), + blocksFromRPC: make(chan *pt.Block), + pendingTxs: make(chan pi.Transaction), + stopCh: make(chan struct{}), } log.WithField("genesis", cfg.Genesis).Debug("pushing genesis block") @@ -143,8 +142,7 @@ func NewChain(cfg *Config) (*Chain, error) { "bp_number": chain.rt.bpNum, "period": chain.rt.period.String(), "tick": chain.rt.tick.String(), - "head": chain.rt.getHead().getHeader().String(), - "height": chain.rt.getHead().getHeight(), + "height": chain.rt.getHead().Height, }).Debug("current chain state") return chain, nil @@ -169,15 +167,14 @@ func LoadChain(cfg *Config) (chain *Chain, err error) { } chain = &Chain{ - db: db, - ms: newMetaState(), - bi: newBlockIndex(), - rt: newRuntime(cfg, accountAddress), - cl: rpc.NewCaller(), - blocksFromSelf: make(chan *pt.Block), - blocksFromRPC: make(chan *pt.Block), - pendingTxs: make(chan pi.Transaction), - stopCh: make(chan struct{}), + db: db, + ms: newMetaState(), + bi: newBlockIndex(), + rt: newRuntime(cfg, accountAddress), + cl: rpc.NewCaller(), + blocksFromRPC: make(chan *pt.Block), + pendingTxs: make(chan pi.Transaction), + stopCh: make(chan struct{}), } err = chain.db.View(func(tx *bolt.Tx) (err error) { @@ -205,10 +202,12 @@ func LoadChain(cfg *Config) (chain *Chain, err error) { return err } + log.Debugf("load chain block %s, parent block %s", block.BlockHash(), block.ParentHash()) + parent := (*blockNode)(nil) if last == nil { - // TODO(lambda): check genesis block + // check genesis block } else if block.ParentHash().IsEqual(&last.hash) { if err = block.SignedHeader.Verify(); err != nil { return err @@ -249,10 +248,10 @@ func LoadChain(cfg *Config) (chain *Chain, err error) { // checkBlock has following steps: 1. check parent block 2. checkTx 2. merkle tree 3. Hash 4. Signature. 
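
Note on the height refactor above: `newBlockNode` no longer receives a precomputed height; it derives one from the block timestamp, the chain init time, and the block period (see the blockindex.go hunk). A minimal standalone sketch of that arithmetic, with toy values (only the names come from the diff):

    package main

    import (
        "fmt"
        "time"
    )

    // heightOf mirrors the computation added in newBlockNode: integer
    // division of the time elapsed since chain init by the block period.
    func heightOf(chainInitTime, blockTime time.Time, period time.Duration) uint32 {
        return uint32(blockTime.Sub(chainInitTime) / period)
    }

    func main() {
        genesis := time.Date(2018, 11, 8, 0, 0, 0, 0, time.UTC)
        // Duration division truncates, so a block stamped 10.9s after
        // chain init with a 1s period lands at height 10.
        fmt.Println(heightOf(genesis, genesis.Add(10900*time.Millisecond), time.Second))
    }

The pushBlockWithoutCheck hunk below applies the same derivation and also moves `setHead`/`addBlock` inside the bolt `db.Update` body, so the in-memory head only advances when the persistence transaction body succeeds.
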
func (c *Chain) checkBlock(b *pt.Block) (err error) { // TODO(lambda): process block fork - if !b.ParentHash().IsEqual(c.rt.getHead().getHeader()) { + if !b.ParentHash().IsEqual(&c.rt.getHead().Head) { log.WithFields(log.Fields{ - "head": c.rt.getHead().getHeader().String(), - "height": c.rt.getHead().getHeight(), + "head": c.rt.getHead().Head.String(), + "height": c.rt.getHead().Height, "received_parent": b.ParentHash(), }).Debug("invalid parent") return ErrParentNotMatch @@ -277,7 +276,8 @@ func (c *Chain) checkBlock(b *pt.Block) (err error) { func (c *Chain) pushBlockWithoutCheck(b *pt.Block) error { h := c.rt.getHeightFromTime(b.Timestamp()) - node := newBlockNode(h, b, c.rt.getHead().getNode()) + log.Debugf("current block %s, height %d, its parent %s", b.BlockHash(), h, b.ParentHash()) + node := newBlockNode(c.rt.chainInitTime, c.rt.period, b, c.rt.getHead().Node) state := &State{ Node: node, Head: node.hash, @@ -289,34 +289,34 @@ func (c *Chain) pushBlockWithoutCheck(b *pt.Block) error { return err } - encState, err := utils.EncodeMsgPack(c.rt.getHead()) + encState, err := utils.EncodeMsgPack(state) if err != nil { return err } err = c.db.Update(func(tx *bolt.Tx) (err error) { - err = tx.Bucket(metaBucket[:]).Put(metaStateKey, encState.Bytes()) - if err != nil { - return err - } err = tx.Bucket(metaBucket[:]).Bucket(metaBlockIndexBucket).Put(node.indexKey(), encBlock.Bytes()) if err != nil { - return err + return } for _, v := range b.Transactions { if err = c.ms.applyTransactionProcedure(v)(tx); err != nil { - return err + return } } err = c.ms.partialCommitProcedure(b.Transactions)(tx) + if err != nil { + return + } + err = tx.Bucket(metaBucket[:]).Put(metaStateKey, encState.Bytes()) + if err != nil { + return + } + c.rt.setHead(state) + c.bi.addBlock(node) return }) - if err != nil { - return err - } - c.rt.setHead(state) - c.bi.addBlock(node) - return nil + return err } func (c *Chain) pushGenesisBlock(b *pt.Block) (err error) { @@ -330,6 +330,7 @@ func (c *Chain) pushGenesisBlock(b *pt.Block) (err error) { func (c *Chain) pushBlock(b *pt.Block) error { err := c.checkBlock(b) if err != nil { + err = errors.Wrap(err, "check block failed") return err } @@ -352,7 +353,7 @@ func (c *Chain) produceBlock(now time.Time) error { Header: pt.Header{ Version: blockVersion, Producer: c.rt.accountAddress, - ParentHash: *c.rt.getHead().getHeader(), + ParentHash: c.rt.getHead().Head, Timestamp: now, }, }, @@ -374,7 +375,7 @@ func (c *Chain) produceBlock(now time.Time) error { peers := c.rt.getPeers() wg := &sync.WaitGroup{} for _, s := range peers.Servers { - if !s.ID.IsEqual(&c.rt.nodeID) { + if !s.IsEqual(&c.rt.nodeID) { wg.Add(1) go func(id proto.NodeID) { defer wg.Done() @@ -388,18 +389,16 @@ func (c *Chain) produceBlock(now time.Time) error { if err := c.cl.CallNode(id, route.MCCAdviseNewBlock.String(), blockReq, blockResp); err != nil { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), - "curr_turn": c.rt.getNextTurn(), "now_time": time.Now().UTC().Format(time.RFC3339Nano), "block_hash": b.BlockHash(), }).WithError(err).Error( "failed to advise new block") } else { log.WithFields(log.Fields{ - "height": c.rt.getHead().getHeight(), - "node": id, + "node": id, }).Debug("success advising block") } - }(s.ID) + }(s) } } @@ -473,7 +472,7 @@ func (c *Chain) checkBillingRequest(br *pt.BillingRequest) (err error) { } func (c *Chain) fetchBlockByHeight(h uint32) (b *pt.Block, count uint32, err error) { - node := c.rt.getHead().getNode().ancestor(h) + node := 
c.rt.getHead().Node.ancestor(h) if node == nil { return nil, 0, ErrNoSuchBlock } @@ -493,7 +492,7 @@ func (c *Chain) fetchBlockByHeight(h uint32) (b *pt.Block, count uint32, err err } func (c *Chain) fetchBlockByCount(count uint32) (b *pt.Block, height uint32, err error) { - node := c.rt.getHead().getNode().ancestorByCount(count) + node := c.rt.getHead().Node.ancestorByCount(count) if node == nil { return nil, 0, ErrNoSuchBlock } @@ -538,27 +537,26 @@ func (c *Chain) sync() error { "peer": c.rt.getPeerInfoString(), }).Debug("synchronizing chain state") + // sync executes firstly alone, so it's ok to sync without locking runtime for { now := c.rt.now() height := c.rt.getHeightFromTime(now) log.WithFields(log.Fields{ "height": height, - "nextTurn": c.rt.getNextTurn(), + "nextTurn": c.rt.nextTurn, }).Info("try sync heights") - if c.rt.getNextTurn() >= height { + if c.rt.nextTurn >= height { log.WithFields(log.Fields{ "height": height, - "nextTurn": c.rt.getNextTurn(), + "nextTurn": c.rt.nextTurn, }).Info("return heights") break } - for c.rt.getNextTurn() <= height { + for c.rt.nextTurn <= height { // TODO(lambda): fetch blocks and txes. - c.rt.setNextTurn() - // TODO(lambda): remove it after implementing fetch - c.rt.getHead().increaseHeightByOne() + c.rt.nextTurn++ } } @@ -609,14 +607,6 @@ func (c *Chain) processBlocks() { var stash []*pt.Block for { select { - case block := <-c.blocksFromSelf: - h := c.rt.getHeightFromTime(block.Timestamp()) - if h == c.rt.getNextTurn()-1 { - err := c.pushBlockWithoutCheck(block) - if err != nil { - log.Error(err) - } - } case block := <-c.blocksFromRPC: if h := c.rt.getHeightFromTime(block.Timestamp()); h > c.rt.getNextTurn()-1 { // Stash newer blocks for later check @@ -631,7 +621,11 @@ func (c *Chain) processBlocks() { } else { err := c.pushBlock(block) if err != nil { - log.Error(err) + log.WithFields(log.Fields{ + "block_hash": block.BlockHash(), + "block_parent_hash": block.ParentHash(), + "block_timestamp": block.Timestamp(), + }).Debug(err) } } @@ -661,9 +655,9 @@ func (c *Chain) processTxs() { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "next_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().getHeight(), - "head_block": c.rt.getHead().getHeader().String(), - "transaction": tx.GetHash().String(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), + "transaction": tx.Hash().String(), }).Debugf("Failed to push tx with error: %v", err) } case <-c.stopCh: @@ -690,8 +684,8 @@ func (c *Chain) mainCycle() { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "next_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().getHeight(), - "head_block": c.rt.getHead().getHeader().String(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), "now_time": t.Format(time.RFC3339Nano), "duration": d, }).Debug("Main cycle") @@ -705,12 +699,13 @@ func (c *Chain) mainCycle() { func (c *Chain) syncHead() { // Try to fetch if the the block of the current turn is not advised yet - //log.WithFields(log.Fields{ - // "index": c.rt.index, - // "next_turn": c.rt.getNextTurn(), - // "height": c.rt.getHead().getHeight(), - //}).Debug("sync header") - if h := c.rt.getNextTurn() - 1; c.rt.getHead().getHeight() < h { + log.WithFields(log.Fields{ + "index": c.rt.index, + "next_turn": c.rt.getNextTurn(), + "height": c.rt.getHead().Height, + }).Debug("sync header") + if h := c.rt.getNextTurn() - 1; c.rt.getHead().Height < h { + log.Debugf("sync header with height %d", h) var err 
error req := &FetchBlockReq{ Envelope: proto.Envelope{ @@ -723,25 +718,25 @@ func (c *Chain) syncHead() { succ := false for i, s := range peers.Servers { - if !s.ID.IsEqual(&c.rt.nodeID) { - err = c.cl.CallNode(s.ID, route.MCCFetchBlock.String(), req, resp) + if !s.IsEqual(&c.rt.nodeID) { + err = c.cl.CallNode(s, route.MCCFetchBlock.String(), req, resp) if err != nil || resp.Block == nil { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s.ID), + "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().getHeight(), - "head_block": c.rt.getHead().getHeader().String(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), }).WithError(err).Debug( "Failed to fetch block from peer") } else { c.blocksFromRPC <- resp.Block log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s.ID), + "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().getHeight(), - "head_block": c.rt.getHead().getHeader().String(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), }).Debug( "Fetch block from remote peer successfully") succ = true @@ -754,8 +749,8 @@ func (c *Chain) syncHead() { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().getHeight(), - "head_block": c.rt.getHead().getHeader().String(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), }).Debug( "Cannot get block from any peer") } diff --git a/blockproducer/chain_test.go b/blockproducer/chain_test.go index 450a4f4cd..1da9c742f 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -26,11 +26,10 @@ import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" . 
"github.com/smartystreets/goconvey/convey" ) @@ -69,7 +68,7 @@ func TestChain(t *testing.T) { So(err, ShouldBeNil) _, peers, err := createTestPeersWithPrivKeys(priv, testPeersNumber) - cfg := NewConfig(genesis, fl.Name(), rpcServer, peers, peers.Servers[0].ID, testPeriod, testTick) + cfg := NewConfig(genesis, fl.Name(), rpcServer, peers, peers.Servers[0], testPeriod, testTick) chain, err := NewChain(cfg) So(err, ShouldBeNil) ao, ok := chain.ms.readonly.accounts[testAddress1] @@ -100,7 +99,7 @@ func TestChain(t *testing.T) { for { time.Sleep(testPeriod) t.Logf("Chain state: head = %s, height = %d, turn = %d, nextturnstart = %s, ismyturn = %t", - chain.rt.getHead().getHeader(), chain.rt.getHead().getHeight(), chain.rt.nextTurn, + chain.rt.getHead().Head, chain.rt.getHead().Height, chain.rt.nextTurn, chain.rt.chainInitTime.Add( chain.rt.period*time.Duration(chain.rt.nextTurn)).Format(time.RFC3339Nano), chain.rt.isMyTurn()) @@ -120,7 +119,7 @@ func TestChain(t *testing.T) { } // generate block - block, err := generateRandomBlockWithTransactions(*chain.rt.getHead().getHeader(), tbs) + block, err := generateRandomBlockWithTransactions(chain.rt.getHead().Head, tbs) So(err, ShouldBeNil) err = chain.pushBlock(block) So(err, ShouldBeNil) @@ -133,7 +132,7 @@ func TestChain(t *testing.T) { So(chain.bi.hasBlock(block.SignedHeader.BlockHash), ShouldBeTrue) // So(chain.rt.getHead().Height, ShouldEqual, height) - height := chain.rt.getHead().getHeight() + height := chain.rt.getHead().Height specificHeightBlock1, _, err := chain.fetchBlockByHeight(height) So(err, ShouldBeNil) So(block.SignedHeader.BlockHash, ShouldResemble, specificHeightBlock1.SignedHeader.BlockHash) @@ -162,17 +161,18 @@ func TestChain(t *testing.T) { height++ t.Logf("Pushed new block: height = %d, %s <- %s", - chain.rt.getHead().getHeight(), + chain.rt.getHead().Height, block.ParentHash(), block.BlockHash()) - if chain.rt.getHead().getHeight() >= testPeriodNumber { + if chain.rt.getHead().Height >= testPeriodNumber { break } } // load chain from db - chain.db.Close() + err = chain.Stop() + So(err, ShouldBeNil) _, err = LoadChain(cfg) So(err, ShouldBeNil) }) @@ -199,7 +199,7 @@ func TestMultiNode(t *testing.T) { } var nis []cpuminer.NonceInfo - var peers *kayak.Peers + var peers *proto.Peers peerInited := false for i := range chains { // create tmp file @@ -219,13 +219,13 @@ func TestMultiNode(t *testing.T) { So(err, ShouldBeNil) for i, p := range peers.Servers { - t.Logf("Peer #%d: %s", i, p.ID) + t.Logf("Peer #%d: %s", i, p) } peerInited = true } - cfg := NewConfig(genesis, fl.Name(), server, peers, peers.Servers[i].ID, testPeriod, testTick) + cfg := NewConfig(genesis, fl.Name(), server, peers, peers.Servers[i], testPeriod, testTick) // init chain chains[i], err = NewChain(cfg) @@ -235,8 +235,13 @@ func TestMultiNode(t *testing.T) { pub, err := kms.GetLocalPublicKey() So(err, ShouldBeNil) node := proto.Node{ - ID: peers.Servers[i].ID, - Role: peers.Servers[i].Role, + ID: peers.Servers[i], + Role: func(peers *proto.Peers, i int) proto.ServerRole { + if peers.Leader.IsEqual(&peers.Servers[i]) { + return proto.Leader + } + return proto.Follower + }(peers, i), Addr: server.Listener.Addr().String(), PublicKey: pub, Nonce: nis[i].Nonce, @@ -274,13 +279,13 @@ func TestMultiNode(t *testing.T) { br, err := generateRandomBillingRequest() c.So(err, ShouldBeNil) - bReq := &ct.AdviseBillingReq{ + bReq := &types.AdviseBillingReq{ Envelope: proto.Envelope{ // TODO(lambda): Add fields. 
}, Req: br, } - bResp := &ct.AdviseBillingResp{} + bResp := &types.AdviseBillingResp{} log.WithFields(log.Fields{ "node": val, "requestHash": br.RequestHash, diff --git a/blockproducer/config.go b/blockproducer/config.go index d80ae5dbe..175d2a1d6 100644 --- a/blockproducer/config.go +++ b/blockproducer/config.go @@ -20,7 +20,6 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/blockproducer/types" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" ) @@ -37,7 +36,7 @@ type Config struct { Server *rpc.Server - Peers *kayak.Peers + Peers *proto.Peers NodeID proto.NodeID Period time.Duration @@ -46,7 +45,7 @@ type Config struct { // NewConfig creates new config. func NewConfig(genesis *types.Block, dataFile string, - server *rpc.Server, peers *kayak.Peers, + server *rpc.Server, peers *proto.Peers, nodeID proto.NodeID, period time.Duration, tick time.Duration) *Config { config := Config{ Genesis: genesis, diff --git a/blockproducer/db_service.go b/blockproducer/db_service.go index 511227bc3..463d925c8 100644 --- a/blockproducer/db_service.go +++ b/blockproducer/db_service.go @@ -25,15 +25,13 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" dto "github.com/prometheus/client_model/go" ) @@ -67,7 +65,7 @@ type DBService struct { } // CreateDatabase defines block producer create database logic. 
-func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatabaseResponse) (err error) { +func (s *DBService) CreateDatabase(req *types.CreateDatabaseRequest, resp *types.CreateDatabaseResponse) (err error) { // verify signature if err = req.Verify(); err != nil { return @@ -92,7 +90,7 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab log.WithField("db", dbID).Debug("generated database id") // allocate nodes - var peers *kayak.Peers + var peers *proto.Peers if peers, err = s.allocateNodes(0, dbID, req.Header.ResourceMeta); err != nil { return } @@ -100,7 +98,7 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab log.WithField("peers", peers).Debug("generated peers info") // TODO(lambda): call accounting features, top up deposit - var genesisBlock *ct.Block + var genesisBlock *types.Block if genesisBlock, err = s.generateGenesisBlock(dbID, req.Header.ResourceMeta); err != nil { return } @@ -119,9 +117,9 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab return } - initSvcReq := new(wt.UpdateService) - initSvcReq.Header.Op = wt.CreateDB - initSvcReq.Header.Instance = wt.ServiceInstance{ + initSvcReq := new(types.UpdateService) + initSvcReq.Header.Op = types.CreateDB + initSvcReq.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, Peers: peers, GenesisBlock: genesisBlock, @@ -130,21 +128,21 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab return } - rollbackReq := new(wt.UpdateService) - rollbackReq.Header.Op = wt.DropDB - rollbackReq.Header.Instance = wt.ServiceInstance{ + rollbackReq := new(types.UpdateService) + rollbackReq.Header.Op = types.DropDB + rollbackReq.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, } if err = rollbackReq.Sign(privateKey); err != nil { return } - if err = s.batchSendSvcReq(initSvcReq, rollbackReq, s.peersToNodes(peers)); err != nil { + if err = s.batchSendSvcReq(initSvcReq, rollbackReq, peers.Servers); err != nil { return } // save to meta - instanceMeta := wt.ServiceInstance{ + instanceMeta := types.ServiceInstance{ DatabaseID: dbID, Peers: peers, ResourceMeta: req.Header.ResourceMeta, @@ -169,7 +167,7 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab } // DropDatabase defines block producer drop database logic. 
-func (s *DBService) DropDatabase(req *DropDatabaseRequest, resp *DropDatabaseResponse) (err error) { +func (s *DBService) DropDatabase(req *types.DropDatabaseRequest, resp *types.DropDatabaseResponse) (err error) { // verify signature if err = req.Verify(); err != nil { return @@ -186,15 +184,15 @@ func (s *DBService) DropDatabase(req *DropDatabaseRequest, resp *DropDatabaseRes }() // get database peers - var instanceMeta wt.ServiceInstance + var instanceMeta types.ServiceInstance if instanceMeta, err = s.ServiceMap.Get(req.Header.DatabaseID); err != nil { return } // call miner nodes to drop database - dropDBSvcReq := new(wt.UpdateService) - dropDBSvcReq.Header.Op = wt.DropDB - dropDBSvcReq.Header.Instance = wt.ServiceInstance{ + dropDBSvcReq := new(types.UpdateService) + dropDBSvcReq.Header.Op = types.DropDB + dropDBSvcReq.Header.Instance = types.ServiceInstance{ DatabaseID: req.Header.DatabaseID, } if dropDBSvcReq.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { @@ -208,7 +206,7 @@ func (s *DBService) DropDatabase(req *DropDatabaseRequest, resp *DropDatabaseRes return } - if err = s.batchSendSvcReq(dropDBSvcReq, nil, s.peersToNodes(instanceMeta.Peers)); err != nil { + if err = s.batchSendSvcReq(dropDBSvcReq, nil, instanceMeta.Peers.Servers); err != nil { return } @@ -229,7 +227,7 @@ func (s *DBService) DropDatabase(req *DropDatabaseRequest, resp *DropDatabaseRes } // GetDatabase defines block producer get database logic. -func (s *DBService) GetDatabase(req *GetDatabaseRequest, resp *GetDatabaseResponse) (err error) { +func (s *DBService) GetDatabase(req *types.GetDatabaseRequest, resp *types.GetDatabaseResponse) (err error) { // verify signature if err = req.Verify(); err != nil { return @@ -246,7 +244,7 @@ func (s *DBService) GetDatabase(req *GetDatabaseRequest, resp *GetDatabaseRespon }() // fetch from meta - var instanceMeta wt.ServiceInstance + var instanceMeta types.ServiceInstance if instanceMeta, err = s.ServiceMap.Get(req.Header.DatabaseID); err != nil { return } @@ -269,9 +267,9 @@ func (s *DBService) GetDatabase(req *GetDatabaseRequest, resp *GetDatabaseRespon } // GetNodeDatabases defines block producer get node databases logic. 
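
Note on the peers refactor running through this file: `kayak.Peers`, whose `Servers` were full `*kayak.Server` records carrying per-server roles and public keys, is replaced by the flat `proto.Peers`, whose `Servers` is just `[]proto.NodeID` and whose leader is named by ID; that is what lets the `peersToNodes` helper be deleted below. A hedged sketch of the new construction path, modeled on the `buildPeers` hunk below (field layout inferred from this diff, not from the package docs):

    package main

    import (
        "fmt"

        "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
        "github.com/CovenantSQL/CovenantSQL/proto"
    )

    // buildSignedPeers mirrors the new buildPeers flow: wrap the allocated
    // node IDs in a PeersHeader, pick the first node as leader (allocateNodes
    // sorts the allocated list by resources), then sign the whole structure.
    func buildSignedPeers(term uint64, allocated []proto.NodeID, privKey *asymmetric.PrivateKey) (*proto.Peers, error) {
        peers := &proto.Peers{
            PeersHeader: proto.PeersHeader{
                Term:    term,
                Servers: allocated,
            },
        }
        peers.Leader = peers.Servers[0]
        if err := peers.Sign(privKey); err != nil {
            return nil, err
        }
        return peers, nil
    }

    func main() {
        priv, _, _ := asymmetric.GenSecp256k1KeyPair()
        peers, err := buildSignedPeers(1, []proto.NodeID{"node-a", "node-b"}, priv)
        fmt.Println(peers.Leader, err)
    }
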
-func (s *DBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitServiceResponse) (err error) { +func (s *DBService) GetNodeDatabases(req *types.InitService, resp *types.InitServiceResponse) (err error) { // fetch from meta - var instances []wt.ServiceInstance + var instances []types.ServiceInstance if instances, err = s.ServiceMap.GetDatabases(req.GetNodeID().ToNodeID()); err != nil { return } @@ -324,7 +322,7 @@ func (s *DBService) generateDatabaseID(reqNodeID *proto.RawNodeID) (dbID proto.D } } -func (s *DBService) allocateNodes(lastTerm uint64, dbID proto.DatabaseID, resourceMeta wt.ResourceMeta) (peers *kayak.Peers, err error) { +func (s *DBService) allocateNodes(lastTerm uint64, dbID proto.DatabaseID, resourceMeta types.ResourceMeta) (peers *proto.Peers, err error) { curRange := int(resourceMeta.Node) excludeNodes := make(map[proto.NodeID]bool) var allocated []allocatedNode @@ -444,7 +442,7 @@ func (s *DBService) allocateNodes(lastTerm uint64, dbID proto.DatabaseID, resour } // build peers - return s.buildPeers(lastTerm+1, nodes, nodeAllocated) + return s.buildPeers(lastTerm+1, nodeAllocated) } curRange += int(resourceMeta.Node) @@ -479,54 +477,26 @@ func (s *DBService) getMetric(metric metric.MetricMap, keys []string) (value uin return } -func (s *DBService) buildPeers(term uint64, nodes []proto.Node, allocated []proto.NodeID) (peers *kayak.Peers, err error) { +func (s *DBService) buildPeers(term uint64, allocated []proto.NodeID) (peers *proto.Peers, err error) { log.WithFields(log.Fields{ "term": term, "nodes": allocated, }).Debug("build peers for term/nodes") // get local private key - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return } // get allocated node info - allocatedMap := make(map[proto.NodeID]bool) - - for _, nodeID := range allocated { - allocatedMap[nodeID] = true - } - - allocatedNodes := make([]proto.Node, 0, len(allocated)) - - for _, node := range nodes { - if allocatedMap[node.ID] { - allocatedNodes = append(allocatedNodes, node) - } - } - - peers = &kayak.Peers{ - Term: term, - PubKey: pubKey, - Servers: make([]*kayak.Server, len(allocated)), - } - - for idx, node := range allocatedNodes { - peers.Servers[idx] = &kayak.Server{ - Role: proto.Follower, - ID: node.ID, - PubKey: node.PublicKey, - } + peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: term, + Servers: allocated, + }, } - // choose the first node as leader, allocateNodes sort the allocated node list by memory size - peers.Servers[0].Role = proto.Leader peers.Leader = peers.Servers[0] // sign the peers structure @@ -535,7 +505,7 @@ func (s *DBService) buildPeers(term uint64, nodes []proto.Node, allocated []prot return } -func (s *DBService) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta wt.ResourceMeta) (genesisBlock *ct.Block, err error) { +func (s *DBService) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta types.ResourceMeta) (genesisBlock *types.Block, err error) { // TODO(xq262144): following is stub code, real logic should be implemented in the future emptyHash := hash.Hash{} @@ -548,9 +518,9 @@ func (s *DBService) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta wt. 
return } - genesisBlock = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + genesisBlock = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: nodeID, GenesisHash: emptyHash, @@ -564,7 +534,7 @@ func (s *DBService) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta wt. return } -func (s *DBService) batchSendSvcReq(req *wt.UpdateService, rollbackReq *wt.UpdateService, nodes []proto.NodeID) (err error) { +func (s *DBService) batchSendSvcReq(req *types.UpdateService, rollbackReq *types.UpdateService, nodes []proto.NodeID) (err error) { if err = s.batchSendSingleSvcReq(req, nodes); err != nil { s.batchSendSingleSvcReq(rollbackReq, nodes) } @@ -572,7 +542,7 @@ func (s *DBService) batchSendSvcReq(req *wt.UpdateService, rollbackReq *wt.Updat return } -func (s *DBService) batchSendSingleSvcReq(req *wt.UpdateService, nodes []proto.NodeID) (err error) { +func (s *DBService) batchSendSingleSvcReq(req *types.UpdateService, nodes []proto.NodeID) (err error) { var wg sync.WaitGroup errCh := make(chan error, len(nodes)) @@ -580,7 +550,7 @@ func (s *DBService) batchSendSingleSvcReq(req *wt.UpdateService, nodes []proto.N wg.Add(1) go func(s proto.NodeID, ec chan error) { defer wg.Done() - var resp wt.UpdateServiceResponse + var resp types.UpdateServiceResponse ec <- rpc.NewCaller().CallNode(s, route.DBSDeploy.String(), req, &resp) }(node, errCh) } @@ -591,17 +561,3 @@ func (s *DBService) batchSendSingleSvcReq(req *wt.UpdateService, nodes []proto.N return } - -func (s *DBService) peersToNodes(peers *kayak.Peers) (nodes []proto.NodeID) { - if peers == nil { - return - } - - nodes = make([]proto.NodeID, 0, len(peers.Servers)) - - for _, s := range peers.Servers { - nodes = append(nodes, s.ID) - } - - return -} diff --git a/blockproducer/db_service_map.go b/blockproducer/db_service_map.go index fa506ec4e..1a56042a7 100644 --- a/blockproducer/db_service_map.go +++ b/blockproducer/db_service_map.go @@ -20,20 +20,21 @@ import ( "sync" "github.com/CovenantSQL/CovenantSQL/proto" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/pkg/errors" ) // DBMetaPersistence defines database meta persistence api. type DBMetaPersistence interface { - GetDatabase(dbID proto.DatabaseID) (wt.ServiceInstance, error) - SetDatabase(meta wt.ServiceInstance) error + GetDatabase(dbID proto.DatabaseID) (types.ServiceInstance, error) + SetDatabase(meta types.ServiceInstance) error DeleteDatabase(dbID proto.DatabaseID) error - GetAllDatabases() ([]wt.ServiceInstance, error) + GetAllDatabases() ([]types.ServiceInstance, error) } // DBServiceMap defines database instance meta. 
type DBServiceMap struct { - dbMap map[proto.DatabaseID]wt.ServiceInstance + dbMap map[proto.DatabaseID]types.ServiceInstance nodeMap map[proto.NodeID]map[proto.DatabaseID]bool persist DBMetaPersistence sync.RWMutex @@ -43,7 +44,7 @@ type DBServiceMap struct { func InitServiceMap(persistImpl DBMetaPersistence) (s *DBServiceMap, err error) { s = &DBServiceMap{ persist: persistImpl, - dbMap: make(map[proto.DatabaseID]wt.ServiceInstance), + dbMap: make(map[proto.DatabaseID]types.ServiceInstance), nodeMap: make(map[proto.NodeID]map[proto.DatabaseID]bool), } @@ -51,7 +52,7 @@ func InitServiceMap(persistImpl DBMetaPersistence) (s *DBServiceMap, err error) s.Lock() defer s.Unlock() - var allDatabases []wt.ServiceInstance + var allDatabases []types.ServiceInstance if allDatabases, err = s.persist.GetAllDatabases(); err != nil { return @@ -61,10 +62,10 @@ func InitServiceMap(persistImpl DBMetaPersistence) (s *DBServiceMap, err error) s.dbMap[meta.DatabaseID] = meta for _, server := range meta.Peers.Servers { - if s.nodeMap[server.ID] == nil { - s.nodeMap[server.ID] = make(map[proto.DatabaseID]bool) + if s.nodeMap[server] == nil { + s.nodeMap[server] = make(map[proto.DatabaseID]bool) } - s.nodeMap[server.ID][meta.DatabaseID] = true + s.nodeMap[server][meta.DatabaseID] = true } } @@ -72,22 +73,22 @@ func InitServiceMap(persistImpl DBMetaPersistence) (s *DBServiceMap, err error) } // Set add database to meta. -func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { +func (c *DBServiceMap) Set(meta types.ServiceInstance) (err error) { c.Lock() defer c.Unlock() - if !meta.Peers.Verify() { - return ErrInvalidDBPeersConfig + if err = meta.Peers.Verify(); err != nil { + return errors.Wrap(err, "verify peers failed") } // remove previous records - var oldMeta wt.ServiceInstance + var oldMeta types.ServiceInstance var ok bool if oldMeta, ok = c.dbMap[meta.DatabaseID]; ok { for _, s := range oldMeta.Peers.Servers { - if c.nodeMap[s.ID] != nil { - delete(c.nodeMap[s.ID], meta.DatabaseID) + if c.nodeMap[s] != nil { + delete(c.nodeMap[s], meta.DatabaseID) } } } @@ -96,10 +97,10 @@ func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { c.dbMap[meta.DatabaseID] = meta for _, s := range meta.Peers.Servers { - if c.nodeMap[s.ID] == nil { - c.nodeMap[s.ID] = make(map[proto.DatabaseID]bool) + if c.nodeMap[s] == nil { + c.nodeMap[s] = make(map[proto.DatabaseID]bool) } - c.nodeMap[s.ID][meta.DatabaseID] = true + c.nodeMap[s][meta.DatabaseID] = true } // set to persistence @@ -109,7 +110,7 @@ func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { } // Get find database from meta. -func (c *DBServiceMap) Get(dbID proto.DatabaseID) (meta wt.ServiceInstance, err error) { +func (c *DBServiceMap) Get(dbID proto.DatabaseID) (meta types.ServiceInstance, err error) { c.RLock() defer c.RUnlock() @@ -134,14 +135,14 @@ func (c *DBServiceMap) Delete(dbID proto.DatabaseID) (err error) { c.Lock() defer c.Unlock() - var meta wt.ServiceInstance + var meta types.ServiceInstance var ok bool // delete from cache if meta, ok = c.dbMap[dbID]; ok { for _, s := range meta.Peers.Servers { - if c.nodeMap[s.ID] != nil { - delete(c.nodeMap[s.ID], dbID) + if c.nodeMap[s] != nil { + delete(c.nodeMap[s], dbID) } } } @@ -155,15 +156,15 @@ func (c *DBServiceMap) Delete(dbID proto.DatabaseID) (err error) { } // GetDatabases return database config. 
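
Note on the error-handling change in `Set` above: instead of collapsing a failed `Peers.Verify` into the removed `ErrInvalidDBPeersConfig` sentinel, the code now wraps the concrete verification error with call-site context via `github.com/pkg/errors` (the same pattern added to `pushBlock` in chain.go). A small self-contained sketch of the pattern, with a toy sentinel standing in for whatever `Verify` actually returns:

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    // errBadSignature is a toy sentinel standing in for the concrete
    // error that Peers.Verify would return.
    var errBadSignature = errors.New("bad signature")

    func verify() error { return errBadSignature }

    func main() {
        // Wrap keeps the original error while adding call-site context,
        // matching the new Set()/pushBlock() style.
        err := errors.Wrap(verify(), "verify peers failed")
        fmt.Println(err)                                  // verify peers failed: bad signature
        fmt.Println(errors.Cause(err) == errBadSignature) // true: the cause is still matchable
    }
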
-func (c *DBServiceMap) GetDatabases(nodeID proto.NodeID) (dbs []wt.ServiceInstance, err error) { +func (c *DBServiceMap) GetDatabases(nodeID proto.NodeID) (dbs []types.ServiceInstance, err error) { c.RLock() defer c.RUnlock() - dbs = make([]wt.ServiceInstance, 0) + dbs = make([]types.ServiceInstance, 0) for dbID, ok := range c.nodeMap[nodeID] { if ok { - var db wt.ServiceInstance + var db types.ServiceInstance if db, ok = c.dbMap[dbID]; ok { dbs = append(dbs, db) } diff --git a/blockproducer/db_service_map_test.go b/blockproducer/db_service_map_test.go index 52c1e518c..b5fe441e9 100644 --- a/blockproducer/db_service_map_test.go +++ b/blockproducer/db_service_map_test.go @@ -26,7 +26,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" . "github.com/smartystreets/goconvey/convey" ) @@ -66,7 +66,7 @@ func TestServiceMap(t *testing.T) { So(err, ShouldNotBeNil) // test get exists - var instance wt.ServiceInstance + var instance types.ServiceInstance instance, err = svcMap.Get(proto.DatabaseID("db")) So(instance.DatabaseID, ShouldResemble, proto.DatabaseID("db")) @@ -101,7 +101,7 @@ func TestServiceMap(t *testing.T) { So(err, ShouldBeNil) instance.Peers.Servers = append(instance.Peers.Servers, instance.Peers.Servers[0]) // something new - instance.Peers.Servers[1].ID = proto.NodeID("00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35") + instance.Peers.Servers[1] = proto.NodeID("00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35") err = instance.Peers.Sign(privKey) So(err, ShouldBeNil) err = svcMap.Set(instance) @@ -116,7 +116,7 @@ func TestServiceMap(t *testing.T) { So(svcMap.dbMap, ShouldNotContainKey, proto.DatabaseID("db2")) // test get databases - var instances []wt.ServiceInstance + var instances []types.ServiceInstance instances, err = svcMap.GetDatabases(nodeID) So(instances, ShouldHaveLength, 1) So(instances[0].DatabaseID, ShouldResemble, proto.DatabaseID("db")) diff --git a/blockproducer/db_service_test.go b/blockproducer/db_service_test.go index da00e68ba..14d5b4d95 100644 --- a/blockproducer/db_service_test.go +++ b/blockproducer/db_service_test.go @@ -26,7 +26,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" . 
"github.com/smartystreets/goconvey/convey" ) @@ -71,20 +71,20 @@ func TestService(t *testing.T) { So(err, ShouldBeNil) // test get database - getReq := new(GetDatabaseRequest) + getReq := new(types.GetDatabaseRequest) getReq.Header.DatabaseID = proto.DatabaseID("db") err = getReq.Sign(privateKey) So(err, ShouldBeNil) - getRes := new(GetDatabaseResponse) + getRes := new(types.GetDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBGetDatabase.String(), getReq, getRes) So(err, ShouldBeNil) So(getReq.Verify(), ShouldBeNil) So(getRes.Header.InstanceMeta.DatabaseID, ShouldResemble, proto.DatabaseID("db")) // get node databases - getAllReq := new(wt.InitService) - getAllRes := new(wt.InitServiceResponse) + getAllReq := new(types.InitService) + getAllRes := new(types.InitServiceResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBGetNodeDatabases.String(), getAllReq, getAllRes) So(err, ShouldBeNil) So(getAllRes.Verify(), ShouldBeNil) @@ -92,25 +92,25 @@ func TestService(t *testing.T) { So(getAllRes.Header.Instances[0].DatabaseID, ShouldResemble, proto.DatabaseID("db")) // create database, no metric received, should failed - createDBReq := new(CreateDatabaseRequest) - createDBReq.Header.ResourceMeta = wt.ResourceMeta{ + createDBReq := new(types.CreateDatabaseRequest) + createDBReq.Header.ResourceMeta = types.ResourceMeta{ Node: 1, } err = createDBReq.Sign(privateKey) So(err, ShouldBeNil) - createDBRes := new(CreateDatabaseResponse) + createDBRes := new(types.CreateDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBCreateDatabase.String(), createDBReq, createDBRes) So(err, ShouldNotBeNil) // trigger metrics, but does not allow block producer to service as miner metric.NewCollectClient().UploadMetrics(nodeID) - createDBRes = new(CreateDatabaseResponse) + createDBRes = new(types.CreateDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBCreateDatabase.String(), createDBReq, createDBRes) So(err, ShouldNotBeNil) // allow block producer to service as miner, only use this in test case dbService.includeBPNodesForAllocation = true - createDBRes = new(CreateDatabaseResponse) + createDBRes = new(types.CreateDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBCreateDatabase.String(), createDBReq, createDBRes) So(err, ShouldBeNil) So(createDBRes.Verify(), ShouldBeNil) @@ -131,18 +131,18 @@ func TestService(t *testing.T) { }) // use the database - serverID := createDBRes.Header.InstanceMeta.Peers.Leader.ID + serverID := createDBRes.Header.InstanceMeta.Peers.Leader dbID := createDBRes.Header.InstanceMeta.DatabaseID - var queryReq *wt.Request - queryReq, err = buildQuery(wt.WriteQuery, 1, 1, dbID, []string{ + var queryReq *types.Request + queryReq, err = buildQuery(types.WriteQuery, 1, 1, dbID, []string{ "create table test (test int)", "insert into test values(1)", }) So(err, ShouldBeNil) - queryRes := new(wt.Response) + queryRes := new(types.Response) err = rpc.NewCaller().CallNode(serverID, route.DBSQuery.String(), queryReq, queryRes) So(err, ShouldBeNil) - queryReq, err = buildQuery(wt.ReadQuery, 1, 2, dbID, []string{ + queryReq, err = buildQuery(types.ReadQuery, 1, 2, dbID, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -158,16 +158,16 @@ func TestService(t *testing.T) { So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) // drop database - dropDBReq := new(DropDatabaseRequest) + dropDBReq := new(types.DropDatabaseRequest) dropDBReq.Header.DatabaseID = createDBRes.Header.InstanceMeta.DatabaseID err = dropDBReq.Sign(privateKey) 
So(err, ShouldBeNil) - dropDBRes := new(DropDatabaseResponse) + dropDBRes := new(types.DropDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBDropDatabase.String(), dropDBReq, dropDBRes) So(err, ShouldBeNil) // get this database again to test if it is dropped - getReq = new(GetDatabaseRequest) + getReq = new(types.GetDatabaseRequest) getReq.Header.DatabaseID = createDBRes.Header.InstanceMeta.DatabaseID err = getReq.Sign(privateKey) So(err, ShouldBeNil) @@ -176,7 +176,7 @@ func TestService(t *testing.T) { }) } -func buildQuery(queryType wt.QueryType, connID uint64, seqNo uint64, databaseID proto.DatabaseID, queries []string) (query *wt.Request, err error) { +func buildQuery(queryType types.QueryType, connID uint64, seqNo uint64, databaseID proto.DatabaseID, queries []string) (query *types.Request, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -192,15 +192,15 @@ func buildQuery(queryType wt.QueryType, connID uint64, seqNo uint64, databaseID tm := time.Now().UTC() // build queries - realQueries := make([]wt.Query, len(queries)) + realQueries := make([]types.Query, len(queries)) for i, v := range queries { realQueries[i].Pattern = v } - query = &wt.Request{ - Header: wt.SignedRequestHeader{ - RequestHeader: wt.RequestHeader{ + query = &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ DatabaseID: databaseID, QueryType: queryType, NodeID: nodeID, @@ -209,7 +209,7 @@ func buildQuery(queryType wt.QueryType, connID uint64, seqNo uint64, databaseID Timestamp: tm, }, }, - Payload: wt.RequestPayload{ + Payload: types.RequestPayload{ Queries: realQueries, }, } diff --git a/blockproducer/errors.go b/blockproducer/errors.go index 53e845bab..a36b9d8a9 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -19,8 +19,6 @@ package blockproducer import "errors" var ( - // ErrInvalidDBPeersConfig defines database peers invalid error. - ErrInvalidDBPeersConfig = errors.New("invalid database peers config") // ErrNoSuchDatabase defines database meta not exists error. ErrNoSuchDatabase = errors.New("no such database") // ErrDatabaseAllocation defines database allocation failure error. @@ -50,8 +48,6 @@ var ( ErrSmallerSequenceID = errors.New("SequanceID should be bigger than the old one") // ErrInvalidBillingRequest defines BillingRequest is invalid ErrInvalidBillingRequest = errors.New("The BillingRequest is invalid") - // ErrSignVerification indicates a failed signature verification. - ErrSignVerification = errors.New("signature verification failed") // ErrBalanceOverflow indicates that there will be an overflow after balance manipulation. 
ErrBalanceOverflow = errors.New("balance overflow") diff --git a/blockproducer/helper_test.go b/blockproducer/helper_test.go index ccc5d7efa..ce68aca12 100644 --- a/blockproducer/helper_test.go +++ b/blockproducer/helper_test.go @@ -33,17 +33,15 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/worker" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) const ( @@ -55,7 +53,7 @@ var ( ) // copied from sqlchain.xxx_test. -func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -66,9 +64,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: rootHash, @@ -76,12 +74,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error Timestamp: time.Now().UTC(), }, }, - Queries: make([]*hash.Hash, rand.Intn(10)+10), - } - - for i := range b.Queries { - b.Queries[i] = new(hash.Hash) - rand.Read(b.Queries[i][:]) } if isGenesis { @@ -114,7 +106,7 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error // fake a persistence driver. type stubDBMetaPersistence struct{} -func (p *stubDBMetaPersistence) GetDatabase(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { +func (p *stubDBMetaPersistence) GetDatabase(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { // for test purpose, name with db prefix consider it exists if !strings.HasPrefix(string(dbID), "db") { err = ErrNoSuchDatabase @@ -124,7 +116,7 @@ func (p *stubDBMetaPersistence) GetDatabase(dbID proto.DatabaseID) (instance wt. 
return p.getInstanceMeta(dbID) } -func (p *stubDBMetaPersistence) SetDatabase(meta wt.ServiceInstance) (err error) { +func (p *stubDBMetaPersistence) SetDatabase(meta types.ServiceInstance) (err error) { return } @@ -132,18 +124,13 @@ func (p *stubDBMetaPersistence) DeleteDatabase(dbID proto.DatabaseID) (err error return } -func (p *stubDBMetaPersistence) GetAllDatabases() (instances []wt.ServiceInstance, err error) { - instances = make([]wt.ServiceInstance, 1) +func (p *stubDBMetaPersistence) GetAllDatabases() (instances []types.ServiceInstance, err error) { + instances = make([]types.ServiceInstance, 1) instances[0], err = p.getInstanceMeta("db") return } -func (p *stubDBMetaPersistence) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - +func (p *stubDBMetaPersistence) getInstanceMeta(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -155,21 +142,12 @@ func (p *stubDBMetaPersistence) getInstanceMeta(dbID proto.DatabaseID) (instance } instance.DatabaseID = proto.DatabaseID(dbID) - instance.Peers = &kayak.Peers{ - Term: 1, - Leader: &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, + instance.Peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 1, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, }, - PubKey: pubKey, } if err = instance.Peers.Sign(privKey); err != nil { return diff --git a/kayak/api/mux.go b/blockproducer/interfaces/mixins_test.go similarity index 59% rename from kayak/api/mux.go rename to blockproducer/interfaces/mixins_test.go index 305df8177..48fca8eb0 100644 --- a/kayak/api/mux.go +++ b/blockproducer/interfaces/mixins_test.go @@ -14,19 +14,19 @@ * limitations under the License. */ -package api +package interfaces import ( - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" - "github.com/CovenantSQL/CovenantSQL/rpc" -) + "testing" -// NewMuxService create a new transport mux service and register to rpc server. -func NewMuxService(serviceName string, server *rpc.Server) (service *kt.ETLSTransportService) { - service = &kt.ETLSTransportService{ - ServiceName: serviceName, - } - server.RegisterService(serviceName, service) + . 
"github.com/smartystreets/goconvey/convey" +) - return service +func TestTransactionTypeMixin(t *testing.T) { + Convey("test transaction type mixin", t, func() { + m := NewTransactionTypeMixin(TransactionTypeBaseAccount) + So(m.GetTransactionType(), ShouldEqual, TransactionTypeBaseAccount) + m.SetTransactionType(TransactionTypeTransfer) + So(m.GetTransactionType(), ShouldEqual, TransactionTypeTransfer) + }) } diff --git a/blockproducer/interfaces/transaction.go b/blockproducer/interfaces/transaction.go index f4916ac80..ba1d93a86 100644 --- a/blockproducer/interfaces/transaction.go +++ b/blockproducer/interfaces/transaction.go @@ -97,7 +97,7 @@ func (t TransactionType) String() string { type Transaction interface { GetAccountAddress() proto.AccountAddress GetAccountNonce() AccountNonce - GetHash() hash.Hash + Hash() hash.Hash GetTransactionType() TransactionType Sign(signer *asymmetric.PrivateKey) error Verify() error diff --git a/blockproducer/interfaces/transaction_test.go b/blockproducer/interfaces/transaction_test.go index 07ecba4e8..7a80ac53b 100644 --- a/blockproducer/interfaces/transaction_test.go +++ b/blockproducer/interfaces/transaction_test.go @@ -54,4 +54,9 @@ func TestTypes(t *testing.T) { So(h1, ShouldResemble, h2) } }) + Convey("test string", t, func() { + for i := TransactionTypeBilling; i != TransactionTypeNumber+1; i++ { + So(i.String(), ShouldNotBeEmpty) + } + }) } diff --git a/blockproducer/interfaces/transaction_wrapper.go b/blockproducer/interfaces/transaction_wrapper.go index 37948aa38..2c46b6cf7 100644 --- a/blockproducer/interfaces/transaction_wrapper.go +++ b/blockproducer/interfaces/transaction_wrapper.go @@ -77,14 +77,6 @@ func (w *TransactionWrapper) CodecEncodeSelf(e *codec.Encoder) { return } - // if the transaction is supports type transaction mixin - var rawTx interface{} = w.Transaction - if _, ok := rawTx.(ContainsTransactionTypeMixin); ok { - // encode directly - helperEncoder.EncFallback(w.Transaction) - return - } - // translate wrapper to two fields array wrapped by map encDriver.WriteArrayStart(2) encDriver.WriteArrayElem() @@ -146,7 +138,6 @@ func (w *TransactionWrapper) decodeFromWrapper(d *codec.Decoder) { helperDecoder.DecFallback(&w.Transaction, true) } } else { - helperDecoder.DecSwallow() helperDecoder.DecStructFieldNotFound(i, "") } } diff --git a/blockproducer/interfaces/transaction_wrapper_test.go b/blockproducer/interfaces/transaction_wrapper_test.go new file mode 100644 index 000000000..a8d97608c --- /dev/null +++ b/blockproducer/interfaces/transaction_wrapper_test.go @@ -0,0 +1,194 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interfaces_test + +import ( + "testing" + + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + . 
"github.com/smartystreets/goconvey/convey" +) + +type TestTransactionEncode struct { + TestField int64 + pi.TransactionTypeMixin +} + +func (e *TestTransactionEncode) GetAccountAddress() proto.AccountAddress { + return proto.AccountAddress{} +} + +func (e *TestTransactionEncode) GetAccountNonce() pi.AccountNonce { + return pi.AccountNonce(0) +} + +func (e *TestTransactionEncode) Hash() hash.Hash { + return hash.Hash{} +} + +func (e *TestTransactionEncode) Sign(signer *asymmetric.PrivateKey) error { + return nil +} + +func (e *TestTransactionEncode) Verify() error { + return nil +} +func (e *TestTransactionEncode) MarshalHash() ([]byte, error) { + return nil, nil +} + +func (e *TestTransactionEncode) Msgsize() int { + return 0 +} + +func init() { + pi.RegisterTransaction(pi.TransactionTypeBilling, (*TestTransactionEncode)(nil)) +} + +func TestTransactionWrapper(t *testing.T) { + Convey("tx wrapper test", t, func() { + w := &pi.TransactionWrapper{} + So(w.Unwrap(), ShouldBeNil) + + // nil encode + buf, err := utils.EncodeMsgPack(w) + So(err, ShouldBeNil) + var v interface{} + err = utils.DecodeMsgPack(buf.Bytes(), &v) + So(err, ShouldBeNil) + So(v, ShouldBeNil) + + // encode test + e := &TestTransactionEncode{} + e.SetTransactionType(pi.TransactionTypeBilling) + buf, err = utils.EncodeMsgPack(e) + So(err, ShouldBeNil) + var v2 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v2) + So(err, ShouldBeNil) + So(v2.GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + + // encode with wrapper test + e2 := pi.WrapTransaction(e) + buf, err = utils.EncodeMsgPack(e2) + So(err, ShouldBeNil) + var v3 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v3) + So(err, ShouldBeNil) + So(v3.GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + tw, ok := v3.(*pi.TransactionWrapper) + So(ok, ShouldBeTrue) + So(tw.Unwrap().GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + + // test encode non-existence type + e3 := &TestTransactionEncode{} + e3.SetTransactionType(pi.TransactionTypeTransfer) + buf, err = utils.EncodeMsgPack(e3) + So(err, ShouldBeNil) + var v4 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v4) + So(err, ShouldNotBeNil) + + // test invalid decode, not enough length + buf, err = utils.EncodeMsgPack([]uint64{}) + So(err, ShouldBeNil) + var v5 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v5) + So(err, ShouldNotBeNil) + + // test invalid decode, invalid tx type + buf, err = utils.EncodeMsgPack([]uint64{1}) + So(err, ShouldBeNil) + var v6 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v6) + So(err, ShouldNotBeNil) + + // test invalid decode, nil type + buf, err = utils.EncodeMsgPack([]interface{}{nil}) + So(err, ShouldBeNil) + var v7 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v7) + So(err, ShouldNotBeNil) + + // test invalid decode, nil payload + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, nil}) + So(err, ShouldBeNil) + var v8 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v8) + So(err, ShouldNotBeNil) + + // test invalid decode, invalid payload container type + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, []uint64{}}) + So(err, ShouldBeNil) + var v9 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v9) + So(err, ShouldNotBeNil) + + // extra payload + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, e, 1, 2}) + So(err, ShouldBeNil) + var v10 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v10) + So(err, 
ShouldBeNil) + + // test invalid type + buf, err = utils.EncodeMsgPack(1) + So(err, ShouldBeNil) + var v11 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v11) + So(err, ShouldNotBeNil) + + // test invalid mixin + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": "invalid type"}) + So(err, ShouldBeNil) + var v12 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v12) + So(err, ShouldNotBeNil) + + // test invalid mixin type + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeNumber}) + So(err, ShouldBeNil) + var v13 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v13) + So(err, ShouldNotBeNil) + + // test tx data + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeBilling, "TestField": 1}) + So(err, ShouldBeNil) + var v14 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v14) + So(err, ShouldBeNil) + + // test invalid tx data + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeBilling, "TestField": "happy"}) + So(err, ShouldBeNil) + var v15 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v15) + So(err, ShouldNotBeNil) + }) +} + +func TestRegisterTransaction(t *testing.T) { + Convey("test registration", t, func() { + So(func() { pi.RegisterTransaction(pi.TransactionTypeTransfer, nil) }, ShouldPanic) + So(func() { pi.RegisterTransaction(pi.TransactionTypeBaseAccount, (*pi.TransactionWrapper)(nil)) }, ShouldPanic) + }) +} diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index b4d1ae675..ec7814ae0 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -77,9 +77,6 @@ func (s *metaState) loadOrStoreAccountObject( func (s *metaState) loadAccountStableBalance(addr proto.AccountAddress) (b uint64, loaded bool) { var o *accountObject - s.Lock() - defer s.Unlock() - defer func() { log.WithFields(log.Fields{ "account": addr.String(), @@ -87,6 +84,9 @@ func (s *metaState) loadAccountStableBalance(addr proto.AccountAddress) (b uint6 "loaded": loaded, }).Debug("queried stable account") }() + + s.Lock() + defer s.Unlock() if o, loaded = s.dirty.accounts[addr]; loaded && o != nil { b = o.StableCoinBalance @@ -101,9 +101,6 @@ func (s *metaState) loadAccountStableBalance(addr proto.AccountAddress) (b uint6 func (s *metaState) loadAccountCovenantBalance(addr proto.AccountAddress) (b uint64, loaded bool) { var o *accountObject - s.Lock() - defer s.Unlock() - defer func() { log.WithFields(log.Fields{ "account": addr.String(), @@ -112,6 +109,9 @@ func (s *metaState) loadAccountCovenantBalance(addr proto.AccountAddress) (b uin }).Debug("queried covenant account") }() + s.Lock() + defer s.Unlock() + if o, loaded = s.dirty.accounts[addr]; loaded && o != nil { b = o.CovenantCoinBalance return @@ -711,7 +711,7 @@ func (s *metaState) applyTransactionProcedure(t pi.Transaction) (_ func(*bolt.Tx var ( enc *bytes.Buffer - hash = t.GetHash() + hash = t.Hash() addr = t.GetAccountAddress() nonce = t.GetAccountNonce() ttype = t.GetTransactionType() diff --git a/blockproducer/rpc.go b/blockproducer/rpc.go index 358e5bad0..1fafe2f2d 100644 --- a/blockproducer/rpc.go +++ b/blockproducer/rpc.go @@ -18,9 +18,9 @@ package blockproducer import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - "github.com/CovenantSQL/CovenantSQL/blockproducer/types" + pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + 
"github.com/CovenantSQL/CovenantSQL/types" ) // ChainRPCService defines a main chain RPC server. @@ -31,7 +31,7 @@ type ChainRPCService struct { // AdviseNewBlockReq defines a request of the AdviseNewBlock RPC method. type AdviseNewBlockReq struct { proto.Envelope - Block *types.Block + Block *pt.Block } // AdviseNewBlockResp defines a response of the AdviseNewBlock RPC method. @@ -42,7 +42,7 @@ type AdviseNewBlockResp struct { // AdviseTxBillingReq defines a request of the AdviseTxBilling RPC method. type AdviseTxBillingReq struct { proto.Envelope - TxBilling *types.Billing + TxBilling *pt.Billing } // AdviseTxBillingResp defines a response of the AdviseTxBilling RPC method. @@ -61,7 +61,7 @@ type FetchBlockResp struct { proto.Envelope Height uint32 Count uint32 - Block *types.Block + Block *pt.Block } // FetchBlockByCountReq define a request of the FetchBlockByCount RPC method. @@ -135,11 +135,11 @@ type QueryAccountCovenantBalanceResp struct { // AdviseNewBlock is the RPC method to advise a new block to target server. func (s *ChainRPCService) AdviseNewBlock(req *AdviseNewBlockReq, resp *AdviseNewBlockResp) error { s.chain.blocksFromRPC <- req.Block - return s.chain.pushBlock(req.Block) + return nil } // AdviseBillingRequest is the RPC method to advise a new billing request to main chain. -func (s *ChainRPCService) AdviseBillingRequest(req *ct.AdviseBillingReq, resp *ct.AdviseBillingResp) error { +func (s *ChainRPCService) AdviseBillingRequest(req *types.AdviseBillingReq, resp *types.AdviseBillingResp) error { response, err := s.chain.produceBilling(req.Req) if err != nil { return err diff --git a/blockproducer/runtime.go b/blockproducer/runtime.go index b1e376f0d..a4e15bf03 100644 --- a/blockproducer/runtime.go +++ b/blockproducer/runtime.go @@ -21,7 +21,6 @@ import ( "sync" "time" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -50,7 +49,7 @@ type rt struct { // peersMutex protects following peers-relative fields. peersMutex sync.Mutex - peers *kayak.Peers + peers *proto.Peers nodeID proto.NodeID stateMutex sync.Mutex // Protects following fields. @@ -78,7 +77,7 @@ func (r *rt) now() time.Time { func newRuntime(cfg *Config, accountAddress proto.AccountAddress) *rt { var index uint32 for i, s := range cfg.Peers.Servers { - if cfg.NodeID.IsEqual(&s.ID) { + if cfg.NodeID.IsEqual(&s) { index = uint32(i) } } @@ -151,7 +150,7 @@ func (r *rt) getNextTurn() uint32 { return r.nextTurn } -func (r *rt) getPeers() *kayak.Peers { +func (r *rt) getPeers() *proto.Peers { r.peersMutex.Lock() defer r.peersMutex.Unlock() peers := r.peers.Clone() diff --git a/blockproducer/state.go b/blockproducer/state.go index c82fc3618..9e0723e2f 100644 --- a/blockproducer/state.go +++ b/blockproducer/state.go @@ -17,45 +17,12 @@ package blockproducer import ( - "sync" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" ) // State store the node info of chain. 
type State struct { - sync.Mutex Node *blockNode Head hash.Hash Height uint32 } - -func (s *State) getNode() *blockNode { - s.Lock() - defer s.Unlock() - return s.Node -} - -func (s *State) getHeight() uint32 { - s.Lock() - defer s.Unlock() - return s.Height -} - -func (s *State) setHeight(h uint32) { - s.Lock() - defer s.Unlock() - s.Height = h -} - -func (s *State) increaseHeightByOne() { - s.Lock() - defer s.Unlock() - s.Height++ -} - -func (s *State) getHeader() *hash.Hash { - s.Lock() - defer s.Unlock() - return &s.Head -} diff --git a/blockproducer/txpool.go b/blockproducer/txpool.go index 4fd46f683..41b714ae7 100644 --- a/blockproducer/txpool.go +++ b/blockproducer/txpool.go @@ -94,7 +94,7 @@ func (p *txPool) hasTx(tx pi.Transaction) (ok bool) { return } // Check transaction hash - if ok = (tx.GetHash() == te.transactions[index].GetHash()); !ok { + if ok = (tx.Hash() == te.transactions[index].Hash()); !ok { log.Debug("transaction hash already exists") return } @@ -112,7 +112,7 @@ func (p *txPool) cmpAndMoveNextTx(tx pi.Transaction) (ok bool) { return } // Check transaction hash - if ok = (tx.GetHash() == te.transactions[0].GetHash()); !ok { + if ok = (tx.Hash() == te.transactions[0].Hash()); !ok { return } // Move forward diff --git a/blockproducer/types/baseaccount.go b/blockproducer/types/baseaccount.go index ce4facda1..c56b7a421 100644 --- a/blockproducer/types/baseaccount.go +++ b/blockproducer/types/baseaccount.go @@ -50,8 +50,8 @@ func (b *BaseAccount) GetAccountNonce() pi.AccountNonce { return pi.AccountNonce(0) } -// GetHash implements interfaces/Transaction.GetHash. -func (b *BaseAccount) GetHash() (h hash.Hash) { +// Hash implements interfaces/Transaction.Hash. +func (b *BaseAccount) Hash() (h hash.Hash) { return } diff --git a/blockproducer/types/baseaccount_test.go b/blockproducer/types/baseaccount_test.go new file mode 100644 index 000000000..fd1370735 --- /dev/null +++ b/blockproducer/types/baseaccount_test.go @@ -0,0 +1,44 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestBaseAccount(t *testing.T) { + Convey("base account", t, func() { + h, err := hash.NewHashFromStr("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + So(err, ShouldBeNil) + addr := proto.AccountAddress(*h) + ba := NewBaseAccount(&Account{ + Address: addr, + }) + So(ba.GetAccountAddress(), ShouldEqual, addr) + So(ba.GetAccountNonce(), ShouldEqual, 0) + So(ba.Hash(), ShouldEqual, hash.Hash{}) + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + So(ba.Sign(priv), ShouldBeNil) + So(ba.Verify(), ShouldBeNil) + }) +} diff --git a/blockproducer/types/billing.go b/blockproducer/types/billing.go index 0beeb86f7..ba959981c 100644 --- a/blockproducer/types/billing.go +++ b/blockproducer/types/billing.go @@ -19,6 +19,7 @@ package types import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -56,7 +57,7 @@ func NewBillingHeader(nonce pi.AccountNonce, bReq *BillingRequest, producer prot type Billing struct { BillingHeader pi.TransactionTypeMixin - DefaultHashSignVerifierImpl + verifier.DefaultHashSignVerifierImpl } // NewBilling generates a new Billing. diff --git a/blockproducer/types/billing_gen.go b/blockproducer/types/billing_gen.go index 5260f1a35..bc636474f 100644 --- a/blockproducer/types/billing_gen.go +++ b/blockproducer/types/billing_gen.go @@ -18,13 +18,13 @@ func (z *Billing) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +34,7 @@ func (z *Billing) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Billing) Msgsize() (s int) { - s = 1 + 14 + z.BillingHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + s = 1 + 14 + z.BillingHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/blockproducer/types/billing_test.go b/blockproducer/types/billing_test.go index bafa32c10..1c3d6e0c2 100644 --- a/blockproducer/types/billing_test.go +++ b/blockproducer/types/billing_test.go @@ -82,13 +82,13 @@ func TestBilling_SerializeDeserialize(t *testing.T) { } if !tb.Signature.IsEqual(dec.Signature) { - t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Signature, tb.Signature) + t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Signature, dec.Signature) } if !tb.Signee.IsEqual(dec.Signee) { - t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Signee, tb.Signee) + t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Signee, dec.Signee) } - if !tb.Hash.IsEqual(&dec.Hash) { - t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Hash, tb.Hash) + if tb.Hash() != dec.Hash() { + t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Hash(), dec.Hash()) } } @@ -120,4 +120,22 @@ func TestBilling_PackAndSignTx(t *testing.T) { if err != nil { t.Fatalf("Verify signature 
failed: %v", err) } + + // get + addr := hash.Hash(tb.GetAccountAddress()) + if addr.IsEqual(&hash.Hash{}) { + t.Fatal("Get hash failed") + } + + tb.GetAccountNonce() + + if tb.GetDatabaseID() == nil { + t.Fatal("Get nil DatabaseID") + } + + tb.Signature = nil + err = tb.Verify() + if err == nil { + t.Fatal("Verify signature should failed") + } } diff --git a/blockproducer/types/block.go b/blockproducer/types/block.go index a2f88d642..42bb57714 100644 --- a/blockproducer/types/block.go +++ b/blockproducer/types/block.go @@ -67,7 +67,7 @@ func (b *Block) GetTxHashes() []*hash.Hash { hs := make([]*hash.Hash, len(b.Transactions)) for i, v := range b.Transactions { - h := v.GetHash() + h := v.Hash() hs[i] = &h } return hs diff --git a/blockproducer/types/createdb.go b/blockproducer/types/createdb.go index 2d5aa00fa..33749fcb9 100644 --- a/blockproducer/types/createdb.go +++ b/blockproducer/types/createdb.go @@ -19,6 +19,7 @@ package types import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -44,7 +45,7 @@ func (h *CreateDatabaseHeader) GetAccountNonce() pi.AccountNonce { type CreateDatabase struct { CreateDatabaseHeader pi.TransactionTypeMixin - DefaultHashSignVerifierImpl + verifier.DefaultHashSignVerifierImpl } // NewCreateDatabase returns new instance. diff --git a/blockproducer/types/createdb_gen.go b/blockproducer/types/createdb_gen.go index 0b3e6b862..2d4f51a21 100644 --- a/blockproducer/types/createdb_gen.go +++ b/blockproducer/types/createdb_gen.go @@ -25,13 +25,13 @@ func (z *CreateDatabase) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -41,7 +41,7 @@ func (z *CreateDatabase) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabase) Msgsize() (s int) { - s = 1 + 21 + 1 + 6 + z.CreateDatabaseHeader.Owner.Msgsize() + 6 + z.CreateDatabaseHeader.Nonce.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + s = 1 + 21 + 1 + 6 + z.CreateDatabaseHeader.Owner.Msgsize() + 6 + z.CreateDatabaseHeader.Nonce.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/blockproducer/types/createdb_test.go b/blockproducer/types/createdb_test.go new file mode 100644 index 000000000..e61381a4b --- /dev/null +++ b/blockproducer/types/createdb_test.go @@ -0,0 +1,51 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + . "github.com/smartystreets/goconvey/convey" +) + +func TestTxCreateDatabase(t *testing.T) { + Convey("test tx create database", t, func() { + h, err := hash.NewHashFromStr("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + So(err, ShouldBeNil) + addr := proto.AccountAddress(*h) + + cd := NewCreateDatabase(&CreateDatabaseHeader{ + Owner: addr, + Nonce: 1, + }) + + So(cd.GetAccountAddress(), ShouldEqual, addr) + So(cd.GetAccountNonce(), ShouldEqual, 1) + + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + + err = cd.Sign(priv) + So(err, ShouldBeNil) + + err = cd.Verify() + So(err, ShouldBeNil) + }) +} diff --git a/blockproducer/types/msgpack_test.go b/blockproducer/types/msgpack_test.go index c448c3af6..838f244de 100644 --- a/blockproducer/types/msgpack_test.go +++ b/blockproducer/types/msgpack_test.go @@ -106,21 +106,6 @@ func TestEncodeDecodeTransactions(t *testing.T) { So(reflect.TypeOf(out.Maps[k]).String(), ShouldContainSubstring, "TransactionWrapper") } }) - Convey("test encode wrapper, decode using real type", t, func() { - var t pi.Transaction - t = pi.WrapTransaction(NewBaseAccount(&Account{})) - So(reflect.TypeOf(t).String(), ShouldContainSubstring, "TransactionWrapper") - So(t.GetTransactionType(), ShouldEqual, pi.TransactionTypeBaseAccount) - buf, err := utils.EncodeMsgPack(t) - So(err, ShouldBeNil) - - var out *BaseAccount - err = utils.DecodeMsgPack(buf.Bytes(), &out) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - So(out.GetTransactionType(), ShouldEqual, pi.TransactionTypeBaseAccount) - So(reflect.TypeOf(out).String(), ShouldContainSubstring, "BaseAccount") - }) Convey("decode invalid data", t, func() { var testTypes = []interface{}{ "1", diff --git a/blockproducer/types/token.go b/blockproducer/types/token.go new file mode 100644 index 000000000..33a39a97f --- /dev/null +++ b/blockproducer/types/token.go @@ -0,0 +1,85 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "bytes" + "encoding/binary" + + "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// SupportTokenNumber defines the number of token covenantsql supports +const SupportTokenNumber int32 = 4 + +// Token defines token's number. 
+var Token = [SupportTokenNumber]string{ + "Particle", + "Ether", + "EOS", + "Bitcoin", +} + +// TokenType defines token's type +type TokenType int32 + +const ( + // Particle defines covenantsql's token + Particle TokenType = iota + // Ether defines Ethereum. + Ether + // EOS defines EOS. + EOS + // Bitcoin defines Bitcoin. + Bitcoin +) + +// String returns token's symbol. +func (t TokenType) String() string { + if t < 0 || int32(t) >= SupportTokenNumber { + return "Unknown" + } + + return Token[int(t)] +} + +// FromString returns token's number. +func FromString(t string) TokenType { + for i := range Token { + if t == Token[i] { + return TokenType(i) + } + } + return -1 +} + +// Listed returns if the token is listed in list. +func (t *TokenType) Listed() bool { + return (*t) >= 0 && int32(*t) < SupportTokenNumber +} + +// MarshalHash marshals for hash. +func (t *TokenType) MarshalHash() (o []byte, err error) { + var binBuf bytes.Buffer + binary.Write(&binBuf, binary.BigEndian, t) + return binBuf.Bytes(), nil +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message. +func (t *TokenType) Msgsize() (s int) { + return marshalhash.BytesPrefixSize + 4 +} diff --git a/blockproducer/types/token_test.go b/blockproducer/types/token_test.go new file mode 100644 index 000000000..b049081f3 --- /dev/null +++ b/blockproducer/types/token_test.go @@ -0,0 +1,41 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestTokenType(t *testing.T) { + Convey("test token util function", t, func() { + eos := "EOS" + unknown := "Unknown" + token := FromString(eos) + So(eos, ShouldEqual, token.String()) + So(token.Listed(), ShouldBeTrue) + + token = FromString("shitcoin") + So(token.String(), ShouldEqual, unknown) + So(token.Listed(), ShouldBeFalse) + + token = TokenType(SupportTokenNumber) + So(token.String(), ShouldEqual, unknown) + So(token.Listed(), ShouldBeFalse) + }) +} diff --git a/blockproducer/types/transfer.go b/blockproducer/types/transfer.go index e9a170fa0..38d6f7134 100644 --- a/blockproducer/types/transfer.go +++ b/blockproducer/types/transfer.go @@ -19,6 +19,7 @@ package types import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -35,7 +36,7 @@ type TransferHeader struct { type Transfer struct { TransferHeader pi.TransactionTypeMixin - DefaultHashSignVerifierImpl + verifier.DefaultHashSignVerifierImpl } // NewTransfer returns new instance. 
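Note: the new token.go keeps the Token symbol table and the TokenType constants in matching order, so String and FromString are plain index lookups and Listed is a range check. A short usage sketch of the exported API, mirroring what token_test.go above exercises:

package main

import (
	"fmt"

	pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types"
)

func main() {
	// A listed symbol round-trips through the table.
	tok := pt.FromString("EOS")
	fmt.Println(tok.String(), tok.Listed()) // EOS true

	// An unknown symbol maps to -1: unlisted and rendered as "Unknown".
	bad := pt.FromString("Dogecoin")
	fmt.Println(bad.String(), bad.Listed()) // Unknown false
}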
diff --git a/blockproducer/types/transfer_gen.go b/blockproducer/types/transfer_gen.go index 6d910fa70..c2f7f8736 100644 --- a/blockproducer/types/transfer_gen.go +++ b/blockproducer/types/transfer_gen.go @@ -12,19 +12,19 @@ func (z *Transfer) MarshalHash() (o []byte, err error) { o = hsp.Require(b, z.Msgsize()) // map header, size 3 o = append(o, 0x83, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransferHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.TransferHeader.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +34,7 @@ func (z *Transfer) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Transfer) Msgsize() (s int) { - s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 15 + z.TransferHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + s = 1 + 15 + z.TransferHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/blockproducer/types/transfer_test.go b/blockproducer/types/transfer_test.go index 28c02e239..e651054b7 100644 --- a/blockproducer/types/transfer_test.go +++ b/blockproducer/types/transfer_test.go @@ -15,3 +15,32 @@ */ package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestTxTransfer(t *testing.T) { + Convey("test transfer", t, func() { + h, err := hash.NewHashFromStr("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + So(err, ShouldBeNil) + addr := proto.AccountAddress(*h) + + t := NewTransfer(&TransferHeader{ + Sender: addr, + Nonce: 1, + }) + So(t.GetAccountAddress(), ShouldEqual, addr) + So(t.GetAccountNonce(), ShouldEqual, 1) + + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + So(t.Sign(priv), ShouldBeNil) + So(t.Verify(), ShouldBeNil) + }) +} diff --git a/blockproducer/xxx_test.go b/blockproducer/xxx_test.go index 8361b0eeb..14972dc65 100644 --- a/blockproducer/xxx_test.go +++ b/blockproducer/xxx_test.go @@ -30,7 +30,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -373,7 +372,7 @@ func createRandomString(offset, length int, s *string) { *s = string(buff) } -func createTestPeersWithPrivKeys(priv *asymmetric.PrivateKey, num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err error) { +func createTestPeersWithPrivKeys(priv *asymmetric.PrivateKey, num int) (nis []cpuminer.NonceInfo, p *proto.Peers, err error) { if num <= 0 { return } @@ -386,29 +385,20 @@ func createTestPeersWithPrivKeys(priv *asymmetric.PrivateKey, num int) (nis []cp return } - s := make([]*kayak.Server, num) + s := make([]proto.NodeID, num) h := &hash.Hash{} for i := range s { rand.Read(h[:]) - s[i] = &kayak.Server{ - Role: func() proto.ServerRole { - if i == 0 { - return proto.Leader - } - return proto.Follower - }(), - ID: proto.NodeID(nis[i].Hash.String()), - PubKey: pub, - } + s[i] = proto.NodeID(nis[i].Hash.String()) } - p = &kayak.Peers{ - Term: 0, - Leader: s[0], - Servers: s, - PubKey: pub, - Signature: nil, + p = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 0, + Leader: s[0], + Servers: s, + }, } if err = p.Sign(priv); err != nil { @@ -418,7 +408,7 @@ func createTestPeersWithPrivKeys(priv *asymmetric.PrivateKey, num int) (nis []cp return } -func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err error) { +func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *proto.Peers, err error) { if num <= 0 { return } @@ -443,29 +433,20 @@ func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err err return } - s := make([]*kayak.Server, num) + s := make([]proto.NodeID, num) h := &hash.Hash{} for i := range s { rand.Read(h[:]) - s[i] = &kayak.Server{ - Role: func() proto.ServerRole { - if i == 0 { - return proto.Leader - } - return proto.Follower - }(), - ID: proto.NodeID(nis[i].Hash.String()), - PubKey: pub, - } + s[i] = proto.NodeID(nis[i].Hash.String()) } - p = &kayak.Peers{ - Term: 0, - Leader: s[0], - Servers: s, - PubKey: pub, - Signature: nil, + p = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 0, + Leader: s[0], + Servers: s, + }, } if err = p.Sign(priv); err != nil { diff --git a/build.sh b/build.sh index f856edc00..fc39ea309 100755 --- a/build.sh +++ b/build.sh @@ -37,6 +37,9 @@ go test -coverpkg github.com/CovenantSQL/CovenantSQL/... 
-cover -race -c -tags ' cli_pkgpath="github.com/CovenantSQL/CovenantSQL/cmd/cql" CGO_ENABLED=1 go build -ldflags "-X main.version=${version} -X github.com/CovenantSQL/CovenantSQL/conf.RoleTag=C ${GOLDFLAGS}" --tags ${platform}" sqlite_omit_load_extension" -o bin/cql ${cli_pkgpath} +fuse_pkgpath="github.com/CovenantSQL/CovenantSQL/cmd/cql-fuse" +CGO_ENABLED=1 go build -ldflags "-X main.version=${version} -X github.com/CovenantSQL/CovenantSQL/conf.RoleTag=C ${GOLDFLAGS}" --tags ${platform}" sqlite_omit_load_extension" -o bin/cql-fuse ${fuse_pkgpath} + cql_adapter_pkgpath="github.com/CovenantSQL/CovenantSQL/cmd/cql-adapter" CGO_ENABLED=1 go build -ldflags "-X main.version=${version} -X github.com/CovenantSQL/CovenantSQL/conf.RoleTag=C ${GOLDFLAGS}" --tags ${platform}" sqlite_omit_load_extension" -o bin/cql-adapter ${cql_adapter_pkgpath} diff --git a/chain/errors.go b/chain/errors.go deleted file mode 100644 index f936a24db..000000000 --- a/chain/errors.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package chain - -import "errors" - -var ( - // ErrUnknownTx indicates that the transaction is unknown. - ErrUnknownTx = errors.New("unknown transaction") - // ErrDuplicateTx indicates that the transaction will be duplicate in the new block. - ErrDuplicateTx = errors.New("duplicate transaction") - // ErrCorruptedIndex indicates that a corrupted index item is detected. - ErrCorruptedIndex = errors.New("corrupted index") -) diff --git a/chain/interfaces/transaction.go b/chain/interfaces/transaction.go deleted file mode 100644 index 12f07957f..000000000 --- a/chain/interfaces/transaction.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package interfaces - -import ( - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -// Serializer is the interface implemented by an object that can serialize itself into binary form. -type Serializer interface { - Serialize() ([]byte, error) -} - -// Deserializer is the interface implemented by an object that can deserialize a binary -// representation of itself. -type Deserializer interface { - Deserialize(enc []byte) error -} - -// Transaction is the interface implemented by an object that can be verified and processed by -// a blockchain as a transaction. 
-type Transaction interface { - Serializer - Deserializer - GetDatabaseID() *proto.DatabaseID - GetHash() hash.Hash - GetIndexKey() interface{} - GetPersistenceKey() []byte - GetSequenceID() uint32 - GetTime() time.Time - Verify() error -} diff --git a/chain/persistence.go b/chain/persistence.go deleted file mode 100644 index 3cb46e4d7..000000000 --- a/chain/persistence.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package chain - -import ( - ci "github.com/CovenantSQL/CovenantSQL/chain/interfaces" - "github.com/coreos/bbolt" -) - -var ( - metaBucket = [4]byte{0x0, 0x0, 0x0, 0x0} - metaTxIndexBucket = []byte("covenantsql-tx-index-bucket") -) - -// TxPersistence defines a persistence storage for blockchain transactions. -type TxPersistence struct { - db *bolt.DB -} - -// NewTxPersistence returns a new TxPersistence instance using the given bolt database as -// underlying storage engine. -func NewTxPersistence(db *bolt.DB) (ins *TxPersistence, err error) { - // Initialize buckets - if err = db.Update(func(tx *bolt.Tx) (err error) { - meta, err := tx.CreateBucketIfNotExists(metaBucket[:]) - if err != nil { - return - } - _, err = meta.CreateBucketIfNotExists(metaTxIndexBucket) - return - }); err != nil { - return - } - // Create instance if succeed - ins = &TxPersistence{db: db} - return -} - -// PutTransaction serializes and puts the transaction tx into the storage. -func (p *TxPersistence) PutTransaction(tx ci.Transaction) (err error) { - var key, value []byte - key = tx.GetPersistenceKey() - if value, err = tx.Serialize(); err != nil { - return - } - return p.db.Update(func(tx *bolt.Tx) error { - return tx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Put(key, value) - }) -} - -// GetTransaction gets the transaction binary representation from the storage with key and -// deserialize to tx. -// -// It is important that tx must provide an interface with corresponding concrete value, or the -// deserialization will cause unexpected error. -func (p *TxPersistence) GetTransaction(key []byte, tx ci.Transaction) (ok bool, err error) { - var value []byte - if err = p.db.View(func(tx *bolt.Tx) error { - value = tx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Get(key) - return nil - }); err != nil { - return - } - if value != nil { - ok = true - err = tx.Deserialize(value) - return - } - return -} - -// DelTransaction deletes the transaction from the storage with key. -func (p *TxPersistence) DelTransaction(key []byte) (err error) { - return p.db.Update(func(tx *bolt.Tx) error { - return tx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Delete(key) - }) -} - -// PutTransactionAndUpdateIndex serializes and puts the transaction from the storage with key -// and updates transaction index ti in a single database transaction. 
-func (p *TxPersistence) PutTransactionAndUpdateIndex(tx ci.Transaction, ti *TxIndex) (err error) { - var ( - key = tx.GetPersistenceKey() - val []byte - ) - if val, err = tx.Serialize(); err != nil { - return - } - return p.db.Update(func(dbtx *bolt.Tx) (err error) { - if err = dbtx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Put(key, val); err != nil { - return - } - ti.StoreTx(tx) - return - }) -} - -// DelTransactionAndUpdateIndex deletes the transaction from the storage with key and updates -// transaction index ti in a single database transaction. -func (p *TxPersistence) DelTransactionAndUpdateIndex( - pkey []byte, ikey interface{}, ti *TxIndex) (err error, -) { - return p.db.Update(func(dbtx *bolt.Tx) (err error) { - if err = dbtx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Delete(pkey); err != nil { - return - } - ti.DelTx(ikey) - return - }) -} diff --git a/chain/persistence_test.go b/chain/persistence_test.go deleted file mode 100644 index 963ca08fc..000000000 --- a/chain/persistence_test.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package chain - -import ( - "fmt" - "path" - "reflect" - "testing" - - ci "github.com/CovenantSQL/CovenantSQL/chain/interfaces" - "github.com/coreos/bbolt" -) - -func TestBadNewTxPersistence(t *testing.T) { - fl := path.Join(testDataDir, fmt.Sprintf("%s.db", t.Name())) - db, err := bolt.Open(fl, 0600, nil) - if err = db.Close(); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if _, err = NewTxPersistence(db); err == nil { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestTxPersistenceWithClosedDB(t *testing.T) { - fl := path.Join(testDataDir, fmt.Sprintf("%s.db", t.Name())) - db, err := bolt.Open(fl, 0600, nil) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - tp, err := NewTxPersistence(db) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - if err = db.Close(); err != nil { - t.Fatalf("Error occurred: %v", err) - } - var ( - otx ci.Transaction = newRandomDemoTxImpl() - rtx ci.Transaction = &DemoTxImpl{} - ) - if _, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err == nil { - t.Fatalf("Unexpected error: %v", err) - } - if err = tp.PutTransaction(otx); err == nil { - t.Fatalf("Unexpected error: %v", err) - } - if err = tp.DelTransaction(otx.GetPersistenceKey()); err == nil { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestTxPersistence(t *testing.T) { - fl := path.Join(testDataDir, fmt.Sprintf("%s.db", t.Name())) - db, err := bolt.Open(fl, 0600, nil) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - tp, err := NewTxPersistence(db) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test operations: Get -> Put -> Get -> Del -> Get - var ( - otx ci.Transaction = newRandomDemoTxImpl() - rtx ci.Transaction = &DemoTxImpl{} - ) - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if ok { - 
t.Fatalf("Unexpected query result: %v", ok) - } - if err = tp.PutTransaction(otx); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !ok { - t.Fatalf("Unexpected query result: %v", ok) - } else if !reflect.DeepEqual(otx, rtx) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, rtx) - } - if err = tp.DelTransaction(otx.GetPersistenceKey()); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if ok { - t.Fatalf("Unexpected query result: %v", ok) - } -} - -func TestTxPersistenceWithIndex(t *testing.T) { - fl := path.Join(testDataDir, fmt.Sprintf("%s.db", t.Name())) - db, err := bolt.Open(fl, 0600, nil) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - tp, err := NewTxPersistence(db) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - ti := NewTxIndex() - - // Test operations: Get -> Put -> Get -> Del -> Get - var ( - otx ci.Transaction = newRandomDemoTxImpl() - rtx ci.Transaction = &DemoTxImpl{} - ) - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err = tp.PutTransactionAndUpdateIndex(otx, ti); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !ok { - t.Fatalf("Unexpected query result: %v", ok) - } else if !reflect.DeepEqual(otx, rtx) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, rtx) - } - if xtx, ok := ti.LoadTx(otx.GetIndexKey()); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } else if !reflect.DeepEqual(otx, xtx) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, xtx) - } - if err = tp.DelTransactionAndUpdateIndex( - otx.GetPersistenceKey(), otx.GetIndexKey(), ti, - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if _, ok := ti.LoadTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } -} diff --git a/chain/txindex.go b/chain/txindex.go deleted file mode 100644 index 5d764e972..000000000 --- a/chain/txindex.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package chain - -import ( - "sync" - - ci "github.com/CovenantSQL/CovenantSQL/chain/interfaces" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" -) - -type txCache struct { - bh *hash.Hash - tx ci.Transaction -} - -// TxIndex defines transaction index. 
-type TxIndex struct { - index sync.Map -} - -// NewTxIndex returns a new TxIndex instance. -func NewTxIndex() *TxIndex { - return &TxIndex{} -} - -// StoreTx stores tx in the transaction index. -func (i *TxIndex) StoreTx(tx ci.Transaction) { - i.index.Store(tx.GetIndexKey(), &txCache{tx: tx}) -} - -// HasTx returns a boolean value indicating wether the transaction index has key or not. -func (i *TxIndex) HasTx(key interface{}) (ok bool) { - _, ok = i.index.Load(key) - return -} - -// LoadTx loads a transaction with key. -func (i *TxIndex) LoadTx(key interface{}) (tx ci.Transaction, ok bool) { - var ( - val interface{} - tc *txCache - ) - if val, ok = i.index.Load(key); ok { - if tc = val.(*txCache); tc != nil { - tx = tc.tx - } - } - return -} - -// SetBlock sets the block hash filed of txCache with key in the transaction index. -func (i *TxIndex) SetBlock(key interface{}, bh hash.Hash) (ok bool) { - var ( - val interface{} - tc *txCache - ) - if val, ok = i.index.Load(key); ok { - if tc = val.(*txCache); tc != nil { - tc.bh = &bh - } - } - return -} - -// DelTx deletes transaction with key in the transaction index. -func (i *TxIndex) DelTx(key interface{}) { - i.index.Delete(key) -} - -// ResetBlock resets the block hash field of txCache with key in the transaction index. -func (i *TxIndex) ResetBlock(key interface{}) (ok bool) { - var ( - val interface{} - tc *txCache - ) - if val, ok = i.index.Load(key); ok { - if tc = val.(*txCache); tc != nil { - tc.bh = nil - } - } - return -} - -// CheckTxState checks the transaction state for block packing with key in the transaction index. -func (i *TxIndex) CheckTxState(key interface{}) error { - var ( - ok bool - val interface{} - ) - if val, ok = i.index.Load(key); !ok { - return ErrUnknownTx - } - if tc := val.(*txCache); tc == nil { - return ErrCorruptedIndex - } else if tc.bh != nil { - return ErrDuplicateTx - } - return nil -} - -// FetchUnpackedTxes fetches all unpacked tranactions and returns them as a slice. -func (i *TxIndex) FetchUnpackedTxes() (txes []ci.Transaction) { - i.index.Range(func(key interface{}, val interface{}) bool { - if tc := val.(*txCache); tc != nil && tc.bh == nil { - txes = append(txes, tc.tx) - } - return true - }) - return -} diff --git a/chain/txindex_test.go b/chain/txindex_test.go deleted file mode 100644 index bbe109d23..000000000 --- a/chain/txindex_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package chain - -import ( - "reflect" - "testing" - - ci "github.com/CovenantSQL/CovenantSQL/chain/interfaces" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" -) - -func TestTxIndex(t *testing.T) { - var ( - ti = NewTxIndex() - otx ci.Transaction = newRandomDemoTxImpl() - ) - // Test operations: Get -> Put -> Get -> Del -> Get - if ok := ti.HasTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if _, ok := ti.LoadTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if ok := ti.SetBlock(otx.GetIndexKey(), hash.Hash{}); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err := ti.CheckTxState(otx.GetIndexKey()); err != ErrUnknownTx { - t.Fatalf("Unexpected error: %v", err) - } - ti.StoreTx(otx) - if ok := ti.HasTx(otx.GetIndexKey()); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if xtx, ok := ti.LoadTx(otx.GetIndexKey()); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } else if !reflect.DeepEqual(otx, xtx) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, xtx) - } - if err := ti.CheckTxState(otx.GetIndexKey()); err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if ok := ti.SetBlock(otx.GetIndexKey(), hash.Hash{}); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err := ti.CheckTxState(otx.GetIndexKey()); err != ErrDuplicateTx { - t.Fatalf("Unexpected error: %v", err) - } - if ok := ti.ResetBlock(otx.GetIndexKey()); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if txes := ti.FetchUnpackedTxes(); len(txes) != 1 { - t.Fatalf("Unexpected query result: %v", txes) - } else if !reflect.DeepEqual(otx, txes[0]) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, txes[0]) - } - ti.DelTx(otx.GetIndexKey()) - if ok := ti.HasTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if _, ok := ti.LoadTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if ok := ti.SetBlock(otx.GetIndexKey(), hash.Hash{}); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err := ti.CheckTxState(otx.GetIndexKey()); err != ErrUnknownTx { - t.Fatalf("Unexpected error: %v", err) - } -} diff --git a/chain/xxx_gen_test.go b/chain/xxx_gen_test.go deleted file mode 100644 index ed7deaa59..000000000 --- a/chain/xxx_gen_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package chain - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. - -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *DemoHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - o = hsp.AppendTime(o, z.Timestamp) - o = append(o, 0x83) - o = hsp.AppendUint32(o, z.SequenceID) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *DemoHeader) Msgsize() (s int) { - s = 1 + 11 + z.DatabaseID.Msgsize() + 10 + hsp.TimeSize + 11 + hsp.Uint32Size - return -} diff --git a/chain/xxx_test.go b/chain/xxx_test.go deleted file mode 100644 index 80c28b153..000000000 --- a/chain/xxx_test.go +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package chain - -import ( - "io/ioutil" - "math/rand" - "os" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils" -) - -var ( - testDataDir string - testPrivKey *asymmetric.PrivateKey - testPubKey *asymmetric.PublicKey -) - -func createRandomString(offset, length int) string { - buff := make([]byte, rand.Intn(length)+offset) - rand.Read(buff) - for i, v := range buff { - buff[i] = v%(0x7f-0x20) + 0x20 - } - return string(buff) -} - -type DemoHeader struct { - DatabaseID proto.DatabaseID - SequenceID uint32 - Timestamp time.Time -} - -type DemoTxImpl struct { - DemoHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature -} - -func newRandomDemoTxImpl() (i *DemoTxImpl) { - header := DemoHeader{ - DatabaseID: proto.DatabaseID(createRandomString(10, 10)), - SequenceID: rand.Uint32(), - Timestamp: time.Now().UTC(), - } - - enc, err := header.MarshalHash() - if err != nil { - panic(err) - } - - hh := hash.HashH(enc) - sig, err := testPrivKey.Sign(hh[:]) - if err != nil { - panic(err) - } - - i = &DemoTxImpl{ - DemoHeader: header, - HeaderHash: hh, - Signee: testPubKey, - Signature: sig, - } - return -} - -func (i *DemoTxImpl) Serialize() (enc []byte, err error) { - if b, err := utils.EncodeMsgPack(i); err == nil { - enc = b.Bytes() - } - return -} - -func (i *DemoTxImpl) Deserialize(enc []byte) error { - return utils.DecodeMsgPack(enc, i) -} - -func (i *DemoTxImpl) GetDatabaseID() *proto.DatabaseID { - return &i.DatabaseID -} - -func (i *DemoTxImpl) GetHash() hash.Hash { - return i.HeaderHash -} - -func (i *DemoTxImpl) GetIndexKey() interface{} { - return i.HeaderHash -} - -func (i *DemoTxImpl) GetPersistenceKey() []byte { - return i.HeaderHash[:] -} - -func (i *DemoTxImpl) GetSequenceID() uint32 { - return i.SequenceID -} - -func (i *DemoTxImpl) GetTime() time.Time { - return i.Timestamp -} - -func (i *DemoTxImpl) Verify() (err error) { - var enc []byte - if enc, err = i.DemoHeader.MarshalHash(); err != nil { - return - } else if h := hash.THashH(enc); !i.HeaderHash.IsEqual(&h) { - return - } else if !i.Signature.Verify(h[:], i.Signee) { - return - } - return -} - -func setup() { - // Setup RNG - rand.Seed(time.Now().UnixNano()) - - var err error - // Create temp directory - testDataDir, err = ioutil.TempDir("", "covenantsql") - if err != nil { - panic(err) - } - // Create key pair for test - testPrivKey, testPubKey, err = asymmetric.GenSecp256k1KeyPair() - if err != nil { - panic(err) - } -} - -func teardown() { - if err := os.RemoveAll(testDataDir); err != nil { - panic(err) - } -} - -func TestMain(m *testing.M) { - os.Exit(func() int { - setup() - defer teardown() - return m.Run() - }()) -} diff --git a/chainbus/bus.go b/chainbus/bus.go new file mode 100644 index 000000000..e2bcfebec --- /dev/null +++ b/chainbus/bus.go @@ 
-0,0 +1,225 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package chainbus + +import ( + "fmt" + "reflect" + "sync" + + bi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" +) + +// ChainSuber defines subscribing-related bus behavior +type ChainSuber interface { + Subscribe(topic bi.TransactionType, handler interface{}) error + SubscribeAsync(topic bi.TransactionType, handler interface{}, transactional bool) error + SubscribeOnce(topic bi.TransactionType, handler interface{}) error + SubscribeOnceAsync(topic bi.TransactionType, handler interface{}) error + Unsubscribe(topic bi.TransactionType, handler interface{}) error +} + +// ChainPuber defines publishing-related bus behavior +type ChainPuber interface { + Publish(topic bi.TransactionType, args ...interface{}) +} + +// BusController defines bus control behavior (checking handler's presence, synchronization) +type BusController interface { + HasCallback(topic bi.TransactionType) bool + WaitAsync() +} + +// Bus englobes global (subscribe, publish, control) bus behavior +type Bus interface { + BusController + ChainSuber + ChainPuber +} + +// ChainBus - box for handlers and callbacks. +type ChainBus struct { + handlers map[bi.TransactionType][]*eventHandler + lock sync.Mutex // a lock for the map + wg sync.WaitGroup +} + +type eventHandler struct { + callBack reflect.Value + flagOnce bool + async bool + transactional bool + sync.Mutex // lock for an event handler - useful for running async callbacks serially +} + +// New returns new ChainBus with empty handlers. +func New() Bus { + b := &ChainBus{ + make(map[bi.TransactionType][]*eventHandler), + sync.Mutex{}, + sync.WaitGroup{}, + } + return b +} + +// doSubscribe handles the subscription logic and is utilized by the public Subscribe functions +func (bus *ChainBus) doSubscribe(topic bi.TransactionType, fn interface{}, handler *eventHandler) error { + bus.lock.Lock() + defer bus.lock.Unlock() + if !(reflect.TypeOf(fn).Kind() == reflect.Func) { + return fmt.Errorf("%s is not of type reflect.Func", reflect.TypeOf(fn).Kind()) + } + bus.handlers[topic] = append(bus.handlers[topic], handler) + return nil +} + +// Subscribe subscribes to a topic. +// Returns error if `fn` is not a function. +func (bus *ChainBus) Subscribe(topic bi.TransactionType, fn interface{}) error { + return bus.doSubscribe(topic, fn, &eventHandler{ + reflect.ValueOf(fn), false, false, false, sync.Mutex{}, + }) +} + +// SubscribeAsync subscribes to a topic with an asynchronous callback +// Transactional determines whether subsequent callbacks for a topic are +// run serially (true) or concurrently (false) +// Returns error if `fn` is not a function. +func (bus *ChainBus) SubscribeAsync(topic bi.TransactionType, fn interface{}, transactional bool) error { + return bus.doSubscribe(topic, fn, &eventHandler{ + reflect.ValueOf(fn), false, true, transactional, sync.Mutex{}, + }) +} + +// SubscribeOnce subscribes to a topic once. 
Handler will be removed after executing. +// Returns error if `fn` is not a function. +func (bus *ChainBus) SubscribeOnce(topic bi.TransactionType, fn interface{}) error { + return bus.doSubscribe(topic, fn, &eventHandler{ + reflect.ValueOf(fn), true, false, false, sync.Mutex{}, + }) +} + +// SubscribeOnceAsync subscribes to a topic once with an asynchronous callback. +// Handler will be removed after executing. +// Returns error if `fn` is not a function. +func (bus *ChainBus) SubscribeOnceAsync(topic bi.TransactionType, fn interface{}) error { + return bus.doSubscribe(topic, fn, &eventHandler{ + reflect.ValueOf(fn), true, true, false, sync.Mutex{}, + }) +} + +// HasCallback returns true if any callback is subscribed to the topic. +func (bus *ChainBus) HasCallback(topic bi.TransactionType) bool { + bus.lock.Lock() + defer bus.lock.Unlock() + _, ok := bus.handlers[topic] + if ok { + return len(bus.handlers[topic]) > 0 + } + return false +} + +// Unsubscribe removes a callback defined for a topic. +// Returns error if there are no callbacks subscribed to the topic. +func (bus *ChainBus) Unsubscribe(topic bi.TransactionType, handler interface{}) error { + bus.lock.Lock() + defer bus.lock.Unlock() + if _, ok := bus.handlers[topic]; ok && len(bus.handlers[topic]) > 0 { + bus.removeHandler(topic, bus.findHandlerIdx(topic, reflect.ValueOf(handler))) + return nil + } + return fmt.Errorf("topic %s doesn't exist", topic) +} + +// Publish executes the callbacks defined for a topic. Any additional arguments are passed to each callback. +func (bus *ChainBus) Publish(topic bi.TransactionType, args ...interface{}) { + bus.lock.Lock() // will unlock if handler is not found or always after setUpPublish + defer bus.lock.Unlock() + if handlers, ok := bus.handlers[topic]; ok && 0 < len(handlers) { + // Handlers slice may be changed by removeHandler and Unsubscribe during iteration, + // so make a copy and iterate the copied slice. + copyHandlers := make([]*eventHandler, 0, len(handlers)) + copyHandlers = append(copyHandlers, handlers...) + for i, handler := range copyHandlers { + if handler.flagOnce { + bus.removeHandler(topic, i) + } + if !handler.async { + bus.doPublish(handler, topic, args...) + } else { + bus.wg.Add(1) + if handler.transactional { + handler.Lock() + } + go bus.doPublishAsync(handler, topic, args...) + } + } + } +} + +func (bus *ChainBus) doPublish(handler *eventHandler, topic bi.TransactionType, args ...interface{}) { + passedArguments := bus.setUpPublish(topic, args...) + handler.callBack.Call(passedArguments) +} + +func (bus *ChainBus) doPublishAsync(handler *eventHandler, topic bi.TransactionType, args ...interface{}) { + defer bus.wg.Done() + if handler.transactional { + defer handler.Unlock() + } + bus.doPublish(handler, topic, args...)
+} + +func (bus *ChainBus) removeHandler(topic bi.TransactionType, idx int) { + if _, ok := bus.handlers[topic]; !ok { + return + } + l := len(bus.handlers[topic]) + + if !(0 <= idx && idx < l) { + return + } + + copy(bus.handlers[topic][idx:], bus.handlers[topic][idx+1:]) + bus.handlers[topic][l-1] = nil // or the zero value of T + bus.handlers[topic] = bus.handlers[topic][:l-1] +} + +func (bus *ChainBus) findHandlerIdx(topic bi.TransactionType, callback reflect.Value) int { + if _, ok := bus.handlers[topic]; ok { + for idx, handler := range bus.handlers[topic] { + if handler.callBack == callback { + return idx + } + } + } + return -1 +} + +func (bus *ChainBus) setUpPublish(topic bi.TransactionType, args ...interface{}) []reflect.Value { + + passedArguments := make([]reflect.Value, 0) + for _, arg := range args { + passedArguments = append(passedArguments, reflect.ValueOf(arg)) + } + return passedArguments +} + +// WaitAsync waits for all async callbacks to complete +func (bus *ChainBus) WaitAsync() { + bus.wg.Wait() +} diff --git a/chainbus/bus_test.go b/chainbus/bus_test.go new file mode 100644 index 000000000..552bc8dcc --- /dev/null +++ b/chainbus/bus_test.go @@ -0,0 +1,175 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
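For orientation, here is a minimal usage sketch of the chain bus API above (illustrative only, not part of the changeset; the topic value is a hypothetical bi.TransactionType):

package main

import (
	"fmt"

	bi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces"
	"github.com/CovenantSQL/CovenantSQL/chainbus"
)

func main() {
	var txEvent = bi.TransactionType(1) // hypothetical topic

	bus := chainbus.New()

	// Synchronous handler: runs inline on the Publish path.
	_ = bus.Subscribe(txEvent, func(count int) {
		fmt.Println("sync got", count)
	})

	// Asynchronous handler; transactional=true serializes its callbacks.
	_ = bus.SubscribeAsync(txEvent, func(count int) {
		fmt.Println("async got", count)
	}, true)

	bus.Publish(txEvent, 42)
	bus.WaitAsync() // drain async handlers before exit
}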
+ */ + +package chainbus + +import ( + "sync/atomic" + "testing" + "time" + + bi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" +) + +func TestNew(t *testing.T) { + bus := New() + if bus == nil { + t.Log("New EventBus not created!") + t.Fail() + } +} + +func TestHasCallback(t *testing.T) { + bus := New() + bus.Subscribe(bi.TransactionType(1), func() {}) + if bus.HasCallback(bi.TransactionType(2)) { + t.Fail() + } + if !bus.HasCallback(bi.TransactionType(1)) { + t.Fail() + } +} + +func TestSubscribe(t *testing.T) { + bus := New() + if bus.Subscribe(bi.TransactionType(1), func() {}) != nil { + t.Fail() + } + if bus.Subscribe(bi.TransactionType(1), "String") == nil { + t.Fail() + } +} + +func TestSubscribeOnce(t *testing.T) { + bus := New() + if bus.SubscribeOnce(bi.TransactionType(1), func() {}) != nil { + t.Fail() + } + if bus.SubscribeOnce(bi.TransactionType(1), "String") == nil { + t.Fail() + } +} + +func TestSubscribeOnceAndManySubscribe(t *testing.T) { + bus := New() + event := bi.TransactionType(1) + flag := 0 + fn := func() { flag++ } + bus.SubscribeOnce(event, fn) + bus.Subscribe(event, fn) + bus.Subscribe(event, fn) + bus.Publish(event) + + if flag != 3 { + t.Fail() + } +} + +func TestUnsubscribe(t *testing.T) { + bus := New() + handler := func() {} + bus.Subscribe(bi.TransactionType(1), handler) + if bus.Unsubscribe(bi.TransactionType(1), handler) != nil { + t.Fail() + } + if bus.Unsubscribe(bi.TransactionType(1), handler) == nil { + t.Fail() + } +} + +func TestPublish(t *testing.T) { + bus := New() + bus.Subscribe(bi.TransactionType(1), func(a int, b int) { + if a != b { + t.Fail() + } + }) + bus.Publish(bi.TransactionType(1), 10, 10) +} + +func TestSubcribeOnceAsync(t *testing.T) { + results := make([]int, 0) + + bus := New() + bus.SubscribeOnceAsync(bi.TransactionType(1), func(a int, out *[]int) { + *out = append(*out, a) + }) + + bus.Publish(bi.TransactionType(1), 10, &results) + bus.Publish(bi.TransactionType(1), 10, &results) + + bus.WaitAsync() + + if len(results) != 1 { + t.Fail() + } + + if bus.HasCallback(bi.TransactionType(1)) { + t.Fail() + } +} + +func TestSubscribeAsyncTransactional(t *testing.T) { + results := make([]int, 0) + + bus := New() + bus.SubscribeAsync(bi.TransactionType(1), func(a int, out *[]int, dur string) { + sleep, _ := time.ParseDuration(dur) + time.Sleep(sleep) + *out = append(*out, a) + }, true) + + bus.Publish(bi.TransactionType(1), 1, &results, "1s") + bus.Publish(bi.TransactionType(1), 2, &results, "0s") + + bus.WaitAsync() + + if len(results) != 2 { + t.Fail() + } + + if results[0] != 1 || results[1] != 2 { + t.Fail() + } +} + +func TestSubscribeAsync(t *testing.T) { + results := make(chan int) + + bus := New() + bus.SubscribeAsync(bi.TransactionType(1), func(a int, out chan<- int) { + out <- a + }, false) + + bus.Publish(bi.TransactionType(1), 1, results) + bus.Publish(bi.TransactionType(1), 2, results) + + var numResults int32 + + go func() { + for range results { + atomic.AddInt32(&numResults, 1) + } + }() + + bus.WaitAsync() + + time.Sleep(10 * time.Millisecond) + + if atomic.LoadInt32(&numResults) != 2 { + t.Fail() + } +} diff --git a/chainbus/doc.go b/chainbus/doc.go new file mode 100644 index 000000000..d606ba5bb --- /dev/null +++ b/chainbus/doc.go @@ -0,0 +1,39 @@ +//The MIT License (MIT) +// +//Copyright (c) 2014 Alex Saskevich +// +//Permission is hereby granted, free of charge, to any person obtaining a copy +//of this software and associated documentation files (the "Software"), to deal +//in the Software without 
restriction, including without limitation the rights +//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +//copies of the Software, and to permit persons to whom the Software is +//furnished to do so, subject to the following conditions: +// +//The above copyright notice and this permission notice shall be included in all +//copies or substantial portions of the Software. +// +//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +//SOFTWARE. + +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package chainbus diff --git a/cleanupDB.sh b/cleanupDB.sh index caf5dc037..ff4110925 100755 --- a/cleanupDB.sh +++ b/cleanupDB.sh @@ -2,10 +2,10 @@ PROJECT_DIR=$(cd $(dirname $0)/; pwd) -cd ${PROJECT_DIR} && find . -name '*.db' -exec rm -f {} \; -cd ${PROJECT_DIR} && find . -name '*.db-shm' -exec rm -f {} \; -cd ${PROJECT_DIR} && find . -name '*.db-wal' -exec rm -f {} \; -cd ${PROJECT_DIR} && find . -name 'db.meta' -exec rm -f {} \; -cd ${PROJECT_DIR} && find . -name 'public.keystore' -exec rm -f {} \; -cd ${PROJECT_DIR} && find . -name '*.public.keystore' -exec rm -f {} \; -cd ${PROJECT_DIR} && find . -type d -name '*.ldb' -prune -exec rm -rf {} \; +cd ${PROJECT_DIR} && find . -name '*.db' -exec rm -vf {} \; +cd ${PROJECT_DIR} && find . -name '*.db-shm' -exec rm -vf {} \; +cd ${PROJECT_DIR} && find . -name '*.db-wal' -exec rm -vf {} \; +cd ${PROJECT_DIR} && find . -name 'db.meta' -exec rm -vf {} \; +cd ${PROJECT_DIR} && find . -name 'public.keystore' -exec rm -vf {} \; +cd ${PROJECT_DIR} && find . -name '*.public.keystore' -exec rm -vf {} \; +cd ${PROJECT_DIR} && find . 
-type d -name '*.ldb' -prune -exec rm -vrf {} \; diff --git a/client/_example/gdpaverage.go b/client/_example/gdpaverage.go index 367f841c6..f3a1e098e 100644 --- a/client/_example/gdpaverage.go +++ b/client/_example/gdpaverage.go @@ -21,7 +21,7 @@ import ( "flag" "github.com/CovenantSQL/CovenantSQL/client" - log "github.com/Sirupsen/logrus" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) func main() { diff --git a/client/_example/simple.go b/client/_example/simple.go index 1dd270aab..cc8c88464 100644 --- a/client/_example/simple.go +++ b/client/_example/simple.go @@ -22,7 +22,7 @@ import ( "fmt" "github.com/CovenantSQL/CovenantSQL/client" - log "github.com/sirupsen/logrus" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) func main() { diff --git a/client/conn.go b/client/conn.go index a91de5258..eeec4d1db 100644 --- a/client/conn.go +++ b/client/conn.go @@ -26,23 +26,22 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) // conn implements an interface sql.Conn. type conn struct { dbID proto.DatabaseID - queries []wt.Query + queries []types.Query localNodeID proto.NodeID privKey *asymmetric.PrivateKey - ackCh chan *wt.Ack + ackCh chan *types.Ack inTransaction bool closed int32 pCaller *rpc.PersistentCaller @@ -65,15 +64,17 @@ func newConn(cfg *Config) (c *conn, err error) { dbID: proto.DatabaseID(cfg.DatabaseID), localNodeID: localNodeID, privKey: privKey, - queries: make([]wt.Query, 0), + queries: make([]types.Query, 0), } + var peers *proto.Peers // get peers from BP - if _, err = cacheGetPeers(c.dbID, c.privKey); err != nil { + if peers, err = cacheGetPeers(c.dbID, c.privKey); err != nil { log.WithError(err).Error("cacheGetPeers failed") c = nil return } + c.pCaller = rpc.NewPersistentCaller(peers.Leader) err = c.startAckWorkers(2) if err != nil { @@ -87,7 +88,7 @@ func newConn(cfg *Config) (c *conn, err error) { } func (c *conn) startAckWorkers(workerCount int) (err error) { - c.ackCh = make(chan *wt.Ack, workerCount*4) + c.ackCh = make(chan *types.Ack, workerCount*4) for i := 0; i < workerCount; i++ { go c.ackWorker() } @@ -100,7 +101,7 @@ func (c *conn) stopAckWorkers() { func (c *conn) ackWorker() { if rawPeers, ok := peerList.Load(c.dbID); ok { - if peers, ok := rawPeers.(*kayak.Peers); ok { + if peers, ok := rawPeers.(*proto.Peers); ok { var ( oneTime sync.Once pc *rpc.PersistentCaller @@ -114,14 +115,14 @@ func (c *conn) ackWorker() { break ackWorkerLoop } oneTime.Do(func() { - pc = rpc.NewPersistentCaller(peers.Leader.ID) + pc = rpc.NewPersistentCaller(peers.Leader) }) if err = ack.Sign(c.privKey, false); err != nil { log.WithField("target", pc.TargetID).WithError(err).Error("failed to sign ack") continue } - var ackRes wt.AckResponse + var ackRes types.AckResponse // send ack back if err = pc.Call(route.DBSAck.String(), ack, &ackRes); err != nil { log.WithError(err).Warning("send ack failed") @@ -202,11 +203,16 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name // TODO(xq262144): make use of the ctx argument sq := convertQuery(query, args) - if _, err = c.addQuery(wt.WriteQuery, sq); err != nil { + + var affectedRows, lastInsertID int64 + if affectedRows, 
lastInsertID, _, err = c.addQuery(types.WriteQuery, sq); err != nil { return } - result = driver.ResultNoRows + result = &execResult{ + affectedRows: affectedRows, + lastInsertID: lastInsertID, + } return } @@ -220,7 +226,9 @@ func (c *conn) QueryContext(ctx context.Context, query string, args []driver.Nam // TODO(xq262144): make use of the ctx argument sq := convertQuery(query, args) - return c.addQuery(wt.ReadQuery, sq) + _, _, rows, err = c.addQuery(types.ReadQuery, sq) + + return } // Commit implements the driver.Tx.Commit method. @@ -240,7 +248,7 @@ func (c *conn) Commit() (err error) { if len(c.queries) > 0 { // send query - if _, err = c.sendQuery(wt.WriteQuery, c.queries); err != nil { + if _, _, _, err = c.sendQuery(types.WriteQuery, c.queries); err != nil { return } } @@ -270,10 +278,10 @@ func (c *conn) Rollback() error { return nil } -func (c *conn) addQuery(queryType wt.QueryType, query *wt.Query) (rows driver.Rows, err error) { +func (c *conn) addQuery(queryType types.QueryType, query *types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { if c.inTransaction { // check query type, enqueue query - if queryType == wt.ReadQuery { + if queryType == types.ReadQuery { // read query is not supported in transaction err = ErrQueryInTransaction return @@ -295,11 +303,11 @@ func (c *conn) addQuery(queryType wt.QueryType, query *wt.Query) (rows driver.Ro "args": query.Args, }).Debug("execute query") - return c.sendQuery(queryType, []wt.Query{*query}) + return c.sendQuery(queryType, []types.Query{*query}) } -func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (rows driver.Rows, err error) { - var peers *kayak.Peers +func (c *conn) sendQuery(queryType types.QueryType, queries []types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { + var peers *proto.Peers if peers, err = cacheGetPeers(c.dbID, c.privKey); err != nil { return } @@ -314,15 +322,15 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (rows drive "type": queryType.String(), "connID": connID, "seqNo": seqNo, - "target": peers.Leader.ID, + "target": peers.Leader, "source": c.localNodeID, }).WithError(err).Debug("send query") }() // build request - req := &wt.Request{ - Header: wt.SignedRequestHeader{ - RequestHeader: wt.RequestHeader{ + req := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ QueryType: queryType, NodeID: c.localNodeID, DatabaseID: c.dbID, @@ -331,7 +339,7 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (rows drive Timestamp: getLocalTime(), }, }, - Payload: wt.RequestPayload{ + Payload: types.RequestPayload{ Queries: queries, }, } @@ -340,8 +348,7 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (rows drive return } - c.pCaller = rpc.NewPersistentCaller(peers.Leader.ID) - var response wt.Response + var response types.Response if err = c.pCaller.Call(route.DBSQuery.String(), req, &response); err != nil { return } @@ -352,10 +359,15 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (rows drive } rows = newRows(&response) + if queryType == types.WriteQuery { + affectedRows = response.Header.AffectedRows + lastInsertID = response.Header.LastInsertID + } + // build ack - c.ackCh <- &wt.Ack{ - Header: wt.SignedAckHeader{ - AckHeader: wt.AckHeader{ + c.ackCh <- &types.Ack{ + Header: types.SignedAckHeader{ + AckHeader: types.AckHeader{ Response: response.Header, NodeID: c.localNodeID, Timestamp: getLocalTime(), @@ 
-370,16 +382,17 @@ func getLocalTime() time.Time { return time.Now().UTC() } -func convertQuery(query string, args []driver.NamedValue) (sq *wt.Query) { +func convertQuery(query string, args []driver.NamedValue) (sq *types.Query) { // rebuild args to named args - sq = &wt.Query{ + sq = &types.Query{ Pattern: query, } - sq.Args = make([]sql.NamedArg, len(args)) + sq.Args = make([]types.NamedArg, len(args)) for i, v := range args { - sq.Args[i] = sql.Named(v.Name, v.Value) + sq.Args[i].Name = v.Name + sq.Args[i].Value = v.Value } return diff --git a/client/conn_test.go b/client/conn_test.go index 29022e9e4..c65679210 100644 --- a/client/conn_test.go +++ b/client/conn_test.go @@ -159,10 +159,18 @@ func TestTransaction(t *testing.T) { So(db, ShouldNotBeNil) So(err, ShouldBeNil) + var execResult sql.Result + var lastInsertID, affectedRows int64 + _, err = db.Exec("create table test (test int)") So(err, ShouldBeNil) - _, err = db.Exec("insert into test values (1)") + execResult, err = db.Exec("insert into test values (1)") + So(err, ShouldBeNil) + lastInsertID, err = execResult.LastInsertId() So(err, ShouldBeNil) + So(lastInsertID, ShouldEqual, 1) + affectedRows, err = execResult.RowsAffected() + So(affectedRows, ShouldEqual, 1) // test start transaction var tx *sql.Tx diff --git a/client/driver.go b/client/driver.go index 4522fac11..5a789ae88 100644 --- a/client/driver.go +++ b/client/driver.go @@ -25,19 +25,17 @@ import ( "sync/atomic" "time" - "github.com/pkg/errors" - bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" ) const ( @@ -53,7 +51,7 @@ var ( driverInitialized uint32 peersUpdaterRunning uint32 - peerList sync.Map // map[proto.DatabaseID]*kayak.Peers + peerList sync.Map // map[proto.DatabaseID]*proto.Peers connIDLock sync.Mutex connIDAvail []uint64 globalSeqNo uint64 @@ -86,7 +84,7 @@ func (d *covenantSQLDriver) Open(dsn string) (conn driver.Conn, err error) { } // ResourceMeta defines new database resources requirement descriptions. -type ResourceMeta wt.ResourceMeta +type ResourceMeta types.ResourceMeta // Init defines init process for client. 
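With ExecContext now returning a concrete driver.Result instead of driver.ResultNoRows, both counters surface through the standard database/sql API. A minimal consumer sketch (assumptions: the driver is registered under the name "covenantsql" by this package, and the DSN shown is a placeholder):

package main

import (
	"database/sql"
	"log"

	_ "github.com/CovenantSQL/CovenantSQL/client" // registers the driver
)

func main() {
	// DSN is illustrative; use the value returned by client.Create.
	db, err := sql.Open("covenantsql", "covenantsql://some-database-id")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	res, err := db.Exec("insert into test values (1)")
	if err != nil {
		log.Fatal(err)
	}
	// Both values are populated from types.Response.Header by conn.sendQuery above.
	id, _ := res.LastInsertId()
	n, _ := res.RowsAffected()
	log.Printf("last insert id=%d, affected rows=%d", id, n)
}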
func Init(configFile string, masterKey []byte) (err error) { @@ -124,12 +122,8 @@ func Create(meta ResourceMeta) (dsn string, err error) { return } - req := new(bp.CreateDatabaseRequest) - req.Header.ResourceMeta = wt.ResourceMeta(meta) - if req.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { - err = errors.Wrap(err, "get local public key failed") - return - } + req := new(types.CreateDatabaseRequest) + req.Header.ResourceMeta = types.ResourceMeta(meta) var privateKey *asymmetric.PrivateKey if privateKey, err = kms.GetLocalPrivateKey(); err != nil { err = errors.Wrap(err, "get local private key failed") @@ -139,7 +133,7 @@ func Create(meta ResourceMeta) (dsn string, err error) { err = errors.Wrap(err, "sign request failed") return } - res := new(bp.CreateDatabaseResponse) + res := new(types.CreateDatabaseResponse) if err = requestBP(route.BPDBCreateDatabase, req, res); err != nil { err = errors.Wrap(err, "call BPDB.CreateDatabase failed") @@ -169,11 +163,8 @@ func Drop(dsn string) (err error) { return } - req := new(bp.DropDatabaseRequest) + req := new(types.DropDatabaseRequest) req.Header.DatabaseID = proto.DatabaseID(cfg.DatabaseID) - if req.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { - return - } var privateKey *asymmetric.PrivateKey if privateKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -181,7 +172,7 @@ func Drop(dsn string) (err error) { if err = req.Sign(privateKey); err != nil { return } - res := new(bp.DropDatabaseResponse) + res := new(types.DropDatabaseResponse) err = requestBP(route.BPDBDropDatabase, req, res) return @@ -321,7 +312,7 @@ func stopPeersUpdater() { atomic.StoreUint32(&peersUpdaterRunning, 0) } -func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *kayak.Peers, err error) { +func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *proto.Peers, err error) { var ok bool var rawPeers interface{} var cacheHit bool @@ -334,7 +325,7 @@ func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers }() if rawPeers, ok = peerList.Load(dbID); ok { - if peers, ok = rawPeers.(*kayak.Peers); ok { + if peers, ok = rawPeers.(*proto.Peers); ok { cacheHit = true return } @@ -344,8 +335,8 @@ func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers return getPeers(dbID, privKey) } -func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *kayak.Peers, err error) { - req := new(bp.GetDatabaseRequest) +func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *proto.Peers, err error) { + req := new(types.GetDatabaseRequest) req.Header.DatabaseID = dbID defer func() { @@ -359,7 +350,7 @@ func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *kay return } - res := new(bp.GetDatabaseResponse) + res := new(types.GetDatabaseResponse) if err = requestBP(route.BPDBGetDatabase, req, res); err != nil { return } diff --git a/client/driver_test.go b/client/driver_test.go index f412b19f7..018cd9665 100644 --- a/client/driver_test.go +++ b/client/driver_test.go @@ -36,6 +36,9 @@ func TestInit(t *testing.T) { stopTestService, confDir, err = startTestService() So(err, ShouldBeNil) defer stopTestService() + // already initialized + err = Init(filepath.Join(confDir, "config.yaml"), []byte("")) + So(err, ShouldNotBeNil) // fake driver not initialized atomic.StoreUint32(&driverInitialized, 0) err = Init(filepath.Join(confDir, "config.yaml"), []byte("")) So(err, ShouldBeNil) diff --git a/client/helper_test.go b/client/helper_test.go index
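The Create and Drop flows above keep the same public shape after the move to types.* envelopes: build a request, sign it with the local private key, and send it to the BP. A hedged end-to-end sketch (ResourceMeta is left at its zero value because the full types.ResourceMeta schema is not part of this diff):

package main

import (
	"log"

	"github.com/CovenantSQL/CovenantSQL/client"
)

func main() {
	if err := client.Init("./config.yaml", []byte("")); err != nil {
		log.Fatal(err)
	}

	var meta client.ResourceMeta // fill in miner/resource requirements per types.ResourceMeta
	dsn, err := client.Create(meta)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created database, dsn:", dsn)

	if err = client.Drop(dsn); err != nil {
		log.Fatal(err)
	}
}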
ed267535a..9b4be6670 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -34,16 +34,14 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/worker" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) const ( @@ -58,7 +56,7 @@ var ( // fake BPDB service type stubBPDBService struct{} -func (s *stubBPDBService) CreateDatabase(req *bp.CreateDatabaseRequest, resp *bp.CreateDatabaseResponse) (err error) { +func (s *stubBPDBService) CreateDatabase(req *types.CreateDatabaseRequest, resp *types.CreateDatabaseResponse) (err error) { if resp.Header.InstanceMeta, err = s.getInstanceMeta(proto.DatabaseID("db")); err != nil { return } @@ -75,11 +73,11 @@ func (s *stubBPDBService) CreateDatabase(req *bp.CreateDatabaseRequest, resp *bp return } -func (s *stubBPDBService) DropDatabase(req *bp.DropDatabaseRequest, resp *bp.DropDatabaseRequest) (err error) { +func (s *stubBPDBService) DropDatabase(req *types.DropDatabaseRequest, resp *types.DropDatabaseRequest) (err error) { return } -func (s *stubBPDBService) GetDatabase(req *bp.GetDatabaseRequest, resp *bp.GetDatabaseResponse) (err error) { +func (s *stubBPDBService) GetDatabase(req *types.GetDatabaseRequest, resp *types.GetDatabaseResponse) (err error) { if resp.Header.InstanceMeta, err = s.getInstanceMeta(req.Header.DatabaseID); err != nil { return } @@ -96,8 +94,8 @@ func (s *stubBPDBService) GetDatabase(req *bp.GetDatabaseRequest, resp *bp.GetDa return } -func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitServiceResponse) (err error) { - resp.Header.Instances = make([]wt.ServiceInstance, 0) +func (s *stubBPDBService) GetNodeDatabases(req *types.InitService, resp *types.InitServiceResponse) (err error) { + resp.Header.Instances = make([]types.ServiceInstance, 0) if resp.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { return } @@ -111,12 +109,7 @@ func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitSer return } -func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - +func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -128,21 +121,12 @@ func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.Se } instance.DatabaseID = proto.DatabaseID(dbID) - instance.Peers = &kayak.Peers{ - Term: 1, - Leader: &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, + instance.Peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 1, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, - }, - PubKey: pubKey, } if err = instance.Peers.Sign(privKey); err != nil { return @@ -202,10 +186,10 @@ 
func startTestService() (stopTestService func(), tempDir string, err error) { } // add database - var req *wt.UpdateService - var res wt.UpdateServiceResponse - var peers *kayak.Peers - var block *ct.Block + var req *types.UpdateService + var res types.UpdateServiceResponse + var peers *proto.Peers + var block *types.Block dbID := proto.DatabaseID("db") @@ -218,9 +202,9 @@ func startTestService() (stopTestService func(), tempDir string, err error) { } // build create database request - req = new(wt.UpdateService) - req.Header.Op = wt.CreateDB - req.Header.Instance = wt.ServiceInstance{ + req = new(types.UpdateService) + req.Header.Op = types.CreateDB + req.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, Peers: peers, GenesisBlock: block, @@ -321,7 +305,7 @@ func initNode() (cleanupFunc func(), tempDir string, server *rpc.Server, err err } // copied from sqlchain.xxx_test. -func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -332,9 +316,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: rootHash, @@ -342,12 +326,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error Timestamp: time.Now().UTC(), }, }, - Queries: make([]*hash.Hash, rand.Intn(10)+10), - } - - for i := range b.Queries { - b.Queries[i] = new(hash.Hash) - rand.Read(b.Queries[i][:]) } if isGenesis { @@ -401,7 +379,7 @@ func getKeys() (privKey *asymmetric.PrivateKey, pubKey *asymmetric.PublicKey, er return } -func genPeers(term uint64) (peers *kayak.Peers, err error) { +func genPeers(term uint64) (peers *proto.Peers, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -409,24 +387,19 @@ func genPeers(term uint64) (peers *kayak.Peers, err error) { } // get private/public key - var pubKey *asymmetric.PublicKey var privateKey *asymmetric.PrivateKey - if privateKey, pubKey, err = getKeys(); err != nil { + if privateKey, _, err = getKeys(); err != nil { return } // generate peers and sign - server := &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - } - peers = &kayak.Peers{ - Term: term, - Leader: server, - Servers: []*kayak.Server{server}, - PubKey: pubKey, + peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: term, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, + }, } err = peers.Sign(privateKey) return diff --git a/client/result.go b/client/result.go new file mode 100644 index 000000000..5faa21deb --- /dev/null +++ b/client/result.go @@ -0,0 +1,32 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package client + +type execResult struct { + affectedRows int64 + lastInsertID int64 +} + +// LastInsertId returns the last inserted ID. +func (r *execResult) LastInsertId() (int64, error) { + return r.lastInsertID, nil +} + +// RowsAffected returns the number of affected rows. +func (r *execResult) RowsAffected() (int64, error) { + return r.affectedRows, nil +} diff --git a/client/result_test.go b/client/result_test.go new file mode 100644 index 000000000..be6975fcc --- /dev/null +++ b/client/result_test.go @@ -0,0 +1,39 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package client + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestExecResult(t *testing.T) { + Convey("test result", t, func() { + r := &execResult{ + affectedRows: 1, + lastInsertID: 2, + } + + i, err := r.LastInsertId() + So(i, ShouldEqual, 2) + So(err, ShouldBeNil) + i, err = r.RowsAffected() + So(i, ShouldEqual, 1) + So(err, ShouldBeNil) + }) +} diff --git a/client/rows.go b/client/rows.go index cd599f146..dcacc2c06 100644 --- a/client/rows.go +++ b/client/rows.go @@ -21,16 +21,16 @@ import ( "io" "strings" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" ) type rows struct { columns []string types []string - data []wt.ResponseRow + data []types.ResponseRow } -func newRows(res *wt.Response) *rows { +func newRows(res *types.Response) *rows { return &rows{ columns: res.Payload.Columns, types: res.Payload.DeclTypes, diff --git a/client/rows_test.go b/client/rows_test.go index 80795c7e8..15602b4eb 100644 --- a/client/rows_test.go +++ b/client/rows_test.go @@ -15,3 +15,45 @@ */ package client + +import ( + "database/sql/driver" + "io" + "testing" + + "github.com/CovenantSQL/CovenantSQL/types" + . "github.com/smartystreets/goconvey/convey" +) + +func TestRowsStructure(t *testing.T) { + Convey("test rows", t, func() { + r := newRows(&types.Response{ + Payload: types.ResponsePayload{ + Columns: []string{ + "a", + }, + DeclTypes: []string{ + "int", + }, + Rows: []types.ResponseRow{ + { + Values: []interface{}{1}, + }, + }, + }, + }) + columns := r.Columns() + So(columns, ShouldResemble, []string{"a"}) + So(r.ColumnTypeDatabaseTypeName(0), ShouldEqual, "INT") + + dest := make([]driver.Value, 1) + err := r.Next(dest) + So(err, ShouldBeNil) + So(dest[0], ShouldEqual, 1) + err = r.Next(dest) + So(err, ShouldEqual, io.EOF) + err = r.Close() + So(err, ShouldBeNil) + So(r.data, ShouldBeNil) + }) +} diff --git a/client/stmt_test.go b/client/stmt_test.go index 4672a9a92..a3aebb0fa 100644 --- a/client/stmt_test.go +++ b/client/stmt_test.go @@ -17,7 +17,10 @@ package client import ( + "context" "database/sql" + "database/sql/driver" + "fmt" "testing" .
"github.com/smartystreets/goconvey/convey" @@ -95,10 +98,84 @@ func TestStmt(t *testing.T) { _, err = stmt.Exec() So(err, ShouldNotBeNil) + ctx := context.Background() + err = ExecuteTx(ctx, db, nil /* txopts */, func(tx *sql.Tx) error { + _, err := tx.Exec("insert into test values(?)", 7) + if err != nil { + return err + } + _, err = tx.Exec("insert into test values(?)", 8) + if err != nil { + return err + } + _, err = tx.Exec("insert into test values(?)", 9) + if err != nil { + return err + } + return err + }) + So(err, ShouldBeNil) + + row = db.QueryRow("select count(1) as cnt from test") + So(row, ShouldNotBeNil) + err = row.Scan(&result) + So(err, ShouldBeNil) + So(result, ShouldEqual, 6) + + err = ExecuteTx(ctx, db, nil /* txopts */, func(tx *sql.Tx) error { + _, err := tx.Exec("insert into test values(?)", 10) + if err != nil { + return err + } + _, err = tx.Exec("insert into testNoExist values(?)", 11) + if err != nil { + return err + } + _, err = tx.Exec("insert into test values(?)", 12) + if err != nil { + return err + } + return err + }) + So(err, ShouldNotBeNil) + + err = ExecuteTx(ctx, db, nil /* txopts */, func(tx *sql.Tx) error { + _, err := tx.Exec("insert into test values(?)", 10) + if err != nil { + return err + } + return fmt.Errorf("some error") + }) + So(err, ShouldNotBeNil) + + row = db.QueryRow("select count(1) as cnt from test") + So(row, ShouldNotBeNil) + err = row.Scan(&result) + So(err, ShouldBeNil) + So(result, ShouldEqual, 6) + db.Close() // prepare on closed _, err = db.Prepare("select * from test") So(err, ShouldNotBeNil) + + err = ExecuteTx(nil, db, nil /* txopts */, func(tx *sql.Tx) error { + return nil + }) + So(err, ShouldNotBeNil) + + // closed stmt and old args + cs := newStmt(nil, "test query") + cs.Close() + + _, err = cs.Query([]driver.Value{1}) + So(err, ShouldNotBeNil) + + _, err = cs.Exec([]driver.Value{2}) + err = ExecuteTx(nil, db, nil /* txopts */, func(tx *sql.Tx) error { + return nil + }) + So(err, ShouldNotBeNil) }) } diff --git a/client/tx.go b/client/tx.go new file mode 100644 index 000000000..047d1ccfe --- /dev/null +++ b/client/tx.go @@ -0,0 +1,50 @@ +/* + * Copyright 2016 The Cockroach Authors. + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package client + +import ( + "context" + "database/sql" + "database/sql/driver" +) + +// ExecuteTx starts a transaction, and runs fn in it +func ExecuteTx( + ctx context.Context, db *sql.DB, txopts *sql.TxOptions, fn func(*sql.Tx) error, +) error { + // Start a transaction. + tx, err := db.BeginTx(ctx, txopts) + if err != nil { + return err + } + return ExecuteInTx(tx, func() error { return fn(tx) }) +} + +// ExecuteInTx runs fn inside tx which should already have begun. +func ExecuteInTx(tx driver.Tx, fn func() error) (err error) { + err = fn() + if err == nil { + // Ignore commit errors. The tx has already been committed by RELEASE. 
+ err = tx.Commit() + } else { + // We always need to execute a Rollback() so sql.DB releases the + // connection. + _ = tx.Rollback() + } + return +} diff --git a/cmd/cql-adapter/api/query.go b/cmd/cql-adapter/api/query.go index aed0b15de..abf0d9fc1 100644 --- a/cmd/cql-adapter/api/query.go +++ b/cmd/cql-adapter/api/query.go @@ -139,10 +139,15 @@ func (a *queryAPI) Write(rw http.ResponseWriter, r *http.Request) { log.WithField("db", dbID).WithField("query", query).Info("got exec") var err error - if err = config.GetConfig().StorageInstance.Exec(dbID, query); err != nil { + var affectedRows int64 + var lastInsertID int64 + if affectedRows, lastInsertID, err = config.GetConfig().StorageInstance.Exec(dbID, query); err != nil { sendResponse(http.StatusInternalServerError, false, err, nil, rw) return } - sendResponse(http.StatusOK, true, nil, nil, rw) + sendResponse(http.StatusOK, true, nil, map[string]interface{}{ + "last_insert_id": lastInsertID, + "affected_rows": affectedRows, + }, rw) }
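For HTTP callers of the adapter, the write endpoint now carries both counters in its payload. The response envelope is produced by sendResponse, which is outside this diff, so only the data map built above is certain; a client-side type for that portion might look like this (the wrapper object around it is deliberately left out):

// WriteResult mirrors the map assembled in queryAPI.Write above.
type WriteResult struct {
	LastInsertID int64 `json:"last_insert_id"`
	AffectedRows int64 `json:"affected_rows"`
}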
diff --git a/cmd/cql-adapter/storage/covenantsql.go b/cmd/cql-adapter/storage/covenantsql.go index f5847d7b8..9750c891c 100644 --- a/cmd/cql-adapter/storage/covenantsql.go +++ b/cmd/cql-adapter/storage/covenantsql.go @@ -92,14 +92,20 @@ func (s *CovenantSQLStorage) Query(dbID string, query string) (columns []string, } // Exec implements the Storage abstraction interface. -func (s *CovenantSQLStorage) Exec(dbID string, query string) (err error) { +func (s *CovenantSQLStorage) Exec(dbID string, query string) (affectedRows int64, lastInsertID int64, err error) { var conn *sql.DB if conn, err = s.getConn(dbID); err != nil { return } defer conn.Close() - _, err = conn.Exec(query) + var result sql.Result + result, err = conn.Exec(query) + + if err == nil { + affectedRows, _ = result.RowsAffected() + lastInsertID, _ = result.LastInsertId() + } return } diff --git a/cmd/cql-adapter/storage/sqlite3.go b/cmd/cql-adapter/storage/sqlite3.go index f3a80f1af..aac13f94d 100644 --- a/cmd/cql-adapter/storage/sqlite3.go +++ b/cmd/cql-adapter/storage/sqlite3.go @@ -115,14 +115,20 @@ func (s *SQLite3Storage) Query(dbID string, query string) (columns []string, typ } // Exec implements the Storage abstraction interface. -func (s *SQLite3Storage) Exec(dbID string, query string) (err error) { +func (s *SQLite3Storage) Exec(dbID string, query string) (affectedRows int64, lastInsertID int64, err error) { var conn *sql.DB if conn, err = s.getConn(dbID, false); err != nil { return } defer conn.Close() - _, err = conn.Exec(query) + var result sql.Result + result, err = conn.Exec(query) + + if err == nil { + affectedRows, _ = result.RowsAffected() + lastInsertID, _ = result.LastInsertId() + } return } diff --git a/cmd/cql-adapter/storage/storage.go b/cmd/cql-adapter/storage/storage.go index f10640166..c35dbfbfa 100644 --- a/cmd/cql-adapter/storage/storage.go +++ b/cmd/cql-adapter/storage/storage.go @@ -30,7 +30,7 @@ type Storage interface { // Query for result. Query(dbID string, query string) (columns []string, types []string, rows [][]interface{}, err error) // Exec for update. - Exec(dbID string, query string) (err error) + Exec(dbID string, query string) (affectedRows int64, lastInsertID int64, err error) } // golang does trick convert, use rowScanner to return the original result type in sqlite3 driver diff --git a/cmd/cql-explorer/service.go b/cmd/cql-explorer/service.go index 0ccda7ff3..d77181285 100644 --- a/cmd/cql-explorer/service.go +++ b/cmd/cql-explorer/service.go @@ -215,7 +215,7 @@ func (s *Service) getTxByHash(h *hash.Hash) (tx pi.Transaction, c uint32, height continue } - if curH := curTx.GetHash(); h.IsEqual(&curH) { + if curH := curTx.Hash(); h.IsEqual(&curH) { tx = curTx break } @@ -367,7 +367,7 @@ func (s *Service) saveTransaction(c uint32, tx pi.Transaction) (err error) { return ErrNilTransaction } - txHash := tx.GetHash() + txHash := tx.Hash() var txKey []byte diff --git a/cmd/cql-fuse/block.go b/cmd/cql-fuse/block.go new file mode 100644 index 000000000..ceb421336 --- /dev/null +++ b/cmd/cql-fuse/block.go @@ -0,0 +1,361 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) + +package main + +import ( + "fmt" + "strings" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +// BlockSize is the size of each data block. It must not +// change throughout the lifetime of the filesystem. +const BlockSize = 4 << 10 // 4KB + +func min(a, b uint64) uint64 { + if a < b { + return a + } + return b } + +// blockRange describes a range of blocks. +// If the first and last block are the same, the effective data range +// will be: [startOffset, lastLength) +type blockRange struct { + start int // index of the start block + startOffset uint64 // starting offset within the first block + startLength uint64 // length of data in first block + last int // index of the last block + lastLength uint64 // length of the last block +} + +// newBlockRange returns the block range for 'length' bytes from 'from'. +func newBlockRange(from, length uint64) blockRange { + end := from + length + return blockRange{ + start: int(from / BlockSize), + startOffset: from % BlockSize, + startLength: min(length, BlockSize-(from%BlockSize)), + last: int(end / BlockSize), + lastLength: end % BlockSize, + } +}
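Since shrink and grow below both lean on this arithmetic, here is a worked example of newBlockRange with the 4KB BlockSize (numbers are illustrative, not taken from the original):

// newBlockRange(from: 5000, length: 10000) => end = 15000, so:
//   start       = 5000 / 4096            = 1    (first touched block)
//   startOffset = 5000 % 4096            = 904  (offset within block 1)
//   startLength = min(10000, 4096 - 904) = 3192 (data bytes in block 1)
//   last        = 15000 / 4096           = 3    (last touched block)
//   lastLength  = 15000 % 4096           = 2712 (data bytes in block 3)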
+ +// shrink resizes the data to a smaller length. +// Requirement: from > to. +// If truncates are done on block boundaries, this is reasonably +// efficient. However, if truncating in the middle of a block, +// we need to fetch the block first, truncate it, and write it again. +func shrink(e sqlExecutor, inodeID, from, to uint64) error { + delRange := newBlockRange(to, from-to) + deleteFrom := delRange.start + + if delRange.startOffset > 0 { + // We're truncating in the middle of a block, fetch it, truncate its + // data, and write it again. + // TODO(marc): this would be more efficient if we had LEFT for bytes. + data, err := getBlockData(e, inodeID, delRange.start) + if err != nil { + return err + } + data = data[:delRange.startOffset] + if err := updateBlockData(e, inodeID, delRange.start, data); err != nil { + return err + } + // We don't need to delete this block. + deleteFrom++ + } + + deleteTo := delRange.last + if delRange.lastLength == 0 { + // The last block did not previously exist. + deleteTo-- + } + if deleteTo < deleteFrom { + return nil + } + + // There is something to delete. + // TODO(marc): would it be better to pass the block IDs? + delStmt := `DELETE FROM fs_BLOCK WHERE id = ? AND block >= ?` + if _, err := e.Exec(delStmt, inodeID, deleteFrom); err != nil { + return err + } + + return nil +} + +// grow resizes the data to a larger length. +// Requirement: to > from. +// If the file ended in a partial block, we fetch it, grow it, +// and write it back. +func grow(e sqlExecutor, inodeID, from, to uint64) error { + addRange := newBlockRange(from, to-from) + insertFrom := addRange.start + + if addRange.startOffset > 0 { + // We need to extend the original 'last block'. + // Fetch it, grow it, and update it. + // TODO(marc): this would be more efficient if we had RPAD for bytes. + data, err := getBlockData(e, inodeID, addRange.start) + if err != nil { + return err + } + data = append(data, make([]byte, addRange.startLength, addRange.startLength)...) + if err := updateBlockData(e, inodeID, addRange.start, data); err != nil { + return err + } + // We don't need to insert this block. + insertFrom++ + } + + insertTo := addRange.last + if insertTo < insertFrom { + return nil + } + + // Build the sql statement and blocks to insert. + // We don't share this functionality with 'write' because we can repeat empty blocks. + // This would be shorter if we weren't trying to be efficient. + // TODO(marc): this would also be better if we supported sparse files. + paramStrings := []string{} + params := []interface{}{} + count := 1 // placeholder count starts at 1. + if insertFrom != insertTo { + // We have full blocks. Only send a full block once. + for i := insertFrom; i < insertTo; i++ { + params = append(params, make([]byte, BlockSize, BlockSize)) + } + count++ + } + + // Go over all blocks that are certainly full. + for i := insertFrom; i < insertTo; i++ { + paramStrings = append(paramStrings, fmt.Sprintf("(%d, %d, ?)", inodeID, i)) + } + + // Check the last block. + if addRange.lastLength > 0 { + // Not empty, write it. It can't be a full block, because we + // would have an empty block right after. + params = append(params, make([]byte, addRange.lastLength, addRange.lastLength)) + paramStrings = append(paramStrings, fmt.Sprintf("(%d, %d, ?)", + inodeID, addRange.last)) + count++ + } + + if len(paramStrings) == 0 { + // We had only one block, and it was empty. Nothing to do.
+ return nil + } + + insStmt := fmt.Sprintf(`INSERT INTO fs_block VALUES %s`, strings.Join(paramStrings, ",")) + if _, err := e.Exec(insStmt, params...); err != nil { + return err + } + + return nil +} + +// read returns the data [from, to). +// Requires: to > from and [from, to) is contained in the file. +func read(e sqlExecutor, inodeID, from, to uint64) ([]byte, error) { + readRange := newBlockRange(from, to-from) + end := readRange.last + if readRange.lastLength == 0 { + end-- + } + + blockInfos, err := getBlocksBetween(e, inodeID, readRange.start, end) + if err != nil { + return nil, err + } + if len(blockInfos) != end-readRange.start+1 { + return nil, fmt.Errorf("wrong number of blocks, asked for [%d-%d], got %d back", + readRange.start, end, len(blockInfos)) + } + + if readRange.lastLength != 0 { + // We have a last partial block, truncate it. + last := len(blockInfos) - 1 + blockInfos[last].data = blockInfos[last].data[:readRange.lastLength] + } + blockInfos[0].data = blockInfos[0].data[readRange.startOffset:] + + var data []byte + for _, b := range blockInfos { + data = append(data, b.data...) + } + + return data, nil +} + +// write commits data to the blocks starting at 'offset' +// Amount of data to write must be non-zero. +// If offset is greater than 'originalSize', the file is grown first. +// We always write all or nothing. +func write(e sqlExecutor, inodeID, originalSize, offset uint64, data []byte) error { + if offset > originalSize { + diff := offset - originalSize + if diff > BlockSize*2 { + // we need to grow the file by at least two blocks. Use growing method + // which only sends empty blocks once. + if err := grow(e, inodeID, originalSize, offset); err != nil { + return err + } + originalSize = offset + } else if diff > 0 { + // don't grow the file first, just change what we need to write. + data = append(make([]byte, diff, diff), data...) + offset = originalSize + } + } + + // Now we know that offset is <= originalSize. + writeRange := newBlockRange(offset, uint64(len(data))) + writeFrom := writeRange.start + + if writeRange.startOffset > 0 { + // We're partially overwriting a block (this includes appending + // to the last block): fetch it, grow it, and update it. + // TODO(marc): this would be more efficient if we had RPAD for bytes. + blockData, err := getBlockData(e, inodeID, writeRange.start) + if err != nil { + return err + } + blockData = append(blockData[:writeRange.startOffset], data[:writeRange.startLength]...) + data = data[writeRange.startLength:] + if err := updateBlockData(e, inodeID, writeRange.start, blockData); err != nil { + return err + } + // We don't need to insert this block. + writeFrom++ + } + + writeTo := writeRange.last + if writeRange.lastLength == 0 { + // Last block is empty, don't update/insert it. + writeTo-- + } + if writeTo < writeFrom { + return nil + } + + // Figure out last existing block. Needed to tell the difference + // between insert and update. + lastBlock := int(originalSize / BlockSize) + if originalSize%BlockSize == 0 { + // Empty blocks do not exist (size=0 -> lastblock=-1). + lastBlock-- + } + + // Process updates first. + for i := writeFrom; i <= writeTo; i++ { + if i > lastBlock { + // We've reached the end of existing blocks, no more UPDATE.
+ break + } + if len(data) == 0 { + panic(fmt.Sprintf("reached end of data, but still have %d blocks to write", + writeTo-i)) + } + toWrite := min(BlockSize, uint64(len(data))) + blockData := data[:toWrite] + data = data[toWrite:] + if toWrite != BlockSize { + // This is the last block, and it's partial, fetch the original + // data from this block and append. + // TODO(marc): we could fetch this at the same time as the first + // partial block, if any. This would make overwriting in the middle + // of the file on non-block boundaries a bit more efficient. + origData, err := getBlockData(e, inodeID, i) + if err != nil { + return err + } + toWrite = min(toWrite, uint64(len(origData))) + blockData = append(blockData, origData[toWrite:]...) + } + // TODO(marc): is there a way to do batch updates? + if err := updateBlockData(e, inodeID, i, blockData); err != nil { + return err + } + } + + if len(data) == 0 { + return nil + } + + paramStrings := []string{} + params := []interface{}{} + count := 1 // placeholder count starts at 1. + + for i := lastBlock + 1; i <= writeTo; i++ { + if len(data) == 0 { + panic(fmt.Sprintf("reached end of data, but still have %d blocks to write", + writeTo-i)) + } + toWrite := min(BlockSize, uint64(len(data))) + blockData := data[:toWrite] + data = data[toWrite:] + paramStrings = append(paramStrings, fmt.Sprintf("(%d, %d, ?)", + inodeID, i)) + params = append(params, blockData) + count++ + } + + if len(data) != 0 { + panic(fmt.Sprintf("processed all blocks, but still have %d of data to write", len(data))) + } + + insStmt := fmt.Sprintf(`INSERT INTO fs_block VALUES %s`, strings.Join(paramStrings, ",")) + log.Warn(insStmt, params) + if _, err := e.Exec(insStmt, params...); err != nil { + return err + } + + return nil +} + +// resize changes the size of the data for the inode with id 'inodeID' +// from 'from' to 'to'. This may grow or shrink. +func resizeBlocks(e sqlExecutor, inodeID, from, to uint64) error { + if to < from { + return shrink(e, inodeID, from, to) + } else if to > from { + return grow(e, inodeID, from, to) + } + return nil +} diff --git a/cmd/cql-fuse/block_test.go b/cmd/cql-fuse/block_test.go new file mode 100644 index 000000000..13ec53664 --- /dev/null +++ b/cmd/cql-fuse/block_test.go @@ -0,0 +1,540 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. 
See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) + +package main + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "sync" + "syscall" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +var ( + baseDir = utils.GetProjectSrcDir() + testWorkingDir = FJ(baseDir, "./test/") + logDir = FJ(testWorkingDir, "./log/") + db *sql.DB +) + +var nodeCmds []*utils.CMD + +var FJ = filepath.Join + +func TestMain(m *testing.M) { + os.Exit(func() int { + var stop func() + db, stop = initTestDB() + defer stop() + defer db.Close() + return m.Run() + }()) +} + +func startNodes() { + ctx := context.Background() + + // wait for ports to be available + var err error + + err = utils.WaitForPorts(ctx, "127.0.0.1", []int{ + 6122, + 6121, + 6120, + }, time.Millisecond*200) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + // start 3bps + var cmd *utils.CMD + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_0/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/leader.cover.out"), + }, + "leader", testWorkingDir, logDir, true, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_1/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/follower1.cover.out"), + }, + "follower1", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_2/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/follower2.cover.out"), + }, + "follower2", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + err = utils.WaitToConnect(ctx, "127.0.0.1", []int{ + 6122, + 6121, + 6120, + }, time.Second) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + err = utils.WaitForPorts(ctx, "127.0.0.1", []int{ + 3144, + 3145, + 3146, + }, time.Millisecond*200) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + time.Sleep(10 * time.Second) + + // start 3miners + os.RemoveAll(FJ(testWorkingDir, "./fuse/node_miner_0/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_miner_0/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/miner0.cover.out"), + }, + "miner0", testWorkingDir, logDir, true, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + os.RemoveAll(FJ(testWorkingDir, "./fuse/node_miner_1/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_miner_1/config.yaml"), + "-test.coverprofile", FJ(baseDir, 
"./cmd/cql-fuse/miner1.cover.out"), + }, + "miner1", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + os.RemoveAll(FJ(testWorkingDir, "./fuse/node_miner_2/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_miner_2/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/miner2.cover.out"), + }, + "miner2", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } +} + +func stopNodes() { + var wg sync.WaitGroup + testDir := FJ(testWorkingDir, "./fuse") + for _, nodeCmd := range nodeCmds { + wg.Add(1) + go func(thisCmd *utils.CMD) { + defer wg.Done() + thisCmd.Cmd.Process.Signal(syscall.SIGTERM) + thisCmd.Cmd.Wait() + grepRace := exec.Command("/bin/sh", "-c", "grep -A 50 'DATA RACE' "+thisCmd.LogPath) + out, _ := grepRace.Output() + if len(out) > 2 { + log.Fatalf("DATA RACE in %s :\n%s", thisCmd.Cmd.Path, string(out)) + } + }(nodeCmd) + } + + wg.Wait() + cmd := exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.db' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.db-shm' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.db-wal' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name 'db.meta' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name 'public.keystore' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.public.keystore' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.ldb' -exec rm -vrf {} \;`, testDir)) + cmd.Run() +} + +func initTestDB() (*sql.DB, func()) { + + startNodes() + var err error + + time.Sleep(10 * time.Second) + + err = client.Init(FJ(testWorkingDir, "./fuse/node_c/config.yaml"), []byte("")) + if err != nil { + log.Errorf("init client failed: %v", err) + return nil, stopNodes + } + + // create + dsn, err := client.Create(client.ResourceMeta{Node: 1}) + if err != nil { + log.Errorf("create db failed: %v", err) + return nil, stopNodes + } + + log.Infof("the created database dsn is %v", dsn) + + db, err := sql.Open("covenantsql", dsn) + if err != nil { + log.Errorf("open db failed: %v", err) + return nil, stopNodes + } + + if err := initSchema(db); err != nil { + stopNodes() + log.Fatal(err) + } + + return db, stopNodes +} + +func getAllBlocks(db *sql.DB, inode uint64) ([]byte, error) { + blocks, err := getBlocks(db, inode) + if err != nil { + return nil, err + } + num := len(blocks) + var data []byte + for i, b := range blocks { + if i != b.block { + // We can't have missing blocks. + return nil, fmt.Errorf("gap in block list, found block %d at index %d", b.block, i) + } + bl := uint64(len(b.data)) + if bl == 0 { + return nil, fmt.Errorf("empty block found at %d (out of %d blocks)", i, num) + } + if i != (num-1) && bl != BlockSize { + return nil, fmt.Errorf("non-blocksize %d at %d (out of %d blocks)", bl, i, num) + } + data = append(data, b.data...) 
+ } + return data, nil +} + +func TestBlockInfo(t *testing.T) { + testCases := []struct { + start, length uint64 + expected blockRange + }{ + {0, 0, blockRange{0, 0, 0, 0, 0}}, + {0, BlockSize * 4, blockRange{0, 0, BlockSize, 4, 0}}, + {0, BlockSize*4 + 500, blockRange{0, 0, BlockSize, 4, 500}}, + {500, BlockSize * 4, blockRange{0, 500, BlockSize - 500, 4, 500}}, + {BlockSize, BlockSize * 4, blockRange{1, 0, BlockSize, 5, 0}}, + {BlockSize, 500, blockRange{1, 0, 500, 1, 500}}, + {500, 1000, blockRange{0, 500, 1000, 0, 1500}}, + } + + for tcNum, tc := range testCases { + actual := newBlockRange(tc.start, tc.length) + if !reflect.DeepEqual(actual, tc.expected) { + t.Errorf("#%d: expected:\n%+v\ngot:\n%+v", tcNum, tc.expected, actual) + } + } +} + +func tryGrow(db *sql.DB, data []byte, id, newSize uint64) ([]byte, error) { + originalSize := uint64(len(data)) + data = append(data, make([]byte, newSize-originalSize)...) + if err := grow(db, id, originalSize, newSize); err != nil { + return nil, err + } + newData, err := getAllBlocks(db, id) + if err != nil { + return nil, err + } + if uint64(len(newData)) != newSize { + return nil, fmt.Errorf("getAllBlocks lengths don't match: got %d, expected %d", len(newData), newSize) + } + if !bytes.Equal(data, newData) { + return nil, fmt.Errorf("getAllBlocks data doesn't match") + } + + if newSize == 0 { + return newData, nil + } + + // Check the read as well. + newData, err = read(db, id, 0, newSize) + if err != nil { + return nil, err + } + + if uint64(len(newData)) != newSize { + return nil, fmt.Errorf("read lengths don't match: got %d, expected %d", len(newData), newSize) + } + if !bytes.Equal(data, newData) { + return nil, fmt.Errorf("read data doesn't match") + } + + return newData, nil +} + +func tryShrink(db *sql.DB, data []byte, id, newSize uint64) ([]byte, error) { + originalSize := uint64(len(data)) + data = data[:newSize] + if err := shrink(db, id, originalSize, newSize); err != nil { + return nil, err + } + newData, err := getAllBlocks(db, id) + if err != nil { + return nil, err + } + if uint64(len(newData)) != newSize { + return nil, fmt.Errorf("getAllData lengths don't match: got %d, expected %d", len(newData), newSize) + } + if !bytes.Equal(data, newData) { + return nil, fmt.Errorf("getAllData data doesn't match") + } + + if newSize == 0 { + return newData, nil + } + + // Check the read as well. + newData, err = read(db, id, 0, newSize) + if err != nil { + return nil, err + } + + if uint64(len(newData)) != newSize { + return nil, fmt.Errorf("read lengths don't match: got %d, expected %d", len(newData), newSize) + } + if !bytes.Equal(data, newData) { + return nil, fmt.Errorf("read data doesn't match") + } + + return newData, nil +} + +func TestShrinkGrow(t *testing.T) { + + id := uint64(10) + + var err error + data := []byte{} + + if data, err = tryGrow(db, data, id, BlockSize*4+500); err != nil { + log.Fatal(err) + } + if data, err = tryGrow(db, data, id, BlockSize*4+600); err != nil { + log.Fatal(err) + } + if data, err = tryGrow(db, data, id, BlockSize*5); err != nil { + log.Fatal(err) + } + + // Shrink it down to 0. 
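+ // After that, alternate grows and shrinks around block boundaries
+ // to exercise the partial-block fetch/rewrite paths in both helpers.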
+ if data, err = tryShrink(db, data, id, 0); err != nil { + log.Fatal(err) + } + if data, err = tryGrow(db, data, id, BlockSize*3+500); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, BlockSize*3+300); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, BlockSize*3); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, 0); err != nil { + log.Fatal(err) + } + if data, err = tryGrow(db, data, id, BlockSize); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, BlockSize-200); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, BlockSize-500); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, 0); err != nil { + log.Fatal(err) + } +} + +func TestReadWriteBlocks(t *testing.T) { + + id := uint64(10) + rng, _ := NewPseudoRand() + length := BlockSize*3 + 500 + part1 := RandBytes(rng, length) + + if err := write(db, id, 0, 0, part1); err != nil { + log.Fatal(err) + } + + readData, err := read(db, id, 0, uint64(length)) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(part1, readData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(readData), len(part1)) + } + + verboseData, err := getAllBlocks(db, id) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(verboseData, part1) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(verboseData), len(part1)) + } + + // Write with hole in the middle. + part2 := make([]byte, BlockSize*2+250, BlockSize*2+250) + fullData := append(part1, part2...) + part3 := RandBytes(rng, BlockSize+123) + if err := write(db, id, uint64(len(part1)), uint64(len(fullData)), part3); err != nil { + log.Fatal(err) + } + fullData = append(fullData, part3...) + readData, err = read(db, id, 0, uint64(len(fullData))) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(fullData, readData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(readData), len(fullData)) + } + + verboseData, err = getAllBlocks(db, id) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(verboseData, fullData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(verboseData), len(fullData)) + } + + // Now write into the middle of the file. + part2 = RandBytes(rng, len(part2)) + if err := write(db, id, uint64(len(fullData)), uint64(len(part1)), part2); err != nil { + log.Fatal(err) + } + fullData = append(part1, part2...) + fullData = append(fullData, part3...) + readData, err = read(db, id, 0, uint64(len(fullData))) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(fullData, readData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(readData), len(fullData)) + } + + verboseData, err = getAllBlocks(db, id) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(verboseData, fullData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(verboseData), len(fullData)) + } + + // New file. + id2 := uint64(20) + if err := write(db, id2, 0, 0, []byte("1")); err != nil { + log.Fatal(err) + } + readData, err = read(db, id2, 0, 1) + if err != nil { + log.Fatal(err) + } + if string(readData) != "1" { + log.Fatalf("mismatch: %s", readData) + } + + if err := write(db, id2, 1, 0, []byte("22")); err != nil { + log.Fatal(err) + } + readData, err = read(db, id2, 0, 2) + if err != nil { + log.Fatal(err) + } + if string(readData) != "22" { + log.Fatalf("mismatch: %s", readData) + } + + id3 := uint64(30) + part1 = RandBytes(rng, BlockSize) + // Write 5 blocks. 
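+ // Each write below is exactly one block at a block-aligned offset,
+ // so no partial-block fetch should be needed.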
+ var offset uint64
+ for i := 0; i < 5; i++ {
+ if err := write(db, id3, offset, offset, part1); err != nil {
+ log.Fatal(err)
+ }
+ offset += BlockSize
+ }
+}
diff --git a/cmd/cql-fuse/fs.go b/cmd/cql-fuse/fs.go
new file mode 100644
index 000000000..82d9e1166
--- /dev/null
+++ b/cmd/cql-fuse/fs.go
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Copyright 2015 The Cockroach Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License. See the AUTHORS file
+// for names of contributors.
+//
+// Author: Marc Berhault (marc@cockroachlabs.com)
+
+package main
+
+import (
+ "context"
+ "database/sql"
+ "os"
+ "syscall"
+ "time"
+
+ "bazil.org/fuse"
+ "bazil.org/fuse/fs"
+ "github.com/CovenantSQL/CovenantSQL/client"
+ "github.com/CovenantSQL/CovenantSQL/crypto/hash"
+ "github.com/CovenantSQL/CovenantSQL/crypto/kms"
+)
+
+const rootNodeID = 1
+
+const (
+ fsSchema = `
+CREATE TABLE IF NOT EXISTS fs_namespace (
+ parentID INT,
+ name STRING,
+ id INT,
+ PRIMARY KEY (parentID, name)
+);
+
+CREATE TABLE IF NOT EXISTS fs_inode (
+ id INT PRIMARY KEY,
+ inode STRING
+);
+
+CREATE TABLE IF NOT EXISTS fs_block (
+ id INT,
+ block INT,
+ data BYTES,
+ PRIMARY KEY (id, block)
+);
+`
+)
+
+var _ fs.FS = &CFS{} // Root
+var _ fs.FSInodeGenerator = &CFS{} // GenerateInode
+
+// CFS implements a filesystem on top of CovenantSQL.
+type CFS struct {
+ db *sql.DB
+}
+
+func initSchema(db *sql.DB) error {
+ _, err := db.Exec(fsSchema)
+ return err
+}
+
+// create inserts a new node.
+// parentID: inode ID of the parent directory.
+// name: name of the new node
+// node: new node
+func (cfs CFS) create(ctx context.Context, parentID uint64, name string, node *Node) error {
+ inode := node.toJSON()
+ const insertNode = `INSERT INTO fs_inode VALUES (?, ?)`
+ const insertNamespace = `INSERT INTO fs_namespace VALUES (?, ?, ?)`
+
+ err := client.ExecuteTx(ctx, cfs.db, nil /* txopts */, func(tx *sql.Tx) error {
+ if _, err := tx.Exec(insertNode, node.ID, inode); err != nil {
+ return err
+ }
+ if _, err := tx.Exec(insertNamespace, parentID, name, node.ID); err != nil {
+ return err
+ }
+ return nil
+ })
+ return err
+}
+
+// remove removes a node given its name and its parent ID.
+// If 'checkChildren' is true, fails if the node has children.
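+// The namespace entry, the inode, and its data blocks are deleted in
+// a single transaction; only the initial ID lookup and the optional
+// empty-directory check run outside of it.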
+func (cfs CFS) remove(ctx context.Context, parentID uint64, name string, checkChildren bool) error { + const lookupSQL = `SELECT id FROM fs_namespace WHERE (parentID, name) = (?, ?)` + const deleteNamespace = `DELETE FROM fs_namespace WHERE (parentID, name) = (?, ?)` + const deleteInode = `DELETE FROM fs_inode WHERE id = ?` + const deleteBlock = `DELETE FROM fs_block WHERE id = ?` + // Start by looking up the node ID. + var id uint64 + if err := cfs.db.QueryRow(lookupSQL, parentID, name).Scan(&id); err != nil { + return err + } + // Check if there are any children. + if checkChildren { + if err := checkIsEmpty(cfs.db, id); err != nil { + return err + } + } + + err := client.ExecuteTx(ctx, cfs.db, nil /* txopts */, func(tx *sql.Tx) error { + // Delete all entries. + if _, err := tx.Exec(deleteNamespace, parentID, name); err != nil { + return err + } + if _, err := tx.Exec(deleteInode, id); err != nil { + return err + } + if _, err := tx.Exec(deleteBlock, id); err != nil { + return err + } + return nil + }) + return err +} + +func (cfs CFS) lookup(parentID uint64, name string) (*Node, error) { + return getInode(cfs.db, parentID, name) +} + +// list returns the children of the node with id 'parentID'. +// Dirent consists of: +// Inode uint64 +// Type DirentType (optional) +// Name string +// TODO(pmattis): lookup all inodes and fill in the type, this will save a Getattr(). +func (cfs CFS) list(parentID uint64) ([]fuse.Dirent, error) { + rows, err := cfs.db.Query(`SELECT name, id FROM fs_namespace WHERE parentID = ?`, parentID) + if err != nil { + return nil, err + } + + var results []fuse.Dirent + for rows.Next() { + dirent := fuse.Dirent{Type: fuse.DT_Unknown} + if err := rows.Scan(&dirent.Name, &dirent.Inode); err != nil { + return nil, err + } + results = append(results, dirent) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return results, nil +} + +// validateRename takes a source and destination node and verifies that +// a rename can be performed from source to destination. +// source must not be nil. destination can be. +func validateRename(e sqlExecutor, source, destination *Node) error { + if destination == nil { + // No object at destination: good. + return nil + } + + if source.isDir() { + if destination.isDir() { + // Both are directories: destination must be empty + return checkIsEmpty(e, destination.ID) + } + // directory -> file: not allowed. + return fuse.Errno(syscall.ENOTDIR) + } + + // Source is a file. + if destination.isDir() { + // file -> directory: not allowed. + return fuse.Errno(syscall.EISDIR) + } + return nil +} + +// rename moves 'oldParentID/oldName' to 'newParentID/newName'. +// If 'newParentID/newName' already exists, it is deleted. +// See NOTE on node.go:Rename. +func (cfs CFS) rename( + ctx context.Context, oldParentID, newParentID uint64, oldName, newName string, +) error { + if oldParentID == newParentID && oldName == newName { + return nil + } + + const deleteNamespace = `DELETE FROM fs_namespace WHERE (parentID, name) = (?, ?)` + const insertNamespace = `INSERT INTO fs_namespace VALUES (?, ?, ?)` + const updateNamespace = `UPDATE fs_namespace SET id = ? WHERE (parentID, name) = (?, ?)` + const deleteInode = `DELETE FROM fs_inode WHERE id = ?` + + // Lookup source inode. + srcObject, err := getInode(cfs.db, oldParentID, oldName) + if err != nil { + return err + } + + // Lookup destination inode. 
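+ // A missing destination is fine: getInode returns sql.ErrNoRows and
+ // destObject stays nil; any other error aborts the rename.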
+ destObject, err := getInode(cfs.db, newParentID, newName) + if err != nil && err != sql.ErrNoRows { + return err + } + + // Check that the rename is allowed. + if err := validateRename(cfs.db, srcObject, destObject); err != nil { + return err + } + + err = client.ExecuteTx(ctx, cfs.db, nil /* txopts */, func(tx *sql.Tx) error { + // At this point we know the following: + // - srcObject is not nil + // - destObject may be nil. If not, its inode can be deleted. + if destObject == nil { + // No new object: use INSERT. + if _, err := tx.Exec(deleteNamespace, oldParentID, oldName); err != nil { + return err + } + + if _, err := tx.Exec(insertNamespace, newParentID, newName, srcObject.ID); err != nil { + return err + } + } else { + // Destination exists. + if _, err := tx.Exec(deleteNamespace, oldParentID, oldName); err != nil { + return err + } + + if _, err := tx.Exec(updateNamespace, srcObject.ID, newParentID, newName); err != nil { + return err + } + + if _, err := tx.Exec(deleteInode, destObject.ID); err != nil { + return err + } + } + return nil + }) + return err +} + +// Root returns the filesystem's root node. +// This node is special: it has a fixed ID and is not persisted. +func (cfs CFS) Root() (fs.Node, error) { + return &Node{cfs: cfs, ID: rootNodeID, Mode: os.ModeDir | defaultPerms}, nil +} + +// GenerateInode returns a new inode ID. +func (cfs CFS) GenerateInode(parentInode uint64, name string) uint64 { + return cfs.newUniqueID() +} + +func (cfs CFS) newUniqueID() (id uint64) { + // cockroach's unique_rowid() Contains time and space (node ID) components + // https://www.cockroachlabs.com/docs/stable/sql-faqs.html#\ + // what-are-the-differences-between-uuid-sequences-and-unique_rowid + // So, we just build one in the same way. + var idRand uint32 + nodeIDBytes, err := kms.GetLocalNodeIDBytes() + if err == nil { + idRand = hash.FNVHash32uint(nodeIDBytes) + } + return uint64(time.Now().UnixNano()) + uint64(idRand)<<32 + //if err := cfs.db.QueryRow(`SELECT unique_rowid()`).Scan(&id); err != nil { + // panic(err) + //} + //return +} + +// newFileNode returns a new node struct corresponding to a file. +func (cfs CFS) newFileNode() *Node { + return &Node{ + cfs: cfs, + ID: cfs.newUniqueID(), + Mode: defaultPerms, + } +} + +// newDirNode returns a new node struct corresponding to a directory. +func (cfs CFS) newDirNode() *Node { + return &Node{ + cfs: cfs, + ID: cfs.newUniqueID(), + Mode: os.ModeDir | defaultPerms, + } +} + +// newSymlinkNode returns a new node struct corresponding to a symlink. +func (cfs CFS) newSymlinkNode() *Node { + return &Node{ + cfs: cfs, + ID: cfs.newUniqueID(), + // Symlinks don't have permissions, allow all. + Mode: os.ModeSymlink | allPerms, + } +} diff --git a/cmd/cql-fuse/main.go b/cmd/cql-fuse/main.go new file mode 100644 index 000000000..6f0f9f4b5 --- /dev/null +++ b/cmd/cql-fuse/main.go @@ -0,0 +1,160 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+// Copyright 2015 The Cockroach Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License. See the AUTHORS file
+// for names of contributors.
+//
+// Author: Marc Berhault (marc@cockroachlabs.com)
+
+// This is a simple fuse filesystem that stores all metadata and data
+// in CovenantSQL.
+//
+// Inode relationships are stored in the `fs_namespace` table, and inodes
+// themselves in the `fs_inode` table.
+//
+// Data blocks are stored in the `fs_block` table, indexed by inode ID
+// and block number.
+//
+// Basic functionality is implemented, including:
+// - mk/rm directory
+// - create/rm files
+// - read/write files
+// - rename
+// - symlinks
+//
+// WARNING: concurrent access on a single mount is fine. However,
+// behavior is undefined (read: broken) when mounted more than once at the
+// same time. Specifically, read/writes will not be seen right away and
+// may work on out of date information.
+//
+// One caveat of the implemented features is that handles are not
+// reference counted, so if an inode is deleted, all open file descriptors
+// pointing to it become invalid.
+//
+// Some TODOs (definitely not a comprehensive list):
+// - support basic attributes (mode, timestamps)
+// - support other types: hard links
+// - add ref counting (and handle open/release)
+// - sparse files: don't store empty blocks
+// - sparse files 2: keep track of holes
+
+package main
+
+import (
+ "database/sql"
+ "flag"
+ "fmt"
+ "os"
+ "os/signal"
+
+ "github.com/CovenantSQL/CovenantSQL/client"
+ "github.com/CovenantSQL/CovenantSQL/utils/log"
+
+ "bazil.org/fuse"
+ "bazil.org/fuse/fs"
+ _ "bazil.org/fuse/fs/fstestutil"
+)
+
+var usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, " %s -config <config file> -dsn <dsn> -mount <mount point>\n\n", os.Args[0])
+ flag.PrintDefaults()
+}
+
+func main() {
+ var config, dsn, mountPoint, password string
+
+ flag.StringVar(&config, "config", "./conf/config.yaml", "config file path")
+ flag.StringVar(&mountPoint, "mount", "./", "dir to mount")
+ flag.StringVar(&dsn, "dsn", "", "database url")
+ flag.StringVar(&password, "password", "", "master key password for covenantsql")
+ flag.Usage = usage
+ flag.Parse()
+
+ log.SetLevel(log.DebugLevel)
+
+ err := client.Init(config, []byte(password))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ db, err := sql.Open("covenantsql", dsn)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ defer func() { _ = db.Close() }()
+
+ if err := initSchema(db); err != nil {
+ log.Fatal(err)
+ }
+
+ cfs := CFS{db}
+ // Mount filesystem.
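+ // (FSName/Subtype label the mount in the system mount table;
+ // LocalVolume and VolumeName only affect OSX Finder behavior.)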
+ c, err := fuse.Mount( + mountPoint, + fuse.FSName("CovenantFS"), + fuse.Subtype("CovenantFS"), + fuse.LocalVolume(), + fuse.VolumeName(""), + ) + if err != nil { + log.Fatal(err) + } + defer func() { + _ = c.Close() + }() + + go func() { + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt) + for range sig { + if err := fuse.Unmount(mountPoint); err != nil { + log.Printf("Signal received, but could not unmount: %s", err) + } else { + break + } + } + }() + + // Serve root. + err = fs.Serve(c, cfs) + if err != nil { + log.Fatal(err) + } + + // check if the mount process has an error to report + <-c.Ready + if err := c.MountError; err != nil { + log.Fatal(err) + } +} diff --git a/cmd/cql-fuse/node.go b/cmd/cql-fuse/node.go new file mode 100644 index 000000000..5ae46a38a --- /dev/null +++ b/cmd/cql-fuse/node.go @@ -0,0 +1,421 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) + +package main + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "log" + "math" + "os" + "sync" + "syscall" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + + "github.com/CovenantSQL/CovenantSQL/client" +) + +var _ fs.Node = &Node{} // Attr +var _ fs.NodeSetattrer = &Node{} // Setattr +var _ fs.NodeStringLookuper = &Node{} // Lookup +var _ fs.HandleReadDirAller = &Node{} // HandleReadDirAller +var _ fs.NodeMkdirer = &Node{} // Mkdir +var _ fs.NodeCreater = &Node{} // Create +var _ fs.NodeRemover = &Node{} // Remove +var _ fs.HandleWriter = &Node{} // Write +var _ fs.HandleReader = &Node{} // Read +var _ fs.NodeFsyncer = &Node{} // Fsync +var _ fs.NodeRenamer = &Node{} // Rename +var _ fs.NodeSymlinker = &Node{} // Symlink +var _ fs.NodeReadlinker = &Node{} // Readlink + +// Default permissions: we don't have any right now. +const defaultPerms = 0755 + +// All permissions. +const allPerms = 0777 + +// Maximum file size. +const maxSize = math.MaxUint64 + +// Maximum length of a symlink target. +const maxSymlinkTargetLength = 4096 + +// Node implements the Node interface. +// ID, Mode, and SymlinkTarget are currently immutable after node creation. +// Size (for files only) is protected by mu. +type Node struct { + cfs CFS + // ID is a unique ID allocated at node creation time. 
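+ // (Generated by CFS.newUniqueID: a nanosecond timestamp plus
+ // node-derived entropy, in place of cockroach's unique_rowid().)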
+ ID uint64
+ // Used for type only, permissions are ignored.
+ Mode os.FileMode
+ // SymlinkTarget is the path a symlink points to.
+ SymlinkTarget string
+
+ // Other fields to add:
+ // nLinks: number of hard links
+ // openFDs: number of open file descriptors
+ // timestamps (probably just ctime and mtime)
+
+ // Implicit fields:
+ // numBlocks: number of 512b blocks
+ // blocksize: preferred block size
+ // mode bits: permissions
+
+ // For regular files only.
+ // Data blocks are addressed by inode number and offset.
+ // Any op accessing Size and blocks must lock 'mu'.
+ mu sync.RWMutex
+ Size uint64
+}
+
+// convenience functions to query the mode.
+func (n *Node) isDir() bool {
+ return n.Mode.IsDir()
+}
+
+func (n *Node) isRegular() bool {
+ return n.Mode.IsRegular()
+}
+
+func (n *Node) isSymlink() bool {
+ return n.Mode&os.ModeSymlink != 0
+}
+
+// toJSON returns the json-encoded string for this node.
+func (n *Node) toJSON() string {
+ ret, err := json.Marshal(n)
+ if err != nil {
+ panic(err)
+ }
+ return string(ret)
+}
+
+// Attr fills attr with the standard metadata for the node.
+func (n *Node) Attr(_ context.Context, a *fuse.Attr) error {
+ a.Inode = n.ID
+ a.Mode = n.Mode
+ // Does preferred block size make sense on things other
+ // than regular files?
+ a.BlockSize = BlockSize
+
+ if n.isRegular() {
+ n.mu.RLock()
+ defer n.mu.RUnlock()
+ a.Size = n.Size
+
+ // Blocks is the number of 512 byte blocks, regardless of
+ // filesystem blocksize.
+ a.Blocks = (n.Size + 511) / 512
+ } else if n.isSymlink() {
+ // Symlink: use target name length.
+ a.Size = uint64(len(n.SymlinkTarget))
+ }
+ return nil
+}
+
+// Setattr modifies node metadata. This includes changing the size.
+func (n *Node) Setattr(
+ ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse,
+) error {
+ if !req.Valid.Size() {
+ // We can exit early since only setting the size is implemented.
+ return nil
+ }
+
+ if !n.isRegular() {
+ // Setting the size is only available on regular files.
+ return fuse.Errno(syscall.EINVAL)
+ }
+
+ if req.Size > maxSize {
+ // Too big.
+ return fuse.Errno(syscall.EFBIG)
+ }
+
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ if req.Size == n.Size {
+ // Nothing to do.
+ return nil
+ }
+
+ // Store the current size in case we need to rollback.
+ originalSize := n.Size
+
+ // Wrap everything inside a transaction.
+ err := client.ExecuteTx(ctx, n.cfs.db, nil /* txopts */, func(tx *sql.Tx) error {
+ // Resize blocks as needed.
+ if err := resizeBlocks(tx, n.ID, n.Size, req.Size); err != nil {
+ return err
+ }
+
+ n.Size = req.Size
+ return updateNode(tx, n)
+ })
+
+ if err != nil {
+ // Reset our size.
+ log.Print(err)
+ n.Size = originalSize
+ return err
+ }
+ return nil
+}
+
+// Lookup looks up a specific entry in the receiver,
+// which must be a directory. Lookup should return a Node
+// corresponding to the entry. If the name does not exist in
+// the directory, Lookup should return ENOENT.
+//
+// Lookup need not handle the names "." and "..".
+func (n *Node) Lookup(_ context.Context, name string) (fs.Node, error) {
+ if !n.isDir() {
+ return nil, fuse.Errno(syscall.ENOTDIR)
+ }
+ node, err := n.cfs.lookup(n.ID, name)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return nil, fuse.ENOENT
+ }
+ return nil, err
+ }
+ node.cfs = n.cfs
+ return node, nil
+}
+
+// ReadDirAll returns the list of child inodes.
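+// Entry types are left as DT_Unknown (see CFS.list), so the kernel
+// falls back on Getattr to classify each entry.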
+func (n *Node) ReadDirAll(_ context.Context) ([]fuse.Dirent, error) { + if !n.isDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + return n.cfs.list(n.ID) +} + +// Mkdir creates a directory in 'n'. +// We let the sql query fail if the directory already exists. +// TODO(marc): better handling of errors. +func (n *Node) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { + if !n.isDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + if !req.Mode.IsDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + + node := n.cfs.newDirNode() + err := n.cfs.create(ctx, n.ID, req.Name, node) + if err != nil { + return nil, err + } + return node, nil +} + +// Create creates a new file in the receiver directory. +func (n *Node) Create( + ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse, +) (fs.Node, fs.Handle, error) { + if !n.isDir() { + return nil, nil, fuse.Errno(syscall.ENOTDIR) + } + if req.Mode.IsDir() { + return nil, nil, fuse.Errno(syscall.EISDIR) + } else if !req.Mode.IsRegular() { + return nil, nil, fuse.Errno(syscall.EINVAL) + } + + node := n.cfs.newFileNode() + err := n.cfs.create(ctx, n.ID, req.Name, node) + if err != nil { + return nil, nil, err + } + return node, node, nil +} + +// Remove may be unlink or rmdir. +func (n *Node) Remove(ctx context.Context, req *fuse.RemoveRequest) error { + if !n.isDir() { + return fuse.Errno(syscall.ENOTDIR) + } + + if req.Dir { + // Rmdir. + return n.cfs.remove(ctx, n.ID, req.Name, true /* checkChildren */) + } + // Unlink file/symlink. + return n.cfs.remove(ctx, n.ID, req.Name, false /* !checkChildren */) +} + +// Write writes data to 'n'. It may overwrite existing data, or grow it. +func (n *Node) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { + if !n.isRegular() { + return fuse.Errno(syscall.EINVAL) + } + if req.Offset < 0 { + return fuse.Errno(syscall.EINVAL) + } + if len(req.Data) == 0 { + return nil + } + + n.mu.Lock() + defer n.mu.Unlock() + + newSize := uint64(req.Offset) + uint64(len(req.Data)) + if newSize > maxSize { + return fuse.Errno(syscall.EFBIG) + } + + // Store the current size in case we need to rollback. + originalSize := n.Size + + // Wrap everything inside a transaction. + err := client.ExecuteTx(ctx, n.cfs.db, nil /* txopts */, func(tx *sql.Tx) error { + + // Update blocks. They will be added as needed. + if err := write(tx, n.ID, n.Size, uint64(req.Offset), req.Data); err != nil { + return err + } + + if newSize > originalSize { + // This was an append, commit the size change. + n.Size = newSize + if err := updateNode(tx, n); err != nil { + return err + } + } + return nil + }) + + if err != nil { + // Reset our size. + log.Print(err) + n.Size = originalSize + return err + } + + // We always write everything. + resp.Size = len(req.Data) + return nil +} + +// Read reads data from 'n'. +func (n *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { + if !n.isRegular() { + return fuse.Errno(syscall.EINVAL) + } + if req.Offset < 0 { + // Before beginning of file. + return fuse.Errno(syscall.EINVAL) + } + if req.Size == 0 { + // No bytes requested. + return nil + } + offset := uint64(req.Offset) + + n.mu.RLock() + defer n.mu.RUnlock() + if offset >= n.Size { + // Beyond end of file. 
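+ // Mirror read(2) semantics: a read at or past EOF returns
+ // zero bytes rather than an error.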
+ return nil + } + + to := min(n.Size, offset+uint64(req.Size)) + if offset == to { + return nil + } + + data, err := read(n.cfs.db, n.ID, offset, to) + if err != nil { + return err + } + resp.Data = data + return nil +} + +// Fsync is a noop for us, we always push writes to the DB. We do need to implement it though. +func (n *Node) Fsync(_ context.Context, _ *fuse.FsyncRequest) error { + return nil +} + +// Rename renames 'req.OldName' to 'req.NewName', optionally moving it to 'newDir'. +// If req.NewName exists, it is deleted. It is assumed that it cannot be a directory. +// NOTE: we do not keep track of opens, so we delete existing destinations right away. +// This means that anyone holding an open file descriptor on the destination will fail +// when trying to use it. +// To properly handle this, we need to count references (including inode -> inode refs, +// and open handles) and delete the inode only when it reaches zero. +func (n *Node) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { + newNode, ok := newDir.(*Node) + if !ok { + return fmt.Errorf("newDir is not a Node: %v", newDir) + } + if !n.isDir() || !newNode.isDir() { + return fuse.Errno(syscall.ENOTDIR) + } + return n.cfs.rename(ctx, n.ID, newNode.ID, req.OldName, req.NewName) +} + +// Symlink creates a new symbolic link in the receiver node, which must +// be a directory. +func (n *Node) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) { + if !n.isDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + if len(req.Target) > maxSymlinkTargetLength { + return nil, fuse.Errno(syscall.ENAMETOOLONG) + } + node := n.cfs.newSymlinkNode() + node.SymlinkTarget = req.Target + err := n.cfs.create(ctx, n.ID, req.NewName, node) + if err != nil { + return nil, err + } + return node, nil +} + +// Readlink reads a symbolic link. +func (n *Node) Readlink(_ context.Context, req *fuse.ReadlinkRequest) (string, error) { + if !n.isSymlink() { + return "", fuse.Errno(syscall.EINVAL) + } + return n.SymlinkTarget, nil +} diff --git a/cmd/cql-fuse/randbytes.go b/cmd/cql-fuse/randbytes.go new file mode 100644 index 000000000..e2532e875 --- /dev/null +++ b/cmd/cql-fuse/randbytes.go @@ -0,0 +1,62 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. 
+//
+// Author: Marc Berhault (marc@cockroachlabs.com)
+
+package main
+
+import (
+ "math/rand"
+ "time"
+)
+
+var randLetters = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+// NewPseudoRand returns an instance of math/rand.Rand seeded from the
+// current time, along with its seed, so we can easily and cheaply
+// generate distinct streams of numbers. The created object is not safe
+// for concurrent access.
+func NewPseudoRand() (*rand.Rand, int64) {
+ seed := time.Now().UnixNano()
+ return rand.New(rand.NewSource(seed)), seed
+}
+
+// RandBytes returns a byte slice of the given length with random
+// data.
+func RandBytes(r *rand.Rand, size int) []byte {
+ if size <= 0 {
+ return nil
+ }
+
+ arr := make([]byte, size)
+ for i := 0; i < len(arr); i++ {
+ arr[i] = randLetters[r.Intn(len(randLetters))]
+ }
+ return arr
+}
diff --git a/cmd/cql-fuse/sql.go b/cmd/cql-fuse/sql.go
new file mode 100644
index 000000000..6009e78fd
--- /dev/null
+++ b/cmd/cql-fuse/sql.go
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Copyright 2015 The Cockroach Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License. See the AUTHORS file
+// for names of contributors.
+//
+// Author: Marc Berhault (marc@cockroachlabs.com)
+
+package main
+
+import (
+ "database/sql"
+ "encoding/json"
+ "syscall"
+
+ "bazil.org/fuse"
+)
+
+// sqlExecutor is an interface needed for basic queries.
+// It is implemented by both sql.DB and sql.Tx.
+type sqlExecutor interface {
+ Exec(query string, args ...interface{}) (sql.Result, error)
+ Query(query string, args ...interface{}) (*sql.Rows, error)
+ QueryRow(query string, args ...interface{}) *sql.Row
+}
+
+// getInode looks up an inode given its name and its parent ID.
+// If not found, error will be sql.ErrNoRows.
+func getInode(e sqlExecutor, parentID uint64, name string) (*Node, error) {
+ var raw string
+ const sql = `SELECT inode FROM fs_inode WHERE id =
+(SELECT id FROM fs_namespace WHERE (parentID, name) = (?, ?))`
+ if err := e.QueryRow(sql, parentID, name).Scan(&raw); err != nil {
+ return nil, err
+ }
+
+ node := &Node{}
+ err := json.Unmarshal([]byte(raw), node)
+ return node, err
+}
+
+// checkIsEmpty returns nil if 'id' has no children.
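+// A non-empty directory yields ENOTEMPTY, matching rmdir(2) semantics.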
+func checkIsEmpty(e sqlExecutor, id uint64) error { + var count uint64 + const countSQL = ` +SELECT COUNT(parentID) FROM fs_namespace WHERE parentID = ?` + if err := e.QueryRow(countSQL, id).Scan(&count); err != nil { + return err + } + if count != 0 { + return fuse.Errno(syscall.ENOTEMPTY) + } + return nil +} + +// updateNode updates an existing node descriptor. +func updateNode(e sqlExecutor, node *Node) error { + inode := node.toJSON() + const sql = ` +UPDATE fs_inode SET inode = ? WHERE id = ?; +` + if _, err := e.Exec(sql, inode, node.ID); err != nil { + return err + } + return nil +} + +// getBlockData returns the block data for a single block. +func getBlockData(e sqlExecutor, inodeID uint64, block int) ([]byte, error) { + var data []byte + const sql = `SELECT data FROM fs_block WHERE id = ? AND block = ?` + if err := e.QueryRow(sql, inodeID, block).Scan(&data); err != nil { + return nil, err + } + return data, nil +} + +// updateBlockData overwrites the data for a single block. +func updateBlockData(e sqlExecutor, inodeID uint64, block int, data []byte) error { + const sql = `UPDATE fs_block SET data = ? WHERE (id, block) = (?, ?)` + if _, err := e.Exec(sql, data, inodeID, block); err != nil { + return err + } + return nil +} + +type blockInfo struct { + block int + data []byte +} + +// getBlocks fetches all the blocks for a given inode and returns +// a list of blockInfo objects. +func getBlocks(e sqlExecutor, inodeID uint64) ([]blockInfo, error) { + stmt := `SELECT block, data FROM fs_block WHERE id = ?` + rows, err := e.Query(stmt, inodeID) + if err != nil { + return nil, err + } + return buildBlockInfos(rows) +} + +// getBlocksBetween fetches blocks with IDs [start, end] for a given inode +// and returns a list of blockInfo objects. +func getBlocksBetween(e sqlExecutor, inodeID uint64, start, end int) ([]blockInfo, error) { + stmt := `SELECT block, data FROM fs_block WHERE id = ? AND block >= ? 
AND block <= ?` + rows, err := e.Query(stmt, inodeID, start, end) + if err != nil { + return nil, err + } + return buildBlockInfos(rows) +} + +func buildBlockInfos(rows *sql.Rows) ([]blockInfo, error) { + var results []blockInfo + for rows.Next() { + b := blockInfo{} + if err := rows.Scan(&b.block, &b.data); err != nil { + return nil, err + } + results = append(results, b) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return results, nil +} diff --git a/cmd/cql-minerd/benchGNTE.sh b/cmd/cql-minerd/benchGNTE.sh index b26b2d207..fd6da72bb 100755 --- a/cmd/cql-minerd/benchGNTE.sh +++ b/cmd/cql-minerd/benchGNTE.sh @@ -6,3 +6,21 @@ go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ + +go test -cpu=4 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ && \ +go test -cpu=4 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ && \ +go test -cpu=4 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ && \ +go test -cpu=4 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ && \ +go test -cpu=4 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ + +go test -cpu=2 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ && \ +go test -cpu=2 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ && \ +go test -cpu=2 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ && \ +go test -cpu=2 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ && \ +go test -cpu=2 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ + +go test -cpu=1 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ && \ +go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ && \ +go test -cpu=1 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ && \ +go test -cpu=1 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ && \ +go test -cpu=1 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ diff --git a/cmd/cql-minerd/dbms.go b/cmd/cql-minerd/dbms.go index c2b712304..d033f16de 100644 --- a/cmd/cql-minerd/dbms.go +++ b/cmd/cql-minerd/dbms.go @@ -27,14 +27,12 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/worker" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/pkg/errors" ) @@ -65,14 +63,8 @@ func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { // add test fixture database if conf.GConf.Miner.IsTestMode { // in test mode - - var pubKey *asymmetric.PublicKey var privKey *asymmetric.PrivateKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - err = errors.Wrap(err, "get local public key failed") - return - } if privKey, err = kms.GetLocalPrivateKey(); err != nil { err = errors.Wrap(err, "get local private key failed") return @@ -81,28 +73,12 @@ func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { // add database to miner for _, testFixture := range conf.GConf.Miner.TestFixtures { // build test db instance configuration - dbPeers := &kayak.Peers{ - Term: testFixture.Term, - Leader: &kayak.Server{ - 
Role: proto.Leader, - ID: testFixture.Leader, + dbPeers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: testFixture.Term, + Leader: testFixture.Leader, + Servers: testFixture.Servers, }, - Servers: (func(servers []proto.NodeID) (ks []*kayak.Server) { - ks = make([]*kayak.Server, len(servers)) - - for i, s := range servers { - ks[i] = &kayak.Server{ - Role: proto.Follower, - ID: s, - } - if s == testFixture.Leader { - ks[i].Role = proto.Leader - } - } - - return - })(testFixture.Servers), - PubKey: pubKey, } if err = dbPeers.Sign(privKey); err != nil { @@ -111,14 +87,14 @@ func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { } // load genesis block - var block *ct.Block + var block *types.Block if block, err = loadGenesisBlock(testFixture); err != nil { err = errors.Wrap(err, "load genesis block failed") return } // add to dbms - instance := &wt.ServiceInstance{ + instance := &types.ServiceInstance{ DatabaseID: testFixture.DatabaseID, Peers: dbPeers, GenesisBlock: block, @@ -133,7 +109,7 @@ func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { return } -func loadGenesisBlock(fixture *conf.MinerDatabaseFixture) (block *ct.Block, err error) { +func loadGenesisBlock(fixture *conf.MinerDatabaseFixture) (block *types.Block, err error) { if fixture.GenesisBlockFile == "" { err = os.ErrNotExist return @@ -171,7 +147,7 @@ func loadGenesisBlock(fixture *conf.MinerDatabaseFixture) (block *ct.Block, err } // copied from sqlchain.xxx_test. -func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -182,9 +158,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: rootHash, @@ -192,12 +168,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error Timestamp: time.Now().UTC(), }, }, - Queries: make([]*hash.Hash, rand.Intn(10)+10), - } - - for i := range b.Queries { - b.Queries[i] = new(hash.Hash) - rand.Read(b.Queries[i][:]) } if isGenesis { diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 60024948c..bd6237d67 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -21,11 +21,13 @@ package main import ( "context" "database/sql" + "fmt" "io/ioutil" "math/rand" "os" "os/exec" "path/filepath" + "runtime" "sync" "sync/atomic" "syscall" @@ -36,6 +38,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/go-sqlite3-encrypt" . 
"github.com/smartystreets/goconvey/convey" ) @@ -240,7 +243,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner0.profile"), - "-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), + //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), "-metricGraphiteServer", "192.168.2.100:2003", "-profileServer", "0.0.0.0:8080", "-metricLog", @@ -258,7 +261,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner1.profile"), - "-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), + //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), "-metricGraphiteServer", "192.168.2.100:2003", "-profileServer", "0.0.0.0:8081", "-metricLog", @@ -276,7 +279,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner2.profile"), - "-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), + //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), "-metricGraphiteServer", "192.168.2.100:2003", "-profileServer", "0.0.0.0:8082", "-metricLog", @@ -299,7 +302,7 @@ func stopNodes() { defer wg.Done() thisCmd.Cmd.Process.Signal(syscall.SIGTERM) thisCmd.Cmd.Wait() - grepRace := exec.Command("/bin/sh", "-c", "grep -A 50 'DATA RACE' "+thisCmd.LogPath) + grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() if len(out) > 2 { log.Fatalf("DATA RACE in %s :\n%s", thisCmd.Cmd.Path, string(out)) @@ -326,7 +329,7 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) // create - dsn, err := client.Create(client.ResourceMeta{Node: 1}) + dsn, err := client.Create(client.ResourceMeta{Node: 2}) So(err, ShouldBeNil) log.Infof("the created database dsn is %v", dsn) @@ -389,17 +392,22 @@ func TestFullProcess(t *testing.T) { }) } +const ROWSTART = 1000000 +const TABLENAME = "insert_table0" + func prepareBenchTable(db *sql.DB) { - _, err := db.Exec("DROP TABLE IF EXISTS test;") + _, err := db.Exec("DROP TABLE IF EXISTS " + TABLENAME + ";") So(err, ShouldBeNil) - _, err = db.Exec("CREATE TABLE test ( indexedColumn, nonIndexedColumn );") + _, err = db.Exec(`CREATE TABLE ` + TABLENAME + ` ("k" INT, "v1" TEXT, PRIMARY KEY("k"))`) So(err, ShouldBeNil) - _, err = db.Exec("CREATE INDEX testIndexedColumn ON test ( indexedColumn );") + _, err = db.Exec("REPLACE INTO "+TABLENAME+" VALUES(?, ?)", ROWSTART-1, "test") So(err, ShouldBeNil) +} - _, err = db.Exec("INSERT INTO test VALUES(?, ?)", 4, 4) +func cleanBenchTable(db *sql.DB) { + _, err := db.Exec("DELETE FROM "+TABLENAME+" WHERE k >= ?", ROWSTART) So(err, ShouldBeNil) } @@ -409,38 +417,26 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { prepareBenchTable(db) } - var i int32 - var insertedCount int + cleanBenchTable(db) - rand.Seed(time.Now().UnixNano()) - start := (rand.Int31() % 100) * 10000 + var i int64 + i = -1 - b.Run("benchmark Single INSERT", func(b *testing.B) { + b.Run("benchmark INSERT", func(b *testing.B) { b.ResetTimer() - insertedCount = b.N - for i := 0; i < b.N; i++ { - _, err = db.Exec("INSERT INTO test ( indexedColumn, nonIndexedColumn ) VALUES"+ - "(?, ?)", int(start)+i, i, - ) - if err != nil { - b.Fatal(err) 
- } - } - }) - - if createDB { - prepareBenchTable(db) - } - - b.Run("benchmark Multi INSERT", func(b *testing.B) { - b.ResetTimer() - insertedCount = b.N b.RunParallel(func(pb *testing.PB) { for pb.Next() { - ii := atomic.AddInt32(&i, 1) - _, err = db.Exec("INSERT INTO test ( indexedColumn, nonIndexedColumn ) VALUES"+ - "(?, ?)", start+ii, ii, + ii := atomic.AddInt64(&i, 1) + _, err = db.Exec("INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ + "(?, ?)", ROWSTART+ii, ii, ) + for err != nil && err.Error() == sqlite3.ErrBusy.Error() { + // retry forever + log.Warnf("ROWSTART+ii = %d retried", ROWSTART+ii) + _, err = db.Exec("INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ + "(?, ?)", ROWSTART+ii, ii, + ) + } if err != nil { b.Fatal(err) } @@ -448,34 +444,56 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { }) }) - rowCount := db.QueryRow("SELECT COUNT(1) FROM test") - var count int + routineCount := runtime.NumGoroutine() + if routineCount > 100 { + b.Errorf("go routine count: %d", routineCount) + } else { + log.Infof("go routine count: %d", routineCount) + } + + rowCount := db.QueryRow("SELECT COUNT(1) FROM " + TABLENAME) + var count int64 err = rowCount.Scan(&count) if err != nil { b.Fatal(err) } - log.Warnf("Row Count: %d", count) + log.Warnf("Row Count: %v", count) b.Run("benchmark SELECT", func(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { - index := i%insertedCount + int(start) + 1 - row := db.QueryRow("SELECT nonIndexedColumn FROM test WHERE indexedColumn = ? LIMIT 1", index) - var result int - err = row.Scan(&result) - if err != nil || result < 0 { - log.Errorf("i = %d", i) - b.Fatal(err) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var index int64 + if createDB { //only data by insert + index = rand.Int63n(count-1) + ROWSTART + } else { //has data before ROWSTART + index = rand.Int63n(count - 1) + } + //log.Debugf("index = %d", index) + row := db.QueryRow("SELECT v1 FROM "+TABLENAME+" WHERE k = ? 
LIMIT 1", index) + var result []byte + err = row.Scan(&result) + if err != nil || (len(result) == 0) { + log.Errorf("index = %d", index) + b.Fatal(err) + } } - } + }) }) - row := db.QueryRow("SELECT nonIndexedColumn FROM test LIMIT 1") + routineCount = runtime.NumGoroutine() + if routineCount > 100 { + b.Errorf("go routine count: %d", routineCount) + } else { + log.Infof("go routine count: %d", routineCount) + } - var result int - err = row.Scan(&result) - So(err, ShouldBeNil) - So(result, ShouldEqual, 4) + //row := db.QueryRow("SELECT nonIndexedColumn FROM test LIMIT 1") + + //var result int + //err = row.Scan(&result) + //So(err, ShouldBeNil) + //So(result, ShouldEqual, 4) err = db.Close() So(err, ShouldBeNil) @@ -542,17 +560,33 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { } func BenchmarkSQLite(b *testing.B) { - os.Remove("./foo.db") - defer os.Remove("./foo.db") - - db, err := sql.Open("sqlite3", "./foo.db?_journal_mode=WAL&_synchronous=NORMAL&cache=shared") - if err != nil { - log.Fatal(err) + var db *sql.DB + var createDB bool + millionFile := fmt.Sprintf("/data/sqlite_bigdata/insert_multi_sqlitedb0_1_%v", ROWSTART) + f, err := os.Open(millionFile) + if err != nil && os.IsNotExist(err) { + os.Remove("./foo.db") + defer os.Remove("./foo.db") + + db, err = sql.Open("sqlite3", "./foo.db?_journal_mode=WAL&_synchronous=NORMAL&cache=shared") + if err != nil { + log.Fatal(err) + } + createDB = true + defer db.Close() + } else { + f.Close() + db, err = sql.Open("sqlite3", millionFile+"?_journal_mode=WAL&_synchronous=NORMAL&cache=shared") + log.Infof("Testing sqlite3 million data exist file %v", millionFile) + if err != nil { + log.Fatal(err) + } + createDB = false + defer db.Close() } - defer db.Close() Convey("bench SQLite", b, func() { - benchDB(b, db, true) + benchDB(b, db, createDB) }) } diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 1da8ac364..1dfbd5b34 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -26,7 +26,7 @@ import ( "os" "os/signal" "runtime" - "runtime/trace" + //"runtime/trace" "syscall" "time" @@ -117,7 +117,7 @@ func initLogs() { func main() { // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.DebugLevel) + log.SetLevel(log.InfoLevel) flag.Parse() if showVersion { @@ -254,22 +254,22 @@ func main() { go graphite.Graphite(metrics.DefaultRegistry, 5*time.Second, minerName, addr) } - if traceFile != "" { - f, err := os.Create(traceFile) - if err != nil { - log.WithError(err).Fatal("failed to create trace output file") - } - defer func() { - if err := f.Close(); err != nil { - log.WithError(err).Fatal("failed to close trace file") - } - }() - - if err := trace.Start(f); err != nil { - log.WithError(err).Fatal("failed to start trace") - } - defer trace.Stop() - } + //if traceFile != "" { + // f, err := os.Create(traceFile) + // if err != nil { + // log.WithError(err).Fatal("failed to create trace output file") + // } + // defer func() { + // if err := f.Close(); err != nil { + // log.WithError(err).Fatal("failed to close trace file") + // } + // }() + + // if err := trace.Start(f); err != nil { + // log.WithError(err).Fatal("failed to start trace") + // } + // defer trace.Stop() + //} <-signalCh diff --git a/cmd/cql-minerd/node.go b/cmd/cql-minerd/node.go index 2b7808e0a..3ea86d589 100644 --- a/cmd/cql-minerd/node.go +++ b/cmd/cql-minerd/node.go @@ -25,7 +25,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - 
"github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -112,7 +112,7 @@ func registerNodeToBP(timeout time.Duration) (err error) { ch <- id return } - if strings.Contains(err.Error(), kayak.ErrNotLeader.Error()) { + if strings.Contains(err.Error(), kt.ErrNotLeader.Error()) { log.Debug("stop ping non leader BP node") return } diff --git a/cmd/cql-mysql-adapter/cursor.go b/cmd/cql-mysql-adapter/cursor.go index 1622f5537..7583c1996 100644 --- a/cmd/cql-mysql-adapter/cursor.go +++ b/cmd/cql-mysql-adapter/cursor.go @@ -290,7 +290,7 @@ func (c *Cursor) UseDB(dbName string) (err error) { } // HandleQuery handle COM_QUERY comamnd, like SELECT, INSERT, UPDATE, etc... -// if Result has a Resultset (SELECT, SHOW, etc...), we will send this as the repsonse, otherwise, we will send Result. +// if Result has a Resultset (SELECT, SHOW, etc...), we will send this as the response, otherwise, we will send Result. func (c *Cursor) HandleQuery(query string) (r *my.Result, err error) { var processed bool diff --git a/cmd/cql-observer/api.go b/cmd/cql-observer/api.go index 2d644eed1..8e4c0c483 100644 --- a/cmd/cql-observer/api.go +++ b/cmd/cql-observer/api.go @@ -27,9 +27,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/gorilla/mux" ) @@ -50,6 +49,10 @@ func sendResponse(code int, success bool, msg interface{}, data interface{}, rw }) } +func notSupported(rw http.ResponseWriter, _ *http.Request) { + sendResponse(500, false, fmt.Sprintf("not supported in %v", version), nil, rw) +} + type explorerAPI struct { service *Service } @@ -76,26 +79,7 @@ func (a *explorerAPI) GetAck(rw http.ResponseWriter, r *http.Request) { } // format ack to json response - sendResponse(200, true, "", map[string]interface{}{ - "ack": map[string]interface{}{ - "request": map[string]interface{}{ - "hash": ack.Response.Request.HeaderHash.String(), - "timestamp": a.formatTime(ack.Response.Request.Timestamp), - "node": ack.Response.Request.NodeID, - "type": ack.Response.Request.QueryType.String(), - "count": ack.Response.Request.BatchCount, - }, - "response": map[string]interface{}{ - "hash": ack.Response.HeaderHash.String(), - "timestamp": a.formatTime(ack.Response.Timestamp), - "node": ack.Response.NodeID, - "log_position": ack.Response.LogOffset, - }, - "hash": ack.HeaderHash.String(), - "timestamp": a.formatTime(ack.AckHeader.Timestamp), - "node": ack.AckHeader.NodeID, - }, - }, rw) + sendResponse(200, true, "", a.formatAck(ack), rw) } func (a *explorerAPI) GetRequest(rw http.ResponseWriter, r *http.Request) { @@ -122,7 +106,7 @@ func (a *explorerAPI) GetRequest(rw http.ResponseWriter, r *http.Request) { sendResponse(200, true, "", a.formatRequest(req), rw) } -func (a *explorerAPI) GetRequestByOffset(rw http.ResponseWriter, r *http.Request) { +func (a *explorerAPI) GetResponse(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) dbID, err := a.getDBID(vars) @@ -131,28 +115,46 @@ func (a *explorerAPI) GetRequestByOffset(rw http.ResponseWriter, r *http.Request return } - offsetStr := vars["offset"] - if offsetStr == "" { - sendResponse(400, false, "", nil, rw) + h, err := a.getHash(vars) + if 
err != nil { + sendResponse(400, false, err, nil, rw) return } - offset, err := strconv.ParseUint(offsetStr, 10, 64) + resp, err := a.service.getResponseHeader(dbID, h) + if err != nil { + sendResponse(500, false, err, nil, rw) + return + } + + sendResponse(200, true, "", a.formatResponseHeader(resp), rw) +} + +func (a *explorerAPI) GetBlock(rw http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + dbID, err := a.getDBID(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + h, err := a.getHash(vars) if err != nil { sendResponse(400, false, err, nil, rw) return } - req, err := a.service.getRequestByOffset(dbID, offset) + _, height, block, err := a.service.getBlock(dbID, h) if err != nil { sendResponse(500, false, err, nil, rw) return } - sendResponse(200, true, "", a.formatRequest(req), rw) + sendResponse(200, true, "", a.formatBlock(height, block), rw) } -func (a *explorerAPI) GetBlock(rw http.ResponseWriter, r *http.Request) { +func (a *explorerAPI) GetBlockV3(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) dbID, err := a.getDBID(vars) @@ -167,13 +169,13 @@ func (a *explorerAPI) GetBlock(rw http.ResponseWriter, r *http.Request) { return } - height, block, err := a.service.getBlock(dbID, h) + count, height, block, err := a.service.getBlock(dbID, h) if err != nil { sendResponse(500, false, err, nil, rw) return } - sendResponse(200, true, "", a.formatBlock(height, block), rw) + sendResponse(200, true, "", a.formatBlockV3(count, height, block), rw) } func (a *explorerAPI) GetBlockByCount(rw http.ResponseWriter, r *http.Request) { @@ -187,7 +189,7 @@ func (a *explorerAPI) GetBlockByCount(rw http.ResponseWriter, r *http.Request) { countStr := vars["count"] if countStr == "" { - sendResponse(400, false, "", nil, rw) + sendResponse(400, false, "empty count", nil, rw) return } @@ -208,6 +210,38 @@ func (a *explorerAPI) GetBlockByCount(rw http.ResponseWriter, r *http.Request) { sendResponse(200, true, "", a.formatBlockV2(count, height, block), rw) } +func (a *explorerAPI) GetBlockByCountV3(rw http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + dbID, err := a.getDBID(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + countStr := vars["count"] + if countStr == "" { + sendResponse(400, false, "empty count", nil, rw) + return + } + + countNumber, err := strconv.ParseInt(countStr, 10, 32) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + count := int32(countNumber) + + height, block, err := a.service.getBlockByCount(dbID, count) + if err != nil { + sendResponse(500, false, err, nil, rw) + return + } + + sendResponse(200, true, "", a.formatBlockV3(count, height, block), rw) +} + func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) @@ -219,7 +253,7 @@ func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) heightStr := vars["height"] if heightStr == "" { - sendResponse(400, false, "", nil, rw) + sendResponse(400, false, "empty height", nil, rw) return } @@ -231,7 +265,7 @@ func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) height := int32(heightNumber) - block, err := a.service.getBlockByHeight(dbID, height) + _, block, err := a.service.getBlockByHeight(dbID, height) if err != nil { sendResponse(500, false, err, nil, rw) return @@ -240,7 +274,39 @@ func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) sendResponse(200, true, "", a.formatBlock(height, 
block), rw) } -func (a *explorerAPI) getHighestBlock(rw http.ResponseWriter, r *http.Request) { +func (a *explorerAPI) GetBlockByHeightV3(rw http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + dbID, err := a.getDBID(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + heightStr := vars["height"] + if heightStr == "" { + sendResponse(400, false, "empty height", nil, rw) + return + } + + heightNumber, err := strconv.ParseInt(heightStr, 10, 32) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + height := int32(heightNumber) + + count, block, err := a.service.getBlockByHeight(dbID, height) + if err != nil { + sendResponse(500, false, err, nil, rw) + return + } + + sendResponse(200, true, "", a.formatBlockV3(count, height, block), rw) +} + +func (a *explorerAPI) GetHighestBlock(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) dbID, err := a.getDBID(vars) @@ -271,7 +337,7 @@ func (a *explorerAPI) getHighestBlock(rw http.ResponseWriter, r *http.Request) { sendResponse(200, true, "", a.formatBlock(height, block), rw) } -func (a *explorerAPI) getHighestBlockV2(rw http.ResponseWriter, r *http.Request) { +func (a *explorerAPI) GetHighestBlockV2(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) dbID, err := a.getDBID(vars) @@ -302,36 +368,46 @@ func (a *explorerAPI) getHighestBlockV2(rw http.ResponseWriter, r *http.Request) sendResponse(200, true, "", a.formatBlockV2(count, height, block), rw) } -func (a *explorerAPI) formatBlock(height int32, b *ct.Block) map[string]interface{} { - queries := make([]string, 0, len(b.Queries)) +func (a *explorerAPI) GetHighestBlockV3(rw http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) - for _, q := range b.Queries { - queries = append(queries, q.String()) + dbID, err := a.getDBID(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) + return } - return map[string]interface{}{ - "block": map[string]interface{}{ - "height": height, - "hash": b.BlockHash().String(), - "genesis_hash": b.GenesisHash().String(), - "timestamp": a.formatTime(b.Timestamp()), - "version": b.SignedHeader.Version, - "producer": b.Producer(), - "queries": queries, - }, + count, height, block, err := a.service.getHighestBlockV2(dbID) + if err == ErrNotFound { + // try to add subscription + err = a.service.subscribe(dbID, "oldest") + if err == nil { + count, height, block, err = a.service.getHighestBlockV2(dbID) + if err != nil { + sendResponse(500, false, err, nil, rw) + return + } + } else { + sendResponse(400, false, err, nil, rw) + return + } + } else if err != nil { + sendResponse(500, false, err, nil, rw) + return } + + sendResponse(200, true, "", a.formatBlockV3(count, height, block), rw) } -func (a *explorerAPI) formatBlockV2(count, height int32, b *ct.Block) map[string]interface{} { - queries := make([]string, 0, len(b.Queries)) +func (a *explorerAPI) formatBlock(height int32, b *types.Block) (res map[string]interface{}) { + queries := make([]string, 0, len(b.Acks)) - for _, q := range b.Queries { - queries = append(queries, q.String()) + for _, q := range b.Acks { + queries = append(queries, q.Hash().String()) } return map[string]interface{}{ "block": map[string]interface{}{ - "count": count, "height": height, "hash": b.BlockHash().String(), "genesis_hash": b.GenesisHash().String(), @@ -343,7 +419,40 @@ func (a *explorerAPI) formatBlockV2(count, height int32, b *ct.Block) map[string } } -func (a *explorerAPI) formatRequest(req *wt.Request) map[string]interface{} { +func (a 
*explorerAPI) formatBlockV2(count, height int32, b *types.Block) (res map[string]interface{}) { + res = a.formatBlock(height, b) + res["block"].(map[string]interface{})["count"] = count + return +} + +func (a *explorerAPI) formatBlockV3(count, height int32, b *types.Block) (res map[string]interface{}) { + res = a.formatBlockV2(count, height, b) + blockRes := res["block"].(map[string]interface{}) + blockRes["acks"] = func() (acks []interface{}) { + acks = make([]interface{}, 0, len(b.Acks)) + + for _, ack := range b.Acks { + acks = append(acks, a.formatAck(ack)["ack"]) + } + + return + }() + blockRes["queries"] = func() (tracks []interface{}) { + tracks = make([]interface{}, 0, len(b.QueryTxs)) + + for _, tx := range b.QueryTxs { + t := a.formatRequest(tx.Request) + t["response"] = a.formatResponseHeader(tx.Response)["response"] + tracks = append(tracks, t) + } + + return + }() + + return +} + +func (a *explorerAPI) formatRequest(req *types.Request) map[string]interface{} { // get queries queries := make([]map[string]interface{}, 0, req.Header.BatchCount) @@ -365,7 +474,7 @@ func (a *explorerAPI) formatRequest(req *wt.Request) map[string]interface{} { return map[string]interface{}{ "request": map[string]interface{}{ - "hash": req.Header.HeaderHash.String(), + "hash": req.Header.Hash().String(), "timestamp": a.formatTime(req.Header.Timestamp), "node": req.Header.NodeID, "type": req.Header.QueryType.String(), @@ -375,6 +484,52 @@ func (a *explorerAPI) formatRequest(req *wt.Request) map[string]interface{} { } } +func (a *explorerAPI) formatResponseHeader(resp *types.SignedResponseHeader) map[string]interface{} { + return map[string]interface{}{ + "response": map[string]interface{}{ + "hash": resp.Hash().String(), + "timestamp": a.formatTime(resp.Timestamp), + "node": resp.NodeID, + "row_count": resp.RowCount, + "log_id": resp.LogOffset, + "last_insert_id": resp.LastInsertID, + "affected_rows": resp.AffectedRows, + }, + "request": map[string]interface{}{ + "hash": resp.Request.Hash().String(), + "timestamp": a.formatTime(resp.Request.Timestamp), + "node": resp.Request.NodeID, + "type": resp.Request.QueryType.String(), + "count": resp.Request.BatchCount, + }, + } +} + +func (a *explorerAPI) formatAck(ack *types.SignedAckHeader) map[string]interface{} { + return map[string]interface{}{ + "ack": map[string]interface{}{ + "request": map[string]interface{}{ + "hash": ack.Response.Request.Hash().String(), + "timestamp": a.formatTime(ack.Response.Request.Timestamp), + "node": ack.Response.Request.NodeID, + "type": ack.Response.Request.QueryType.String(), + "count": ack.Response.Request.BatchCount, + }, + "response": map[string]interface{}{ + "hash": ack.Response.Hash().String(), + "timestamp": a.formatTime(ack.Response.Timestamp), + "node": ack.Response.NodeID, + "log_id": ack.Response.LogOffset, // savepoint id in eventual consistency mode + "last_insert_id": ack.Response.LastInsertID, + "affected_rows": ack.Response.AffectedRows, + }, + "hash": ack.Hash().String(), + "timestamp": a.formatTime(ack.Timestamp), + "node": ack.NodeID, + }, + } +} + func (a *explorerAPI) formatTime(t time.Time) float64 { return float64(t.UnixNano()) / 1e6 } @@ -398,7 +553,9 @@ func (a *explorerAPI) getHash(vars map[string]string) (h *hash.Hash, err error) func startAPI(service *Service, listenAddr string) (server *http.Server, err error) { router := mux.NewRouter() router.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { - sendResponse(http.StatusOK, true, nil, nil, rw) + sendResponse(http.StatusOK, true, 
nil, map[string]interface{}{ + "version": version, + }, rw) }).Methods("GET") api := &explorerAPI{ @@ -406,14 +563,20 @@ func startAPI(service *Service, listenAddr string) (server *http.Server, err err } v1Router := router.PathPrefix("/v1").Subrouter() v1Router.HandleFunc("/ack/{db}/{hash}", api.GetAck).Methods("GET") - v1Router.HandleFunc("/offset/{db}/{offset:[0-9]+}", api.GetRequestByOffset).Methods("GET") + v1Router.HandleFunc("/offset/{db}/{offset:[0-9]+}", notSupported).Methods("GET") v1Router.HandleFunc("/request/{db}/{hash}", api.GetRequest).Methods("GET") v1Router.HandleFunc("/block/{db}/{hash}", api.GetBlock).Methods("GET") v1Router.HandleFunc("/count/{db}/{count:[0-9]+}", api.GetBlockByCount).Methods("GET") v1Router.HandleFunc("/height/{db}/{height:[0-9]+}", api.GetBlockByHeight).Methods("GET") - v1Router.HandleFunc("/head/{db}", api.getHighestBlock).Methods("GET") + v1Router.HandleFunc("/head/{db}", api.GetHighestBlock).Methods("GET") v2Router := router.PathPrefix("/v2").Subrouter() - v2Router.HandleFunc("/head/{db}", api.getHighestBlockV2).Methods("GET") + v2Router.HandleFunc("/head/{db}", api.GetHighestBlockV2).Methods("GET") + v3Router := router.PathPrefix("/v3").Subrouter() + v3Router.HandleFunc("/response/{db}/{hash}", api.GetResponse).Methods("GET") + v3Router.HandleFunc("/block/{db}/{hash}", api.GetBlockV3).Methods("GET") + v3Router.HandleFunc("/count/{db}/{count:[0-9]+}", api.GetBlockByCountV3).Methods("GET") + v3Router.HandleFunc("/height/{db}/{height:[0-9]+}", api.GetBlockByHeightV3).Methods("GET") + v3Router.HandleFunc("/head/{db}", api.GetHighestBlockV3).Methods("GET") server = &http.Server{ Addr: listenAddr, diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index a0be43d42..76a1b57c1 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -35,9 +35,9 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/jmoiron/jsonq" . "github.com/smartystreets/goconvey/convey" ) @@ -175,7 +175,7 @@ func stopNodes() { defer wg.Done() thisCmd.Cmd.Process.Signal(syscall.SIGTERM) thisCmd.Cmd.Wait() - grepRace := exec.Command("/bin/sh", "-c", "grep -A 50 'DATA RACE' "+thisCmd.LogPath) + grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() if len(out) > 2 { log.Fatal(string(out)) @@ -187,7 +187,7 @@ func stopNodes() { } func getJSON(pattern string, args ...interface{}) (result *jsonq.JsonQuery, err error) { - url := "http://localhost:4663/v1/" + fmt.Sprintf(pattern, args...) + url := "http://localhost:4663/" + fmt.Sprintf(pattern, args...) 
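Note that getJSON now takes the API version as part of the pattern ("v1/...", "v3/..."), matching the v1/v2/v3 subrouters registered in startAPI above. A hypothetical fragment in the style of the assertions below, showing why the version choice matters:

// v3 blocks embed full ack and query objects, so "queries" becomes an
// object array rather than v1's list of ack hash strings
res, err = getJSON("v3/height/%v/%v", dbID, 1)
So(err, ShouldBeNil)
So(ensureSuccess(res.ArrayOfObjects("block", "queries")), ShouldNotBeEmpty)
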
resp, err := http.Get(url) if err != nil { return @@ -195,6 +195,15 @@ func getJSON(pattern string, args ...interface{}) (result *jsonq.JsonQuery, err var res map[string]interface{} err = json.NewDecoder(resp.Body).Decode(&res) + if err != nil { + return + } + log.WithFields(log.Fields{ + "pattern": pattern, + "args": args, + "response": res, + "code": resp.StatusCode, + }).Debug("send test request") result = jsonq.NewQuery(res) success, err := result.Bool("success") if err != nil { @@ -353,38 +362,41 @@ func TestFullProcess(t *testing.T) { time.Sleep(blockProducePeriod * 2) // test get genesis block by height - res, err := getJSON("height/%v/0", dbID) + res, err := getJSON("v1/height/%v/0", dbID) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("block")), ShouldNotBeNil) So(ensureSuccess(res.Int("block", "height")), ShouldEqual, 0) genesisHash := ensureSuccess(res.String("block", "hash")).(string) // test get first containable block - res, err = getJSON("height/%v/1", dbID) + res, err = getJSON("v3/height/%v/1", dbID) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("block")), ShouldNotBeNil) So(ensureSuccess(res.Int("block", "height")), ShouldEqual, 1) So(ensureSuccess(res.String("block", "hash")), ShouldNotBeEmpty) So(ensureSuccess(res.String("block", "genesis_hash")), ShouldEqual, genesisHash) - So(ensureSuccess(res.ArrayOfStrings("block", "queries")), ShouldNotBeEmpty) + So(ensureSuccess(res.ArrayOfObjects("block", "queries")), ShouldNotBeEmpty) blockHash := ensureSuccess(res.String("block", "hash")).(string) byHeightBlockResult := ensureSuccess(res.Interface()) // test get block by hash - res, err = getJSON("block/%v/%v", dbID, blockHash) + res, err = getJSON("v3/block/%v/%v", dbID, blockHash) So(err, ShouldBeNil) So(ensureSuccess(res.Interface()), ShouldResemble, byHeightBlockResult) + // test get block by hash using v1 version, returns ack hashes as queries + res, err = getJSON("v1/block/%v/%v", dbID, blockHash) + So(err, ShouldBeNil) + ackHashes, err := res.ArrayOfStrings("block", "queries") So(err, ShouldBeNil) So(ackHashes, ShouldNotBeEmpty) // test get acked query in block - var logOffset int var reqHash string for _, ackHash := range ackHashes { - res, err = getJSON("ack/%v/%v", dbID, ackHash) + res, err = getJSON("v1/ack/%v/%v", dbID, ackHash) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("ack")), ShouldNotBeNil) So(ensureSuccess(res.String("ack", "hash")), ShouldNotBeEmpty) @@ -393,12 +405,9 @@ func TestFullProcess(t *testing.T) { queryType, err := res.String("ack", "request", "type") So(err, ShouldBeNil) - So(queryType, ShouldBeIn, []string{wt.WriteQuery.String(), wt.ReadQuery.String()}) + So(queryType, ShouldBeIn, []string{types.WriteQuery.String(), types.ReadQuery.String()}) - if queryType == wt.WriteQuery.String() { - logOffset, err = res.Int("ack", "response", "log_position") - So(err, ShouldBeNil) - So(logOffset, ShouldBeGreaterThanOrEqualTo, 0) + if queryType == types.WriteQuery.String() { reqHash, err = res.String("ack", "request", "hash") So(err, ShouldBeNil) So(reqHash, ShouldNotBeEmpty) @@ -407,36 +416,24 @@ func TestFullProcess(t *testing.T) { // must contains a write query So(reqHash, ShouldNotBeEmpty) - So(logOffset, ShouldBeGreaterThanOrEqualTo, 0) // test get request entity by request hash - res, err = getJSON("request/%v/%v", dbID, reqHash) + res, err = getJSON("v1/request/%v/%v", dbID, reqHash) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("request")), ShouldNotBeNil) So(ensureSuccess(res.String("request", "hash")), ShouldNotBeEmpty) - 
So(ensureSuccess(res.String("request", "type")), ShouldEqual, wt.WriteQuery.String()) + So(ensureSuccess(res.String("request", "type")), ShouldEqual, types.WriteQuery.String()) So(ensureSuccess(res.Int("request", "count")), ShouldEqual, 1) // no transaction batch is used So(ensureSuccess(res.ArrayOfObjects("request", "queries")), ShouldNotBeEmpty) So(ensureSuccess(res.String("request", "queries", "0", "pattern")), ShouldNotBeEmpty) - byHashRequestResult := ensureSuccess(res.Interface()) - - // test get request entity by log offset - res, err = getJSON("offset/%v/%v", dbID, logOffset) - So(err, ShouldBeNil) - So(ensureSuccess(res.Interface()), ShouldResemble, byHashRequestResult) - - // test get first log offset, should be a create table statement - res, err = getJSON("offset/%v/1", dbID) - So(err, ShouldBeNil) - So(ensureSuccess(res.String("request", "queries", "0", "pattern")), ShouldContainSubstring, "CREATE TABLE") // test get genesis block by height - res, err = getJSON("height/%v/0", dbID2) + res, err = getJSON("v3/height/%v/0", dbID2) So(err, ShouldNotBeNil) log.Info(err, res) // test get genesis block by height - res, err = getJSON("head/%v", dbID2) + res, err = getJSON("v3/head/%v", dbID2) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("block")), ShouldNotBeNil) So(ensureSuccess(res.Int("block", "height")), ShouldEqual, 0) diff --git a/cmd/cql-observer/service.go b/cmd/cql-observer/service.go index 53b4b9720..c9fbe25a9 100644 --- a/cmd/cql-observer/service.go +++ b/cmd/cql-observer/service.go @@ -25,7 +25,6 @@ import ( "sync/atomic" "time" - bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -33,10 +32,9 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/sqlchain" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/coreos/bbolt" ) @@ -57,20 +55,20 @@ const ( | | \--> [count] => height | | | [block]-->[`dbID`] - | | |---> [height+hash] => block - | | \--> [height+hash] => block + | | |---> [height+hash+count] => block + | | \--> [height+hash+count] => block | | | [ack]-->[`dbID`] - | | |---> [hash] => ack - | | \--> [hash] => ack + | | |---> [hash] => height+offset + | | \--> [hash] => height+offset | | | [request]-->[`dbID`] - | | |---> [offset+hash] => request - | | \--> [offset+hash] => request + | | |---> [hash] => height+offset + | | \--> [hash] => height+offset | | - | [offset]-->[`dbID`] - | |---> [hash] => offset - | \--> [hash] => offset + | [response]-->[`dbID`] + | |---> [hash] => height+offset + | \--> [hash] => height+offset | \-> [subscription] \---> [`dbID`] => height @@ -81,16 +79,18 @@ var ( ErrStopped = errors.New("observer service has stopped") // ErrNotFound defines error on fail to found specified resource ErrNotFound = errors.New("resource not found") + // ErrInconsistentData represents corrupted observation data. 
+ ErrInconsistentData = errors.New("inconsistent data") // bolt db buckets blockBucket = []byte("block") blockCount2HeightBucket = []byte("block-count-to-height") ackBucket = []byte("ack") requestBucket = []byte("request") + responseBucket = []byte("response") subscriptionBucket = []byte("subscription") blockHeightBucket = []byte("height") - logOffsetBucket = []byte("offset") // blockProducePeriod defines the block producing interval blockProducePeriod = 60 * time.Second @@ -142,7 +142,7 @@ func NewService() (service *Service, err error) { if _, err = tx.CreateBucketIfNotExists(blockHeightBucket); err != nil { return } - _, err = tx.CreateBucketIfNotExists(logOffsetBucket) + _, err = tx.CreateBucketIfNotExists(responseBucket) return }); err != nil { return @@ -170,16 +170,6 @@ func NewService() (service *Service, err error) { return } -func offsetToBytes(offset uint64) (data []byte) { - data = make([]byte, 8) - binary.BigEndian.PutUint64(data, offset) - return -} - -func bytesToOffset(data []byte) uint64 { - return uint64(binary.BigEndian.Uint64(data)) -} - func int32ToBytes(h int32) (data []byte) { data = make([]byte, 4) binary.BigEndian.PutUint32(data, uint32(h)) @@ -204,11 +194,11 @@ func (s *Service) subscribe(dbID proto.DatabaseID, resetSubscribePosition string switch resetSubscribePosition { case "newest": - fromPos = ct.ReplicateFromNewest + fromPos = types.ReplicateFromNewest case "oldest": - fromPos = ct.ReplicateFromBeginning + fromPos = types.ReplicateFromBeginning default: - fromPos = ct.ReplicateFromNewest + fromPos = types.ReplicateFromNewest } s.subscription[dbID] = fromPos @@ -219,7 +209,7 @@ func (s *Service) subscribe(dbID proto.DatabaseID, resetSubscribePosition string } else { // not resetting if _, exists := s.subscription[dbID]; !exists { - s.subscription[dbID] = ct.ReplicateFromNewest + s.subscription[dbID] = types.ReplicateFromNewest shouldStartSubscribe = true } } @@ -253,21 +243,6 @@ func (s *Service) AdviseNewBlock(req *sqlchain.MuxAdviseNewBlockReq, resp *sqlch return s.addBlock(req.DatabaseID, req.Count, req.Block) } -// AdviseAckedQuery handles acked query replication request from the remote database chain service. 
-func (s *Service) AdviseAckedQuery(req *sqlchain.MuxAdviseAckedQueryReq, resp *sqlchain.MuxAdviseAckedQueryResp) (err error) { - if atomic.LoadInt32(&s.stopped) == 1 { - // stopped - return ErrStopped - } - - if req.Query == nil { - log.WithField("node", req.GetNodeID().String()).Info("received empty acked query") - return - } - - return s.addAckedQuery(req.DatabaseID, req.Query) -} - func (s *Service) start() (err error) { if atomic.LoadInt32(&s.stopped) == 1 { // stopped @@ -322,11 +297,12 @@ func (s *Service) startSubscribe(dbID proto.DatabaseID) (err error) { return } -func (s *Service) addAckedQuery(dbID proto.DatabaseID, ack *wt.SignedAckHeader) (err error) { +func (s *Service) addAck(dbID proto.DatabaseID, height int32, offset int32, ack *types.SignedAckHeader) (err error) { log.WithFields(log.Fields{ - "ack": ack.HeaderHash.String(), - "db": dbID, - }).Debug("add ack query") + "height": height, + "ack": ack.Hash().String(), + "db": dbID, + }).Debug("add ack") if atomic.LoadInt32(&s.stopped) == 1 { // stopped @@ -340,71 +316,62 @@ func (s *Service) addAckedQuery(dbID proto.DatabaseID, ack *wt.SignedAckHeader) return } - // fetch original query - if ack.Response.Request.QueryType == wt.WriteQuery { - req := &wt.GetRequestReq{} - resp := &wt.GetRequestResp{} - - req.DatabaseID = dbID - req.LogOffset = ack.Response.LogOffset - - if err = s.minerRequest(dbID, route.DBSGetRequest.String(), req, resp); err != nil { + // store ack + return s.db.Update(func(tx *bolt.Tx) (err error) { + ab, err := tx.Bucket(ackBucket).CreateBucketIfNotExists([]byte(dbID)) + if err != nil { return } + err = ab.Put(ack.Hash().AsBytes(), utils.ConcatAll(int32ToBytes(height), int32ToBytes(offset))) + return + }) +} - key := offsetToBytes(req.LogOffset) - key = append(key, resp.Request.Header.HeaderHash.CloneBytes()...) 
+func (s *Service) addQueryTracker(dbID proto.DatabaseID, height int32, offset int32, qt *types.QueryAsTx) (err error) { + log.WithFields(log.Fields{ + "req": qt.Request.Header.Hash(), + "resp": qt.Response.Hash(), + }).Debug("add query tracker") - log.WithFields(log.Fields{ - "offset": req.LogOffset, - "reqHash": resp.Request.Header.HeaderHash.String(), - "reqQueries": resp.Request.Payload.Queries, - }).Debug("add write request") + if atomic.LoadInt32(&s.stopped) == 1 { + // stopped + return ErrStopped + } - var reqBytes *bytes.Buffer - if reqBytes, err = utils.EncodeMsgPack(resp.Request); err != nil { - return - } + s.lock.Lock() + defer s.lock.Unlock() - if err = s.db.Update(func(tx *bolt.Tx) (err error) { - qb, err := tx.Bucket(requestBucket).CreateBucketIfNotExists([]byte(dbID)) - if err != nil { - return - } - if err = qb.Put(key, reqBytes.Bytes()); err != nil { - return - } - ob, err := tx.Bucket(logOffsetBucket).CreateBucketIfNotExists([]byte(dbID)) - if err != nil { - return - } - err = ob.Put(resp.Request.Header.HeaderHash.CloneBytes(), offsetToBytes(req.LogOffset)) - return - }); err != nil { - return - } + if err = qt.Request.Verify(); err != nil { + return + } + if err = qt.Response.Verify(); err != nil { + return } - // store ack + dataBytes := utils.ConcatAll(int32ToBytes(height), int32ToBytes(offset)) + + // store request and response return s.db.Update(func(tx *bolt.Tx) (err error) { - ab, err := tx.Bucket(ackBucket).CreateBucketIfNotExists([]byte(dbID)) + reqb, err := tx.Bucket(requestBucket).CreateBucketIfNotExists([]byte(dbID)) if err != nil { return } - ackBytes, err := utils.EncodeMsgPack(ack) + resb, err := tx.Bucket(responseBucket).CreateBucketIfNotExists([]byte(dbID)) if err != nil { return } - err = ab.Put(ack.HeaderHash.CloneBytes(), ackBytes.Bytes()) + if err = reqb.Put(qt.Request.Header.Hash().AsBytes(), dataBytes); err != nil { + return + } + err = resb.Put(qt.Response.Hash().AsBytes(), dataBytes) return }) } -func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *ct.Block) (err error) { +func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *types.Block) (err error) { instance, err := s.getUpstream(dbID) h := int32(b.Timestamp().Sub(instance.GenesisBlock.Timestamp()) / blockProducePeriod) - key := int32ToBytes(h) - key = append(key, b.BlockHash().CloneBytes()...) 
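The index buckets above now store an 8-byte (height, offset) pointer instead of a full msgpack payload, and the block bucket key gains a count suffix so readers can recover the count without a second lookup. A consolidated sketch of both layouts, assuming 32-byte hashes per hash.HashSize (helper names are hypothetical; the repo composes the same bytes with int32ToBytes/bytesToInt32 and utils.ConcatAll):

package observeridx

import (
	"encoding/binary"
	"errors"
)

const hashSize = 32 // hash.HashSize in this repo

// ack/request/response bucket values: height(4) | offset(4), big-endian.
func encodePointer(height, offset int32) []byte {
	data := make([]byte, 8)
	binary.BigEndian.PutUint32(data[:4], uint32(height))
	binary.BigEndian.PutUint32(data[4:], uint32(offset))
	return data
}

func decodePointer(data []byte) (height, offset int32, err error) {
	if len(data) != 8 {
		return 0, 0, errors.New("inconsistent data") // cf. ErrInconsistentData
	}
	return int32(binary.BigEndian.Uint32(data[:4])),
		int32(binary.BigEndian.Uint32(data[4:])), nil
}

// block bucket keys: height(4) | block hash(32) | count(4), big-endian,
// so a (height, hash) prefix seek also yields the count.
func splitBlockKey(key []byte) (height, count int32, err error) {
	if len(key) < 4+hashSize+4 {
		return 0, 0, errors.New("inconsistent data")
	}
	height = int32(binary.BigEndian.Uint32(key[:4]))
	count = int32(binary.BigEndian.Uint32(key[4+hashSize:]))
	return
}
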
+ key := utils.ConcatAll(int32ToBytes(h), b.BlockHash().AsBytes(), int32ToBytes(count)) // It's actually `countToBytes` ckey := int32ToBytes(count) blockBytes, err := utils.EncodeMsgPack(b) @@ -416,9 +383,10 @@ func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *ct.Block) (err "count": count, "height": h, "producer": b.Producer(), + "block": b, }).Debugf("Add new block %v -> %v", b.BlockHash(), b.ParentHash()) - return s.db.Update(func(tx *bolt.Tx) (err error) { + if err = s.db.Update(func(tx *bolt.Tx) (err error) { bb, err := tx.Bucket(blockBucket).CreateBucketIfNotExists([]byte(dbID)) if err != nil { return @@ -431,7 +399,7 @@ func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *ct.Block) (err return } if count >= 0 { - if err = cb.Put(ckey, key); err != nil { + if err = cb.Put(ckey, int32ToBytes(h)); err != nil { return } } @@ -441,7 +409,25 @@ func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *ct.Block) (err } err = hb.Put(b.BlockHash()[:], int32ToBytes(h)) return - }) + }); err != nil { + return + } + + // save ack + for i, q := range b.Acks { + if err = s.addAck(dbID, h, int32(i), q); err != nil { + return + } + } + + // save queries + for i, q := range b.QueryTxs { + if err = s.addQueryTracker(dbID, h, int32(i), q); err != nil { + return + } + } + + return } func (s *Service) stop() (err error) { @@ -480,14 +466,14 @@ func (s *Service) minerRequest(dbID proto.DatabaseID, method string, request int return } - return s.caller.CallNode(instance.Peers.Leader.ID, method, request, response) + return s.caller.CallNode(instance.Peers.Leader, method, request, response) } -func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *wt.ServiceInstance, err error) { +func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *types.ServiceInstance, err error) { log.WithField("db", dbID).Info("get peers info for database") if iInstance, exists := s.upstreamServers.Load(dbID); exists { - instance = iInstance.(*wt.ServiceInstance) + instance = iInstance.(*types.ServiceInstance) return } @@ -501,12 +487,12 @@ func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *wt.ServiceInstan return } - req := &bp.GetDatabaseRequest{} + req := &types.GetDatabaseRequest{} req.Header.DatabaseID = dbID if err = req.Sign(privateKey); err != nil { return } - resp := &bp.GetDatabaseResponse{} + resp := &types.GetDatabaseResponse{} // get peers list from block producer if err = s.caller.CallNode(curBP, route.BPDBGetDatabase.String(), req, resp); err != nil { return @@ -521,81 +507,167 @@ func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *wt.ServiceInstan return } -func (s *Service) getAck(dbID proto.DatabaseID, h *hash.Hash) (ack *wt.SignedAckHeader, err error) { - err = s.db.View(func(tx *bolt.Tx) error { +func (s *Service) getAck(dbID proto.DatabaseID, h *hash.Hash) (ack *types.SignedAckHeader, err error) { + var ( + blockHeight int32 + dataOffset int32 + ) + + if err = s.db.View(func(tx *bolt.Tx) (err error) { bucket := tx.Bucket(ackBucket).Bucket([]byte(dbID)) if bucket == nil { return ErrNotFound } - ackBytes := bucket.Get(h.CloneBytes()) + ackBytes := bucket.Get(h.AsBytes()) if ackBytes == nil { return ErrNotFound } - return utils.DecodeMsgPack(ackBytes, &ack) - }) + // get block height and object offset in block + if len(ackBytes) != 8 { + // invalid data payload + return ErrInconsistentData + } + + blockHeight = bytesToInt32(ackBytes[:4]) + dataOffset = bytesToInt32(ackBytes[4:]) + + return + }); err != nil { + return + } + + // get data 
from block + var b *types.Block + if _, b, err = s.getBlockByHeight(dbID, blockHeight); err != nil { + return + } + + if dataOffset < 0 || int32(len(b.Acks)) <= dataOffset { + err = ErrInconsistentData + return + } + + ack = b.Acks[int(dataOffset)] + + // verify hash + ackHash := ack.Hash() + if !ackHash.IsEqual(h) { + err = ErrInconsistentData + } return } -func (s *Service) getRequest(dbID proto.DatabaseID, h *hash.Hash) (request *wt.Request, err error) { - err = s.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(logOffsetBucket).Bucket([]byte(dbID)) +func (s *Service) getRequest(dbID proto.DatabaseID, h *hash.Hash) (request *types.Request, err error) { + var ( + blockHeight int32 + dataOffset int32 + ) + if err = s.db.View(func(tx *bolt.Tx) (err error) { + bucket := tx.Bucket(requestBucket).Bucket([]byte(dbID)) if bucket == nil { return ErrNotFound } - reqKey := bucket.Get(h.CloneBytes()) - if reqKey == nil { + reqBytes := bucket.Get(h.AsBytes()) + if reqBytes == nil { return ErrNotFound } - reqKey = append([]byte{}, reqKey...) - reqKey = append(reqKey, h.CloneBytes()...) - - bucket = tx.Bucket(requestBucket).Bucket([]byte(dbID)) - if bucket == nil { - return ErrNotFound + // get block height and object offset in block + if len(reqBytes) != 8 { + // invalid data payload + return ErrInconsistentData } - reqBytes := bucket.Get(reqKey) - if reqBytes == nil { - return ErrNotFound - } + blockHeight = bytesToInt32(reqBytes[:4]) + dataOffset = bytesToInt32(reqBytes[4:]) - return utils.DecodeMsgPack(reqBytes, &request) - }) + return + }); err != nil { + return + } + + // get data from block + var b *types.Block + if _, b, err = s.getBlockByHeight(dbID, blockHeight); err != nil { + return + } + + if dataOffset < 0 || int32(len(b.QueryTxs)) <= dataOffset { + err = ErrInconsistentData + return + } + + request = b.QueryTxs[int(dataOffset)].Request + + // verify hash + reqHash := request.Header.Hash() + if !reqHash.IsEqual(h) { + err = ErrInconsistentData + } return } -func (s *Service) getRequestByOffset(dbID proto.DatabaseID, offset uint64) (request *wt.Request, err error) { - err = s.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(requestBucket).Bucket([]byte(dbID)) +func (s *Service) getResponseHeader(dbID proto.DatabaseID, h *hash.Hash) (response *types.SignedResponseHeader, err error) { + var ( + blockHeight int32 + dataOffset int32 + ) + if err = s.db.View(func(tx *bolt.Tx) (err error) { + bucket := tx.Bucket(requestBucket).Bucket([]byte(dbID)) if bucket == nil { return ErrNotFound } - keyPrefix := offsetToBytes(offset) - cur := bucket.Cursor() + respBytes := bucket.Get(h.AsBytes()) + if respBytes == nil { + return ErrNotFound + } - for k, v := cur.Seek(keyPrefix); k != nil && bytes.HasPrefix(k, keyPrefix); k, v = cur.Next() { - if v != nil { - return utils.DecodeMsgPack(v, &request) - } + // get block height and object offset in block + if len(respBytes) != 8 { + // invalid data payload + return ErrInconsistentData } - return ErrNotFound - }) + blockHeight = bytesToInt32(respBytes[:4]) + dataOffset = bytesToInt32(respBytes[4:]) + + return + }); err != nil { + return + } + + // get data from block + var b *types.Block + if _, b, err = s.getBlockByHeight(dbID, blockHeight); err != nil { + return + } + + if dataOffset < 0 || int32(len(b.QueryTxs)) <= dataOffset { + err = ErrInconsistentData + return + } + + response = b.QueryTxs[int(dataOffset)].Response + + // verify hash + respHash := response.Hash() + if !respHash.IsEqual(h) { + err = ErrInconsistentData + } return } -func (s 
*Service) getHighestBlock(dbID proto.DatabaseID) (height int32, b *ct.Block, err error) { +func (s *Service) getHighestBlock(dbID proto.DatabaseID) (height int32, b *types.Block, err error) { err = s.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket(blockBucket).Bucket([]byte(dbID)) @@ -617,7 +689,7 @@ func (s *Service) getHighestBlock(dbID proto.DatabaseID) (height int32, b *ct.Bl } func (s *Service) getHighestBlockV2( - dbID proto.DatabaseID) (count, height int32, b *ct.Block, err error, + dbID proto.DatabaseID) (count, height int32, b *types.Block, err error, ) { err = s.db.View(func(tx *bolt.Tx) (err error) { var ( @@ -651,7 +723,7 @@ func (s *Service) getHighestBlockV2( return } -func (s *Service) getBlockByHeight(dbID proto.DatabaseID, height int32) (b *ct.Block, err error) { +func (s *Service) getBlockByHeight(dbID proto.DatabaseID, height int32) (count int32, b *types.Block, err error) { err = s.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket(blockBucket).Bucket([]byte(dbID)) @@ -664,6 +736,10 @@ func (s *Service) getBlockByHeight(dbID proto.DatabaseID, height int32) (b *ct.B cur := bucket.Cursor() for k, v := cur.Seek(keyPrefix); k != nil && bytes.HasPrefix(k, keyPrefix); k, v = cur.Next() { if v != nil { + if len(k) < 4+hash.HashSize+4 { + return ErrInconsistentData + } + count = bytesToInt32(k[4+hash.HashSize:]) return utils.DecodeMsgPack(v, &b) } } @@ -675,7 +751,7 @@ func (s *Service) getBlockByHeight(dbID proto.DatabaseID, height int32) (b *ct.B } func (s *Service) getBlockByCount( - dbID proto.DatabaseID, count int32) (height int32, b *ct.Block, err error, + dbID proto.DatabaseID, count int32) (height int32, b *types.Block, err error, ) { err = s.db.View(func(tx *bolt.Tx) (err error) { var ( @@ -709,7 +785,7 @@ func (s *Service) getBlockByCount( return } -func (s *Service) getBlock(dbID proto.DatabaseID, h *hash.Hash) (height int32, b *ct.Block, err error) { +func (s *Service) getBlock(dbID proto.DatabaseID, h *hash.Hash) (count int32, height int32, b *types.Block, err error) { err = s.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket(blockHeightBucket).Bucket([]byte(dbID)) @@ -717,37 +793,45 @@ func (s *Service) getBlock(dbID proto.DatabaseID, h *hash.Hash) (height int32, b return ErrNotFound } - blockKey := bucket.Get(h.CloneBytes()) - if blockKey == nil { + blockKeyPrefix := bucket.Get(h.AsBytes()) + if blockKeyPrefix == nil { return ErrNotFound } - blockKey = append([]byte{}, blockKey...) - blockKey = append(blockKey, h.CloneBytes()...) + blockKeyPrefix = append([]byte{}, blockKeyPrefix...) + blockKeyPrefix = append(blockKeyPrefix, h.AsBytes()...) 
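Because stored block keys now end with the count, getBlock can no longer reconstruct the exact key from the height index alone; it seeks with the height+hash prefix and takes the first live entry, as the cursor loop below does. The same bbolt idiom in isolation (the helper name is hypothetical):

package observeridx

import (
	"bytes"

	bolt "github.com/coreos/bbolt"
)

// firstWithPrefix returns the first key/value pair in bucket whose key
// begins with prefix — the lookup idiom getBlock uses.
func firstWithPrefix(bucket *bolt.Bucket, prefix []byte) (k, v []byte) {
	cur := bucket.Cursor()
	for k, v = cur.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = cur.Next() {
		if v != nil { // a nil value denotes a nested bucket
			return
		}
	}
	return nil, nil
}
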
bucket = tx.Bucket(blockBucket).Bucket([]byte(dbID)) if bucket == nil { return ErrNotFound } - blockBytes := bucket.Get(blockKey) + var ( + blockKey []byte + blockBytes []byte + ) + + cur := bucket.Cursor() + for blockKey, blockBytes = cur.Seek(blockKeyPrefix); blockKey != nil && bytes.HasPrefix(blockKey, blockKeyPrefix); blockKey, blockBytes = cur.Next() { + if blockBytes != nil { + break + } + } + if blockBytes == nil { return ErrNotFound } - return utils.DecodeMsgPack(blockBytes, &b) - }) - - if err == nil { - // compute height - var instance *wt.ServiceInstance - instance, err = s.getUpstream(dbID) - if err != nil { - return + // decode count from block key + if len(blockKey) < 4+hash.HashSize+4 { + return ErrInconsistentData } - height = int32(b.Timestamp().Sub(instance.GenesisBlock.Timestamp()) / blockProducePeriod) - } + height = bytesToInt32(blockKey[:4]) + count = bytesToInt32(blockKey[4+hash.HashSize:]) + + return utils.DecodeMsgPack(blockBytes, &b) + }) return } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 5ec683a4c..91791193e 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -29,9 +29,6 @@ import ( "strconv" "strings" - "github.com/CovenantSQL/CovenantSQL/client" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/go-sqlite3-encrypt" "github.com/xo/dburl" "github.com/xo/usql/drivers" @@ -39,6 +36,10 @@ import ( "github.com/xo/usql/handler" "github.com/xo/usql/rline" "github.com/xo/usql/text" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) const name = "cql" @@ -220,8 +221,8 @@ func main() { return } - log.Infof("stable coin balance is: %#v", stableCoinBalance) - log.Infof("covenant coin balance is: %#v", covenantCoinBalance) + log.Infof("stable coin balance is: %d", stableCoinBalance) + log.Infof("covenant coin balance is: %d", covenantCoinBalance) return } diff --git a/cmd/cqld/adapter.go b/cmd/cqld/adapter.go index af8d5ad4c..eec701a13 100644 --- a/cmd/cqld/adapter.go +++ b/cmd/cqld/adapter.go @@ -20,20 +20,20 @@ import ( "bytes" "context" "database/sql" - "errors" "os" bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/consistent" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" - "github.com/CovenantSQL/CovenantSQL/twopc" + "github.com/CovenantSQL/CovenantSQL/storage" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" ) const ( @@ -51,6 +51,12 @@ type LocalStorage struct { *storage.Storage } +type compiledLog struct { + cmdType string + queries []storage.Query + nodeToSet *proto.Node +} + func initStorage(dbFile string) (stor *LocalStorage, err error) { var st *storage.Storage if st, err = storage.New(dbFile); err != nil { @@ -78,92 +84,93 @@ func initStorage(dbFile string) (stor *LocalStorage, err error) { return } -// Prepare implements twopc Worker.Prepare -func (s *LocalStorage) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) { - payload, err := s.decodeLog(wb) - if err != nil { - log.WithError(err).Error("decode log failed") 
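The adapter changes here swap the three-phase twopc worker for kayak's handler contract: an explicit payload codec plus a single Check/Commit pair, with causes wrapped via github.com/pkg/errors instead of logged and dropped. The before/after method shapes, paraphrased from this diff rather than the repo's literal interface declarations:

package sketch

import "context"

// old contract (twopc.Worker): every phase receives the still-encoded
// write batch (interface{} stands in for twopc.WriteBatch here) and
// each phase re-decodes it
type twopcWorker interface {
	Prepare(ctx context.Context, wb interface{}) error
	Commit(ctx context.Context, wb interface{}) error
	Rollback(ctx context.Context, wb interface{}) error
}

// new contract (the kayak handler methods LocalStorage implements below):
// decode once up front, then validate and apply the typed request
type kayakHandler interface {
	EncodePayload(request interface{}) (data []byte, err error)
	DecodePayload(data []byte) (request interface{}, err error)
	Check(request interface{}) error
	Commit(request interface{}) (result interface{}, err error)
}
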
+// EncodePayload implements kayak.types.Handler.EncodePayload. +func (s *LocalStorage) EncodePayload(request interface{}) (data []byte, err error) { + var buf *bytes.Buffer + if buf, err = utils.EncodeMsgPack(request); err != nil { + err = errors.Wrap(err, "encode kayak payload failed") return } - execLog, err := s.compileExecLog(payload) - if err != nil { - log.WithError(err).Error("compile exec log failed") - return - } - return s.Storage.Prepare(ctx, execLog) + + data = buf.Bytes() + return } -// Commit implements twopc Worker.Commit -func (s *LocalStorage) Commit(ctx context.Context, wb twopc.WriteBatch) (err error) { - payload, err := s.decodeLog(wb) - if err != nil { - log.WithError(err).Error("decode log failed") +// DecodePayload implements kayak.types.Handler.DecodePayload. +func (s *LocalStorage) DecodePayload(data []byte) (request interface{}, err error) { + var kp *KayakPayload + + if err = utils.DecodeMsgPack(data, &kp); err != nil { + err = errors.Wrap(err, "decode kayak payload failed") return } - return s.commit(ctx, payload) + + request = kp + return } -func (s *LocalStorage) commit(ctx context.Context, payload *KayakPayload) (err error) { - var nodeToSet proto.Node - err = utils.DecodeMsgPack(payload.Data, &nodeToSet) - if err != nil { - log.WithError(err).Error("unmarshal node from payload failed") - return - } - execLog, err := s.compileExecLog(payload) - if err != nil { - log.WithError(err).Error("compile exec log failed") +// Check implements kayak.types.Handler.Check. +func (s *LocalStorage) Check(req interface{}) (err error) { + return nil +} + +// Commit implements kayak.types.Handler.Commit. +func (s *LocalStorage) Commit(req interface{}) (_ interface{}, err error) { + var kp *KayakPayload + var cl *compiledLog + var ok bool + + if kp, ok = req.(*KayakPayload); !ok || kp == nil { + err = errors.Wrapf(kt.ErrInvalidLog, "invalid kayak payload %#v", req) return } - err = route.SetNodeAddrCache(nodeToSet.ID.ToRawNodeID(), nodeToSet.Addr) - if err != nil { - log.WithFields(log.Fields{ - "id": nodeToSet.ID, - "addr": nodeToSet.Addr, - }).WithError(err).Error("set node addr cache failed") - } - err = kms.SetNode(&nodeToSet) - if err != nil { - log.WithField("node", nodeToSet).WithError(err).Error("kms set node failed") - } - // if s.consistent == nil, it is called during Init. and AddCache will be called by consistent.InitConsistent - if s.consistent != nil { - s.consistent.AddCache(nodeToSet) + if cl, err = s.compileLog(kp); err != nil { + err = errors.Wrap(err, "compile log failed") + return } - return s.Storage.Commit(ctx, execLog) -} + if cl.nodeToSet != nil { + err = route.SetNodeAddrCache(cl.nodeToSet.ID.ToRawNodeID(), cl.nodeToSet.Addr) + if err != nil { + log.WithFields(log.Fields{ + "id": cl.nodeToSet.ID, + "addr": cl.nodeToSet.Addr, + }).WithError(err).Error("set node addr cache failed") + } + err = kms.SetNode(cl.nodeToSet) + if err != nil { + log.WithField("node", cl.nodeToSet).WithError(err).Error("kms set node failed") + } -// Rollback implements twopc Worker.Rollback -func (s *LocalStorage) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) { - payload, err := s.decodeLog(wb) - if err != nil { - log.WithError(err).Error("decode log failed") - return + // if s.consistent == nil, it is called during Init. 
and AddCache will be called by consistent.InitConsistent + if s.consistent != nil { + s.consistent.AddCache(*cl.nodeToSet) + } } - execLog, err := s.compileExecLog(payload) - if err != nil { - log.WithError(err).Error("compile exec log failed") - return + + // execute query + if _, err = s.Storage.Exec(context.Background(), cl.queries); err != nil { + err = errors.Wrap(err, "execute query in dht database failed") } - return s.Storage.Rollback(ctx, execLog) + return } -func (s *LocalStorage) compileExecLog(payload *KayakPayload) (execLog *storage.ExecLog, err error) { +func (s *LocalStorage) compileLog(payload *KayakPayload) (result *compiledLog, err error) { switch payload.Command { case CmdSet: var nodeToSet proto.Node err = utils.DecodeMsgPack(payload.Data, &nodeToSet) if err != nil { - log.WithError(err).Error("compileExecLog: unmarshal node from payload failed") + log.WithError(err).Error("compileLog: unmarshal node from payload failed") return } query := "INSERT OR REPLACE INTO `dht` (`id`, `node`) VALUES (?, ?);" log.Debugf("sql: %#v", query) - execLog = &storage.ExecLog{ - Queries: []storage.Query{ + result = &compiledLog{ + cmdType: payload.Command, + queries: []storage.Query{ { Pattern: query, Args: []sql.NamedArg{ @@ -172,16 +179,18 @@ func (s *LocalStorage) compileExecLog(payload *KayakPayload) (execLog *storage.E }, }, }, + nodeToSet: &nodeToSet, } case CmdSetDatabase: - var instance wt.ServiceInstance + var instance types.ServiceInstance if err = utils.DecodeMsgPack(payload.Data, &instance); err != nil { - log.WithError(err).Error("compileExecLog: unmarshal instance meta failed") + log.WithError(err).Error("compileLog: unmarshal instance meta failed") return } query := "INSERT OR REPLACE INTO `databases` (`id`, `meta`) VALUES (? ,?);" - execLog = &storage.ExecLog{ - Queries: []storage.Query{ + result = &compiledLog{ + cmdType: payload.Command, + queries: []storage.Query{ { Pattern: query, Args: []sql.NamedArg{ @@ -192,16 +201,17 @@ func (s *LocalStorage) compileExecLog(payload *KayakPayload) (execLog *storage.E }, } case CmdDeleteDatabase: - var instance wt.ServiceInstance + var instance types.ServiceInstance if err = utils.DecodeMsgPack(payload.Data, &instance); err != nil { - log.WithError(err).Error("compileExecLog: unmarshal instance id failed") + log.WithError(err).Error("compileLog: unmarshal instance id failed") return } // TODO(xq262144), should add additional limit 1 after delete clause // however, currently the go-sqlite3 query := "DELETE FROM `databases` WHERE `id` = ?" 
- execLog = &storage.ExecLog{ - Queries: []storage.Query{ + result = &compiledLog{ + cmdType: payload.Command, + queries: []storage.Query{ { Pattern: query, Args: []sql.NamedArg{ @@ -211,27 +221,9 @@ func (s *LocalStorage) compileExecLog(payload *KayakPayload) (execLog *storage.E }, } default: - err = errors.New("undefined command: " + payload.Command) - log.Error(err) - } - return -} - -func (s *LocalStorage) decodeLog(wb twopc.WriteBatch) (payload *KayakPayload, err error) { - var bytesPayload []byte - var ok bool - payload = new(KayakPayload) - - if bytesPayload, ok = wb.([]byte); !ok { - err = kayak.ErrInvalidLog - return - } - err = utils.DecodeMsgPack(bytesPayload, payload) - if err != nil { - log.WithError(err).Error("unmarshal payload failed") - return + err = errors.Errorf("undefined command: %v", payload.Command) + log.WithError(err).Error("compile log failed") } - return } @@ -254,20 +246,7 @@ func (s *KayakKVServer) Init(storePath string, initNodes []proto.Node) (err erro Command: CmdSet, Data: nodeBuf.Bytes(), } - - var execLog *storage.ExecLog - execLog, err = s.KVStorage.compileExecLog(payload) - if err != nil { - log.WithError(err).Error("compile exec log failed") - return - } - err = s.KVStorage.Storage.Prepare(context.Background(), execLog) - if err != nil { - log.WithError(err).Error("init kayak KV prepare node failed") - return - } - - err = s.KVStorage.commit(context.Background(), payload) + _, err = s.KVStorage.Commit(payload) if err != nil { log.WithError(err).Error("init kayak KV commit node failed") return @@ -294,15 +273,9 @@ func (s *KayakKVServer) SetNode(node *proto.Node) (err error) { Data: nodeBuf.Bytes(), } - writeData, err := utils.EncodeMsgPack(payload) - if err != nil { - log.WithError(err).Error("marshal payload failed") - return err - } - - _, err = s.Runtime.Apply(writeData.Bytes()) + _, _, err = s.Runtime.Apply(context.Background(), payload) if err != nil { - log.Errorf("Apply set node failed: %#v\nPayload:\n %#v", err, writeData) + log.Errorf("Apply set node failed: %#v\nPayload:\n %#v", err, payload) } return @@ -321,7 +294,7 @@ func (s *KayakKVServer) Reset() (err error) { } // GetDatabase implements blockproducer.DBMetaPersistence. -func (s *KayakKVServer) GetDatabase(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { +func (s *KayakKVServer) GetDatabase(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { var result [][]interface{} query := "SELECT `meta` FROM `databases` WHERE `id` = ? LIMIT 1" _, _, result, err = s.KVStorage.Query(context.Background(), []storage.Query{ @@ -354,7 +327,7 @@ func (s *KayakKVServer) GetDatabase(dbID proto.DatabaseID) (instance wt.ServiceI } // SetDatabase implements blockproducer.DBMetaPersistence. 
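With the handler owning the codec, each persistence entry point above and below now hands its typed payload straight to the runtime instead of msgpack-encoding the envelope first. A sketch of the resulting write path; the three-value Apply return is taken from the call sites in this diff, but the concrete parameter and result types are assumptions, as are the helper names:

package sketch

import "context"

// KayakPayload mirrors the command envelope defined in adapter.go.
type KayakPayload struct {
	Command string
	Data    []byte
}

// applier stands in for kayak's Runtime; the signature is inferred from
// this diff, not the library's declared API.
type applier interface {
	Apply(ctx context.Context, req interface{}) (result interface{}, logIndex uint64, err error)
}

// applyCommand replicates one command through kayak; the handler's
// EncodePayload/DecodePayload perform the msgpack round trip during
// replication, so callers no longer marshal anything themselves.
func applyCommand(rt applier, cmd string, data []byte) error {
	payload := &KayakPayload{Command: cmd, Data: data}
	_, _, err := rt.Apply(context.Background(), payload)
	return err
}
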
-func (s *KayakKVServer) SetDatabase(meta wt.ServiceInstance) (err error) { +func (s *KayakKVServer) SetDatabase(meta types.ServiceInstance) (err error) { var metaBuf *bytes.Buffer if metaBuf, err = utils.EncodeMsgPack(meta); err != nil { return @@ -365,15 +338,9 @@ func (s *KayakKVServer) SetDatabase(meta wt.ServiceInstance) (err error) { Data: metaBuf.Bytes(), } - writeData, err := utils.EncodeMsgPack(payload) - if err != nil { - log.WithError(err).Error("marshal payload failed") - return err - } - - _, err = s.Runtime.Apply(writeData.Bytes()) + _, _, err = s.Runtime.Apply(context.Background(), payload) if err != nil { - log.Errorf("Apply set database failed: %#v\nPayload:\n %#v", err, writeData) + log.Errorf("Apply set database failed: %#v\nPayload:\n %#v", err, payload) } return @@ -381,7 +348,7 @@ func (s *KayakKVServer) SetDatabase(meta wt.ServiceInstance) (err error) { // DeleteDatabase implements blockproducer.DBMetaPersistence. func (s *KayakKVServer) DeleteDatabase(dbID proto.DatabaseID) (err error) { - meta := wt.ServiceInstance{ + meta := types.ServiceInstance{ DatabaseID: dbID, } @@ -394,22 +361,16 @@ func (s *KayakKVServer) DeleteDatabase(dbID proto.DatabaseID) (err error) { Data: metaBuf.Bytes(), } - writeData, err := utils.EncodeMsgPack(payload) - if err != nil { - log.WithError(err).Error("marshal payload failed") - return err - } - - _, err = s.Runtime.Apply(writeData.Bytes()) + _, _, err = s.Runtime.Apply(context.Background(), payload) if err != nil { - log.Errorf("Apply set database failed: %#v\nPayload:\n %#v", err, writeData) + log.Errorf("Apply set database failed: %#v\nPayload:\n %#v", err, payload) } return } // GetAllDatabases implements blockproducer.DBMetaPersistence. -func (s *KayakKVServer) GetAllDatabases() (instances []wt.ServiceInstance, err error) { +func (s *KayakKVServer) GetAllDatabases() (instances []types.ServiceInstance, err error) { var result [][]interface{} query := "SELECT `meta` FROM `databases`" _, _, result, err = s.KVStorage.Query(context.Background(), []storage.Query{ @@ -422,14 +383,14 @@ func (s *KayakKVServer) GetAllDatabases() (instances []wt.ServiceInstance, err e return } - instances = make([]wt.ServiceInstance, 0, len(result)) + instances = make([]types.ServiceInstance, 0, len(result)) for _, row := range result { if len(row) <= 0 { continue } - var instance wt.ServiceInstance + var instance types.ServiceInstance var rawInstanceMeta []byte var ok bool if rawInstanceMeta, ok = row[0].([]byte); !ok { diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index e885e5d3a..ef0ed00c5 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -20,6 +20,7 @@ import ( "fmt" "os" "os/signal" + "path/filepath" "syscall" "time" @@ -29,23 +30,21 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" - ka "github.com/CovenantSQL/CovenantSQL/kayak/api" - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/twopc" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" "golang.org/x/crypto/ssh/terminal" ) const ( - //nodeDirPattern = "./node_%v" - //pubKeyStoreFile = "public.keystore" - //privateKeyFile = "private.key" - 
//dhtFileName = "dht.db" kayakServiceName = "Kayak" + kayakMethodName = "Call" + kayakWalFileName = "kayak.ldb" ) func runNode(nodeID proto.NodeID, listenAddr string) (err error) { @@ -71,19 +70,18 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { } // init nodes - log.Info("init peers") + log.WithField("node", nodeID).Info("init peers") _, peers, thisNode, err := initNodePeers(nodeID, conf.GConf.PubKeyStoreFile) if err != nil { log.WithError(err).Error("init nodes and peers failed") return } - var service *kt.ETLSTransportService var server *rpc.Server // create server - log.Info("create server") - if service, server, err = createServer( + log.WithField("addr", listenAddr).Info("create server") + if server, err = createServer( conf.GConf.PrivateKeyFile, conf.GConf.PubKeyStoreFile, masterKey, listenAddr); err != nil { log.WithError(err).Error("create server failed") return @@ -100,7 +98,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { // init kayak log.Info("init kayak runtime") var kayakRuntime *kayak.Runtime - if _, kayakRuntime, err = initKayakTwoPC(rootPath, thisNode, peers, st, service); err != nil { + if kayakRuntime, err = initKayakTwoPC(rootPath, thisNode, peers, st, server); err != nil { log.WithError(err).Error("init kayak runtime failed") return } @@ -156,8 +154,8 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { server, peers, nodeID, - 2*time.Second, - 900*time.Millisecond, + time.Minute, + 20*time.Second, ) chain, err := bp.NewChain(chainConfig) if err != nil { @@ -192,35 +190,57 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { return } -func createServer(privateKeyPath, pubKeyStorePath string, masterKey []byte, listenAddr string) (service *kt.ETLSTransportService, server *rpc.Server, err error) { - os.Remove(pubKeyStorePath) - +func createServer(privateKeyPath, pubKeyStorePath string, masterKey []byte, listenAddr string) (server *rpc.Server, err error) { server = rpc.NewServer() - if err != nil { - return - } - err = server.InitRPCServer(listenAddr, privateKeyPath, masterKey) - service = ka.NewMuxService(kayakServiceName, server) + if err = server.InitRPCServer(listenAddr, privateKeyPath, masterKey); err != nil { + err = errors.Wrap(err, "init rpc server failed") + } return } -func initKayakTwoPC(rootDir string, node *proto.Node, peers *kayak.Peers, worker twopc.Worker, service *kt.ETLSTransportService) (config kayak.Config, runtime *kayak.Runtime, err error) { +func initKayakTwoPC(rootDir string, node *proto.Node, peers *proto.Peers, h kt.Handler, server *rpc.Server) (runtime *kayak.Runtime, err error) { // create kayak config - log.Info("create twopc config") - config = ka.NewTwoPCConfig(rootDir, service, worker) + log.Info("create kayak config") + + walPath := filepath.Join(rootDir, kayakWalFileName) + + var logWal kt.Wal + if logWal, err = kl.NewLevelDBWal(walPath); err != nil { + err = errors.Wrap(err, "init kayak log pool failed") + return + } + + config := &kt.RuntimeConfig{ + Handler: h, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: time.Second * 60, + Peers: peers, + Wal: logWal, + NodeID: node.ID, + ServiceName: kayakServiceName, + MethodName: kayakMethodName, + } // create kayak runtime - log.Info("create kayak runtime") - runtime, err = ka.NewTwoPCKayak(peers, config) - if err != nil { + log.Info("init kayak runtime") + if runtime, err = kayak.NewRuntime(config); err != nil { + err = errors.Wrap(err, "init kayak runtime failed") + 
return + } + + // register rpc service + if _, err = NewKayakService(server, kayakServiceName, runtime); err != nil { + err = errors.Wrap(err, "init kayak rpc service failed") return } // init runtime - log.Info("init kayak twopc runtime") - err = runtime.Init() + log.Info("start kayak runtime") + runtime.Start() return } diff --git a/cmd/cqld/initconf.go b/cmd/cqld/initconf.go index cf2f96a0d..961f6c950 100644 --- a/cmd/cqld/initconf.go +++ b/cmd/cqld/initconf.go @@ -20,33 +20,22 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils/log" ) -func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]proto.Node, peers *kayak.Peers, thisNode *proto.Node, err error) { +func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]proto.Node, peers *proto.Peers, thisNode *proto.Node, err error) { privateKey, err := kms.GetLocalPrivateKey() if err != nil { log.WithError(err).Fatal("get local private key failed") } - publicKey, err := kms.GetLocalPublicKey() - if err != nil { - log.WithError(err).Fatal("get local public key failed") - } - - leader := &kayak.Server{ - Role: proto.Leader, - ID: conf.GConf.BP.NodeID, - PubKey: publicKey, - } - peers = &kayak.Peers{ - Term: 1, - Leader: leader, - Servers: []*kayak.Server{}, - PubKey: publicKey, + peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 1, + Leader: conf.GConf.BP.NodeID, + }, } if conf.GConf.KnownNodes != nil { @@ -54,11 +43,7 @@ func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]pro if n.Role == proto.Leader || n.Role == proto.Follower { //FIXME all KnownNodes conf.GConf.KnownNodes[i].PublicKey = kms.BP.PublicKey - peers.Servers = append(peers.Servers, &kayak.Server{ - Role: n.Role, - ID: n.ID, - PubKey: publicKey, - }) + peers.Servers = append(peers.Servers, n.ID) } } } @@ -77,7 +62,7 @@ func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]pro // set p route and public keystore if conf.GConf.KnownNodes != nil { - for _, p := range conf.GConf.KnownNodes { + for i, p := range conf.GConf.KnownNodes { rawNodeIDHash, err := hash.NewHashFromStr(string(p.ID)) if err != nil { log.WithError(err).Error("load hash from node id failed") @@ -102,7 +87,7 @@ func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]pro } if p.ID == nodeID { kms.SetLocalNodeIDNonce(rawNodeID.CloneBytes(), &p.Nonce) - thisNode = &p + thisNode = &conf.GConf.KnownNodes[i] } } } diff --git a/cmd/cqld/kayak.go b/cmd/cqld/kayak.go new file mode 100644 index 000000000..2d64c9e04 --- /dev/null +++ b/cmd/cqld/kayak.go @@ -0,0 +1,44 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package main
+
+import (
+	"github.com/CovenantSQL/CovenantSQL/kayak"
+	kt "github.com/CovenantSQL/CovenantSQL/kayak/types"
+	"github.com/CovenantSQL/CovenantSQL/rpc"
+)
+
+// KayakService defines the kayak leader RPC service of the block producer.
+type KayakService struct {
+	serviceName string
+	rt          *kayak.Runtime
+}
+
+// NewKayakService returns a new kayak service instance for block producer consensus.
+func NewKayakService(server *rpc.Server, serviceName string, rt *kayak.Runtime) (s *KayakService, err error) {
+	s = &KayakService{
+		serviceName: serviceName,
+		rt:          rt,
+	}
+	err = server.RegisterService(serviceName, s)
+	return
+}
+
+// Call handles the kayak RPC call by feeding the received log to the local follower runtime.
+func (s *KayakService) Call(req *kt.RPCRequest, _ *interface{}) (err error) {
+	return s.rt.FollowerApply(req.Log)
+}
diff --git a/cmd/hotfix/hash-upgrade/main.go b/cmd/hotfix/hash-upgrade/main.go
index 0b51cfa96..7e4e89a93 100644
--- a/cmd/hotfix/hash-upgrade/main.go
+++ b/cmd/hotfix/hash-upgrade/main.go
@@ -21,20 +21,21 @@ import (
 	"context"
 	"database/sql"
 	"encoding/binary"
+	"encoding/json"
 	"flag"
 	"fmt"
 	"os/exec"
 	"strings"
 	"time"
 
+	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
 	"github.com/CovenantSQL/CovenantSQL/crypto/kms"
-	"github.com/CovenantSQL/CovenantSQL/kayak"
 	"github.com/CovenantSQL/CovenantSQL/proto"
-	"github.com/CovenantSQL/CovenantSQL/sqlchain/storage"
-	ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types"
+	ct "github.com/CovenantSQL/CovenantSQL/sqlchain/otypes"
+	"github.com/CovenantSQL/CovenantSQL/storage"
 	"github.com/CovenantSQL/CovenantSQL/utils"
 	"github.com/CovenantSQL/CovenantSQL/utils/log"
-	wt "github.com/CovenantSQL/CovenantSQL/worker/types"
+	wt "github.com/CovenantSQL/CovenantSQL/worker/otypes"
 )
 
 var (
@@ -81,14 +82,53 @@ func (b *OldBlock) UnmarshalBinary(data []byte) (err error) {
 	return
 }
 
-// ServiceInstance defines the old service instance type before marshaller updates.
-type ServiceInstance struct {
+// OldServer ports back the original kayak server structure.
+type OldServer struct {
+	Role   proto.ServerRole
+	ID     proto.NodeID
+	PubKey *asymmetric.PublicKey
+}
+
+// OldPeers ports back the original kayak peers structure.
+type OldPeers struct {
+	Term      uint64
+	Leader    *OldServer
+	Servers   []*OldServer
+	PubKey    *asymmetric.PublicKey
+	Signature *asymmetric.Signature
+}
+
+// PlainOldServiceInstance defines the plain old service instance type before marshaller updates.
+type PlainOldServiceInstance struct {
 	DatabaseID   proto.DatabaseID
-	Peers        *kayak.Peers
+	Peers        *OldPeers
 	ResourceMeta wt.ResourceMeta
 	GenesisBlock *OldBlock
 }
 
+// OldServiceInstance defines the old service instance type before marshaller updates.
+type OldServiceInstance struct { + DatabaseID proto.DatabaseID + Peers *OldPeers + ResourceMeta wt.ResourceMeta + GenesisBlock *ct.Block +} + +func convertPeers(oldPeers *OldPeers) (newPeers *proto.Peers) { + if oldPeers == nil { + return + } + + newPeers = new(proto.Peers) + for _, s := range oldPeers.Servers { + newPeers.Servers = append(newPeers.Servers, s.ID) + } + newPeers.Leader = oldPeers.Leader.ID + newPeers.Term = oldPeers.Term + + return +} + func main() { flag.Parse() @@ -141,8 +181,8 @@ func main() { } else { // detect if the genesis block is in old version if strings.Contains(fmt.Sprintf("%#v", testDecode), "\"GenesisBlock\":[]uint8") { - log.Info("detected old version") - var instance ServiceInstance + log.Info("detected plain old version (without msgpack tag and use custom serializer)") + var instance PlainOldServiceInstance if err := utils.DecodeMsgPackPlain(rawInstance, &instance); err != nil { log.WithError(err).Fatal("decode msgpack failed") @@ -150,12 +190,25 @@ func main() { } newInstance.DatabaseID = instance.DatabaseID - newInstance.Peers = instance.Peers + newInstance.Peers = convertPeers(instance.Peers) newInstance.ResourceMeta = instance.ResourceMeta newInstance.GenesisBlock = &ct.Block{ SignedHeader: instance.GenesisBlock.SignedHeader, Queries: instance.GenesisBlock.Queries, } + } else if strings.Contains(fmt.Sprintf("%#v", testDecode), "\"PubKey\"") { + log.Info("detected old version (old kayak implementation [called as kaar])") + var instance OldServiceInstance + + if err := utils.DecodeMsgPack(rawInstance, &instance); err != nil { + log.WithError(err).Fatal("decode msgpack failed") + return + } + + newInstance.DatabaseID = instance.DatabaseID + newInstance.Peers = convertPeers(instance.Peers) + newInstance.ResourceMeta = instance.ResourceMeta + newInstance.GenesisBlock = instance.GenesisBlock } else { log.Info("detected new version, need re-signature") @@ -163,24 +216,25 @@ func main() { log.WithError(err).Fatal("decode msgpack failed") return } + } - // set genesis block to now - newInstance.GenesisBlock.SignedHeader.Timestamp = time.Now().UTC() + // set genesis block to now + newInstance.GenesisBlock.SignedHeader.Timestamp = time.Now().UTC() - // sign peers again - if err := newInstance.Peers.Sign(privateKey); err != nil { - log.WithError(err).Fatal("sign peers failed") - return - } + // sign peers again + if err := newInstance.Peers.Sign(privateKey); err != nil { + log.WithError(err).Fatal("sign peers failed") + return + } - if err := newInstance.GenesisBlock.PackAndSignBlock(privateKey); err != nil { - log.WithError(err).Fatal("sign genesis block failed") - return - } + if err := newInstance.GenesisBlock.PackAndSignBlock(privateKey); err != nil { + log.WithError(err).Fatal("sign genesis block failed") + return } } - log.Infof("database is: %#v -> %#v", id, newInstance) + d, _ := json.Marshal(newInstance) + log.Infof("database is: %#v -> %s", id, string(d)) // encode and put back to database rawInstanceBuffer, err := utils.EncodeMsgPack(newInstance) diff --git a/cmd/hotfix/observer-upgrade/main.go b/cmd/hotfix/observer-upgrade/main.go index 137b8aaf6..c650f306d 100644 --- a/cmd/hotfix/observer-upgrade/main.go +++ b/cmd/hotfix/observer-upgrade/main.go @@ -23,7 +23,7 @@ import ( "os" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + ct "github.com/CovenantSQL/CovenantSQL/sqlchain/otypes" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/coreos/bbolt" 
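The hotfix funnels both legacy on-disk formats through convertPeers above, dropping the per-server roles, public keys and the old aggregate signature that proto.Peers no longer carries, then re-signs the converted peers (the newInstance.Peers.Sign call above). A small usage sketch; the node IDs are hypothetical and only the field names come from this patch:

old := &OldPeers{
	Term:   3,
	Leader: &OldServer{ID: proto.NodeID("node-a")},
	Servers: []*OldServer{
		{ID: proto.NodeID("node-a"), Role: proto.Leader},
		{ID: proto.NodeID("node-b"), Role: proto.Follower},
	},
}
newPeers := convertPeers(old)
// newPeers.Term == 3, newPeers.Leader == proto.NodeID("node-a")
// newPeers.Servers == []proto.NodeID{"node-a", "node-b"}
// Roles and keys are intentionally dropped; the caller re-signs newPeers.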
diff --git a/conf/config.go b/conf/config.go
index 59e4a8e0c..29ae8f1da 100644
--- a/conf/config.go
+++ b/conf/config.go
@@ -177,14 +177,5 @@ func LoadConfig(configPath string) (config *Config, err error) {
 	if config.Miner != nil && !path.IsAbs(config.Miner.RootDir) {
 		config.Miner.RootDir = path.Join(configDir, config.Miner.RootDir)
 	}
-	/*
-		The `go test -race` makes BP catch up block too slow, so let's make
-		genesis block just one day ago in test mode
-	*/
-	if config.IsTestMode {
-		if config.BP != nil {
-			config.BP.BPGenesis.Timestamp = time.Now().AddDate(0, 0, -1)
-		}
-	}
 	return
 }
diff --git a/crypto/asymmetric/signature.go b/crypto/asymmetric/signature.go
index 77ac14580..ec7e59804 100644
--- a/crypto/asymmetric/signature.go
+++ b/crypto/asymmetric/signature.go
@@ -96,6 +96,9 @@ func (s *Signature) Verify(hash []byte, signee *PublicKey) bool {
 	if BypassSignature {
 		return true
 	}
+	if signee == nil || s == nil {
+		return false
+	}
 	signature := make([]byte, 64)
 	copy(signature, utils.PaddedBigBytes(s.R, 32))
diff --git a/crypto/etls/conn.go b/crypto/etls/conn.go
index fe471adfc..d3c81359d 100644
--- a/crypto/etls/conn.go
+++ b/crypto/etls/conn.go
@@ -122,6 +122,9 @@ func (c *CryptoConn) Write(b []byte) (n int, err error) {
 // Close closes the connection.
 // Any blocked Read or Write operations will be unblocked and return errors.
 func (c *CryptoConn) Close() error {
+	if c.Conn == nil {
+		return nil
+	}
 	return c.Conn.Close()
 }
diff --git a/crypto/hash/hash.go b/crypto/hash/hash.go
index 8108e9f3e..74b9e49e4 100644
--- a/crypto/hash/hash.go
+++ b/crypto/hash/hash.go
@@ -48,6 +48,11 @@ func (h Hash) String() string {
 	return hex.EncodeToString(h[:])
 }
 
+// AsBytes returns the internal bytes of the hash.
+func (h Hash) AsBytes() []byte {
+	return h[:]
+}
+
 // CloneBytes returns a copy of the bytes which represent the hash as a byte
 // slice.
 //
diff --git a/crypto/verifier/common.go b/crypto/verifier/common.go
new file mode 100644
index 000000000..41fa8ecb4
--- /dev/null
+++ b/crypto/verifier/common.go
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package verifier
+
+import (
+	ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
+	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
+	"github.com/pkg/errors"
+)
+
+//go:generate hsp
+
+// MarshalHasher is the interface implemented by an object that can be stably marshalled for hashing.
+type MarshalHasher interface {
+	MarshalHash() ([]byte, error)
+}
+
+// HashSignVerifier is the interface implemented by an object that contains the hash value of a
+// MarshalHasher, which can be signed by a private key and verified later.
+type HashSignVerifier interface {
+	Hash() hash.Hash
+	Sign(MarshalHasher, *ca.PrivateKey) error
+	Verify(MarshalHasher) error
+}
+
+// DefaultHashSignVerifierImpl defines a default implementation of HashSignVerifier.
+type DefaultHashSignVerifierImpl struct { + DataHash hash.Hash + Signee *ca.PublicKey + Signature *ca.Signature +} + +// Hash implements HashSignVerifier.Hash. +func (i *DefaultHashSignVerifierImpl) Hash() hash.Hash { + return i.DataHash +} + +// Sign implements HashSignVerifier.Sign. +func (i *DefaultHashSignVerifierImpl) Sign(mh MarshalHasher, signer *ca.PrivateKey) (err error) { + var enc []byte + if enc, err = mh.MarshalHash(); err != nil { + return + } + var h = hash.THashH(enc) + if i.Signature, err = signer.Sign(h[:]); err != nil { + return + } + i.DataHash = h + i.Signee = signer.PubKey() + return +} + +// Verify implements HashSignVerifier.Verify. +func (i *DefaultHashSignVerifierImpl) Verify(mh MarshalHasher) (err error) { + var enc []byte + if enc, err = mh.MarshalHash(); err != nil { + return + } + var h = hash.THashH(enc) + if !i.DataHash.IsEqual(&h) { + err = errors.WithStack(ErrHashValueNotMatch) + return + } + if !i.Signature.Verify(h[:], i.Signee) { + err = errors.WithStack(ErrSignatureNotMatch) + return + } + return +} diff --git a/crypto/verifier/common_gen.go b/crypto/verifier/common_gen.go new file mode 100644 index 000000000..943ac0eda --- /dev/null +++ b/crypto/verifier/common_gen.go @@ -0,0 +1,59 @@ +package verifier + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *DefaultHashSignVerifierImpl) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if z.Signee == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x83) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x83) + if oTemp, err := z.DataHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DefaultHashSignVerifierImpl) Msgsize() (s int) { + s = 1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 9 + z.DataHash.Msgsize() + return +} diff --git a/chain/xxx_gen_test_test.go b/crypto/verifier/common_gen_test.go similarity index 66% rename from chain/xxx_gen_test_test.go rename to crypto/verifier/common_gen_test.go index 04c1be79d..4f73911c7 100644 --- a/chain/xxx_gen_test_test.go +++ b/crypto/verifier/common_gen_test.go @@ -1,4 +1,4 @@ -package chain +package verifier // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
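The generated MarshalHash above produces the stable encoding that DefaultHashSignVerifierImpl operates on: Sign hashes the encoding with hash.THashH and signs the digest, while Verify recomputes the digest before checking the signature, so a mismatch in either the data hash or the signature fails closed. A compact sketch of that round trip, assuming only APIs shown in this patch (obj is any MarshalHasher, priv an asymmetric private key):

enc, _ := obj.MarshalHash()                // stable encoding of the object
digest := hash.THashH(enc)                 // becomes DataHash after Sign
sig, _ := priv.Sign(digest[:])             // becomes Signature
ok := sig.Verify(digest[:], priv.PubKey()) // the check that Verify repeats
_ = ok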
@@ -9,8 +9,8 @@ import ( "testing" ) -func TestMarshalHashDemoHeader(t *testing.T) { - v := DemoHeader{} +func TestMarshalHashDefaultHashSignVerifierImpl(t *testing.T) { + v := DefaultHashSignVerifierImpl{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -25,8 +25,8 @@ func TestMarshalHashDemoHeader(t *testing.T) { } } -func BenchmarkMarshalHashDemoHeader(b *testing.B) { - v := DemoHeader{} +func BenchmarkMarshalHashDefaultHashSignVerifierImpl(b *testing.B) { + v := DefaultHashSignVerifierImpl{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -34,8 +34,8 @@ func BenchmarkMarshalHashDemoHeader(b *testing.B) { } } -func BenchmarkAppendMsgDemoHeader(b *testing.B) { - v := DemoHeader{} +func BenchmarkAppendMsgDefaultHashSignVerifierImpl(b *testing.B) { + v := DefaultHashSignVerifierImpl{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) diff --git a/crypto/verifier/common_test.go b/crypto/verifier/common_test.go new file mode 100644 index 000000000..953ce61f8 --- /dev/null +++ b/crypto/verifier/common_test.go @@ -0,0 +1,101 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package verifier + +import ( + "math/big" + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/pkg/errors" + . 
"github.com/smartystreets/goconvey/convey" +) + +var ( + MockHash = []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } +) + +type MockHeader struct{} + +func (*MockHeader) MarshalHash() ([]byte, error) { + return MockHash, nil +} + +type MockObject struct { + MockHeader + HSV DefaultHashSignVerifierImpl +} + +func (o *MockObject) Sign(signer *asymmetric.PrivateKey) error { + return o.HSV.Sign(&o.MockHeader, signer) +} + +func (o *MockObject) Verify() error { + return o.HSV.Verify(&o.MockHeader) +} + +func TestDefaultHashSignVerifierImpl(t *testing.T) { + Convey("Given a dummy object and a pair of keys", t, func() { + var ( + obj = &MockObject{} + priv, _, err = asymmetric.GenSecp256k1KeyPair() + ) + So(err, ShouldBeNil) + So(priv, ShouldNotBeNil) + Convey("When the object is signed by the key pair", func() { + err = obj.Sign(priv) + So(err, ShouldBeNil) + Convey("The object should be verifiable", func() { + err = obj.Verify() + So(err, ShouldBeNil) + }) + Convey("The object should have data hash", func() { + So(obj.HSV.Hash(), ShouldEqual, hash.THashH(MockHash)) + }) + Convey("When the hash is modified", func() { + obj.HSV.DataHash = hash.Hash{0x0, 0x0, 0x0, 0x1} + Convey("The verifier should return hash value not match error", func() { + err = errors.Cause(obj.Verify()) + So(err, ShouldEqual, ErrHashValueNotMatch) + }) + }) + Convey("When the signee is modified", func() { + var _, pub, err = asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + obj.HSV.Signee = pub + Convey("The verifier should return signature not match error", func() { + err = errors.Cause(obj.Verify()) + So(err, ShouldEqual, ErrSignatureNotMatch) + }) + }) + Convey("When the signature is modified", func() { + var val = obj.HSV.Signature.R + val.Add(val, big.NewInt(1)) + Convey("The verifier should return signature not match error", func() { + err = errors.Cause(obj.Verify()) + So(err, ShouldEqual, ErrSignatureNotMatch) + }) + }) + }) + }) +} diff --git a/crypto/verifier/errors.go b/crypto/verifier/errors.go new file mode 100644 index 000000000..57da3f1f0 --- /dev/null +++ b/crypto/verifier/errors.go @@ -0,0 +1,26 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package verifier + +import "errors" + +var ( + // ErrHashValueNotMatch indicates the hash value not match error from verifier. + ErrHashValueNotMatch = errors.New("hash value not match") + // ErrSignatureNotMatch indicates the signature not match error from verifier. 
+ ErrSignatureNotMatch = errors.New("signature not match") +) diff --git a/docker-compose.yml b/docker-compose.yml index 3f6ab07a3..e4b1fdf5d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,8 +1,8 @@ -version: '3' +version: "3" services: covenantsql_bp_0: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_bp_0 restart: always ports: @@ -21,7 +21,7 @@ services: max-size: "1m" max-file: "10" covenantsql_bp_1: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_bp_1 restart: always ports: @@ -40,7 +40,7 @@ services: max-size: "1m" max-file: "10" covenantsql_bp_2: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_bp_2 restart: always ports: @@ -59,7 +59,7 @@ services: max-size: "1m" max-file: "10" covenantsql_miner_0: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_miner_0 restart: always ports: @@ -78,7 +78,7 @@ services: max-size: "1m" max-file: "10" covenantsql_miner_1: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_miner_1 restart: always ports: @@ -97,7 +97,7 @@ services: max-size: "1m" max-file: "10" covenantsql_miner_2: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_miner_2 restart: always ports: @@ -116,28 +116,32 @@ services: max-size: "1m" max-file: "10" covenantsql_adapter: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_adapter restart: always ports: - "11105:4661" environment: COVENANT_ROLE: adapter - COVENANT_CONF: ./node_c/config.yaml + COVENANT_CONF: ./node_adapter/config.yaml volumes: - - ./test/service/node_c/config.yaml:/app/config.yaml - - ./test/service/node_c/private.key:/app/private.key - - ./test/service/node_c/:/app/node_c/ + - ./test/service/node_adapter/config.yaml:/app/config.yaml + - ./test/service/node_adapter/private.key:/app/private.key + - ./test/service/node_adapter/:/app/node_adapter/ networks: default: ipv4_address: 172.254.1.8 covenantsql_observer: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_observer restart: always ports: - "11106:4663" - command: ["-database", "057e55460f501ad071383c95f691293f2f0a7895988e22593669ceeb52a6452a", "-reset", "oldest", "-listen", "0.0.0.0:4663"] + command: + [ + "-listen", + "0.0.0.0:4663", + ] environment: COVENANT_ROLE: observer COVENANT_CONF: ./node_observer/config.yaml @@ -152,7 +156,7 @@ services: max-size: "1m" max-file: "10" covenantsql_mysql_adapter: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_mysql_adapter restart: always ports: @@ -171,6 +175,20 @@ services: options: max-size: "1m" max-file: "10" + covenantsql_explorer: + image: covenantsql/explorer:latest + container_name: covenantsql_explorer + depends_on: + - covenantsql_observer + restart: always + ports: + - "11108:80" + environment: + COVENANTSQL_EXPLORER_DOMAIN: localhost + COVENANTSQL_OBSERVER_ADDR: covenantsql_observer:4663 + logging: + options: + max-size: "5m" networks: default: diff --git a/kayak/api/twopc.go b/kayak/api/twopc.go deleted file mode 100644 index 91b8231eb..000000000 --- a/kayak/api/twopc.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package api - -import ( - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -var ( - // DefaultProcessTimeout defines package default process timeout. - DefaultProcessTimeout = time.Second * 5 - // DefaultTransportID defines default transport id for service multiplex. - DefaultTransportID = "DEFAULT" -) - -// TwoPCOptions defines optional arguments for kayak twopc config. -type TwoPCOptions struct { - ProcessTimeout time.Duration - NodeID proto.NodeID - TransportID string - Logger *log.Logger -} - -// NewTwoPCOptions creates empty twopc configuration options. -func NewTwoPCOptions() *TwoPCOptions { - return &TwoPCOptions{ - ProcessTimeout: DefaultProcessTimeout, - TransportID: DefaultTransportID, - } -} - -// NewDefaultTwoPCOptions creates twopc configuration options with default settings. -func NewDefaultTwoPCOptions() *TwoPCOptions { - nodeID, _ := kms.GetLocalNodeID() - return NewTwoPCOptions().WithNodeID(nodeID) -} - -// WithProcessTimeout set custom process timeout to options. -func (o *TwoPCOptions) WithProcessTimeout(timeout time.Duration) *TwoPCOptions { - o.ProcessTimeout = timeout - return o -} - -// WithNodeID set custom node id to options. -func (o *TwoPCOptions) WithNodeID(nodeID proto.NodeID) *TwoPCOptions { - o.NodeID = nodeID - return o -} - -// WithTransportID set custom transport id to options. -func (o *TwoPCOptions) WithTransportID(id string) *TwoPCOptions { - o.TransportID = id - return o -} - -// WithLogger set custom logger to options. -func (o *TwoPCOptions) WithLogger(l *log.Logger) *TwoPCOptions { - o.Logger = l - return o -} - -// NewTwoPCKayak creates new kayak runtime. -func NewTwoPCKayak(peers *kayak.Peers, config kayak.Config) (*kayak.Runtime, error) { - return kayak.NewRuntime(config, peers) -} - -// NewTwoPCConfig creates new twopc config object. -func NewTwoPCConfig(rootDir string, service *kt.ETLSTransportService, worker twopc.Worker) kayak.Config { - return NewTwoPCConfigWithOptions(rootDir, service, worker, NewDefaultTwoPCOptions()) -} - -// NewTwoPCConfigWithOptions creates new twopc config object with custom options. 
-func NewTwoPCConfigWithOptions(rootDir string, service *kt.ETLSTransportService, - worker twopc.Worker, options *TwoPCOptions) kayak.Config { - runner := kayak.NewTwoPCRunner() - xptCfg := &kt.ETLSTransportConfig{ - TransportService: service, - NodeID: options.NodeID, - TransportID: options.TransportID, - ServiceName: service.ServiceName, - } - xpt := kt.NewETLSTransport(xptCfg) - cfg := &kayak.TwoPCConfig{ - RuntimeConfig: kayak.RuntimeConfig{ - RootDir: rootDir, - LocalID: options.NodeID, - Runner: runner, - Transport: xpt, - ProcessTimeout: options.ProcessTimeout, - }, - Storage: worker, - } - - return cfg -} diff --git a/kayak/api/twopc_integ_test.go b/kayak/api/twopc_integ_test.go deleted file mode 100644 index 1fa52ab38..000000000 --- a/kayak/api/twopc_integ_test.go +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package api - -import ( - "context" - "crypto/rand" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sync" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -// MockWorker is an autogenerated mock type for the Worker type -type MockWorker struct { - mock.Mock -} - -// Commit provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Commit(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Prepare provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Rollback provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type CallCollector struct { - l sync.Mutex - callOrder []string -} - -func (c *CallCollector) Append(call string) { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = append(c.callOrder, call) -} - -func (c *CallCollector) Get() []string { - c.l.Lock() - defer c.l.Unlock() - return c.callOrder[:] -} - -func (c *CallCollector) Reset() { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = c.callOrder[:0] -} - -type mockRes struct { - rootDir string - nodeID proto.NodeID - worker *MockWorker - server *rpc.Server - config kayak.Config - runtime *kayak.Runtime - listenAddr string -} - -func initKMS() (err error) { - var f *os.File - f, err = ioutil.TempFile("", "keystore_") - f.Close() - os.Remove(f.Name()) - route.InitKMS(f.Name()) - - // flag as test - kms.Unittest = true - - return -} - -func testWithNewNode() (mock *mockRes, err error) { - mock = &mockRes{} - addr := "127.0.0.1:0" - - // random node id - randBytes := make([]byte, 4) - rand.Read(randBytes) - mock.nodeID = proto.NodeID(hash.THashH(randBytes).String()) - kms.SetLocalNodeIDNonce(mock.nodeID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - - // mock rpc server - mock.server, err = rpc.NewServerWithService(rpc.ServiceMap{}) - if err != nil { - return - } - _, testFile, _, _ := runtime.Caller(0) - privKeyPath := filepath.Join(filepath.Dir(testFile), "../../test/node_standalone/private.key") - if err = mock.server.InitRPCServer(addr, privKeyPath, []byte("")); err != nil { - return - } - mock.listenAddr = mock.server.Listener.Addr().String() - route.SetNodeAddrCache(mock.nodeID.ToRawNodeID(), mock.listenAddr) - var nonce *cpuminer.Uint256 - if nonce, err = kms.GetLocalNonce(); err != nil { - return - } - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - if err = kms.SetPublicKey(mock.nodeID, *nonce, pubKey); err != nil { - return - } - - // create mux service for kayak - service := NewMuxService("Kayak", mock.server) - mock.rootDir, err = ioutil.TempDir("", "kayak_test") - if err != nil { - return - } - - // worker - mock.worker = &MockWorker{} - - // create two pc config - options := NewTwoPCOptions(). - WithNodeID(mock.nodeID). - WithProcessTimeout(time.Millisecond * 300). - WithTransportID(DefaultTransportID). 
- WithLogger(log.StandardLogger()) - mock.config = NewTwoPCConfigWithOptions(mock.rootDir, service, mock.worker, options) - - return -} - -func createRuntime(peers *kayak.Peers, mock *mockRes) (err error) { - mock.runtime, err = NewTwoPCKayak(peers, mock.config) - return -} - -func testPeersFixture(term uint64, servers []*kayak.Server) *kayak.Peers { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - - newServers := make([]*kayak.Server, 0, len(servers)) - var leaderNode *kayak.Server - - for _, s := range servers { - newS := &kayak.Server{ - Role: s.Role, - ID: s.ID, - PubKey: pubKey, - } - newServers = append(newServers, newS) - if newS.Role == proto.Leader { - leaderNode = newS - } - } - - peers := &kayak.Peers{ - Term: term, - Leader: leaderNode, - Servers: servers, - PubKey: pubKey, - } - - peers.Sign(privKey) - - return peers -} - -func TestExampleTwoPCCommit(t *testing.T) { - // cleanup log storage after execution - cleanupDir := func(c *mockRes) { - os.RemoveAll(c.rootDir) - } - - // only commit logic - Convey("commit", t, func() { - var err error - - err = initKMS() - So(err, ShouldBeNil) - - lMock, err := testWithNewNode() - So(err, ShouldBeNil) - f1Mock, err := testWithNewNode() - So(err, ShouldBeNil) - f2Mock, err := testWithNewNode() - So(err, ShouldBeNil) - - // peers is a simple 3-node peer configuration - peers := testPeersFixture(1, []*kayak.Server{ - { - Role: proto.Leader, - ID: lMock.nodeID, - }, - { - Role: proto.Follower, - ID: f1Mock.nodeID, - }, - { - Role: proto.Follower, - ID: f2Mock.nodeID, - }, - }) - defer cleanupDir(lMock) - defer cleanupDir(f1Mock) - defer cleanupDir(f2Mock) - - // create runtime - err = createRuntime(peers, lMock) - So(err, ShouldBeNil) - err = createRuntime(peers, f1Mock) - So(err, ShouldBeNil) - err = createRuntime(peers, f2Mock) - So(err, ShouldBeNil) - - // init - err = lMock.runtime.Init() - So(err, ShouldBeNil) - err = f1Mock.runtime.Init() - So(err, ShouldBeNil) - err = f2Mock.runtime.Init() - So(err, ShouldBeNil) - - // make request issuer as leader node - kms.SetLocalNodeIDNonce(lMock.nodeID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - - // payload to send - testPayload := []byte("test data") - - // underlying worker mock, prepare/commit/rollback with be received the decoded data - callOrder := &CallCollector{} - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f1Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_commit") - }) - f2Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_commit") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_prepare") - }) - lMock.worker.On("Commit", mock.Anything, testPayload). 
- Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_commit") - }) - - // start server - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - lMock.server.Serve() - }() - wg.Add(1) - go func() { - defer wg.Done() - f1Mock.server.Serve() - }() - wg.Add(1) - go func() { - defer wg.Done() - f2Mock.server.Serve() - }() - - // process the encoded data - _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - "l_prepare", - "f_commit", - "f_commit", - "l_commit", - }) - - // process the encoded data again - callOrder.Reset() - _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - "l_prepare", - "f_commit", - "f_commit", - "l_commit", - }) - - // shutdown - lMock.runtime.Shutdown() - f1Mock.runtime.Shutdown() - f2Mock.runtime.Shutdown() - - // close - lMock.server.Listener.Close() - f1Mock.server.Listener.Close() - f2Mock.server.Listener.Close() - lMock.server.Stop() - f1Mock.server.Stop() - f2Mock.server.Stop() - - wg.Wait() - }) -} diff --git a/kayak/boltdb_store.go b/kayak/boltdb_store.go deleted file mode 100644 index 23ab9ce90..000000000 --- a/kayak/boltdb_store.go +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright 2018 HashiCorp. - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "errors" - - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/coreos/bbolt" -) - -const ( - // Permissions to use on the db file. This is only used if the - // database file does not exist and needs to be created. - dbFileMode = 0600 -) - -var ( - // Bucket names we perform transactions in - dbLogs = []byte("logs") - dbConf = []byte("conf") - - // ErrKeyNotFound is an error indicating a given key does not exist - ErrKeyNotFound = errors.New("not found") -) - -// BoltStore provides access to BoltDB for Raft to store and retrieve -// log entries. It also provides key/value storage, and can be used as -// a LogStore and StableStore. -type BoltStore struct { - // conn is the underlying handle to the db. - conn *bolt.DB - - // The path to the Bolt database file - path string -} - -// Options contains all the configuration used to open the BoltDB -type Options struct { - // Path is the file path to the BoltDB to use - Path string - - // BoltOptions contains any specific BoltDB options you might - // want to specify [e.g. open timeout] - BoltOptions *bolt.Options - - // NoSync causes the database to skip fsync calls after each - // write to the log. This is unsafe, so it should be used - // with caution. 
- NoSync bool -} - -// readOnly returns true if the contained bolt options say to open -// the DB in readOnly mode [this can be useful to tools that want -// to examine the log] -func (o *Options) readOnly() bool { - return o != nil && o.BoltOptions != nil && o.BoltOptions.ReadOnly -} - -// NewBoltStore takes a file path and returns a connected Raft backend. -func NewBoltStore(path string) (*BoltStore, error) { - return NewBoltStoreWithOptions(Options{Path: path}) -} - -// NewBoltStoreWithOptions uses the supplied options to open the BoltDB and prepare it for use as a raft backend. -func NewBoltStoreWithOptions(options Options) (*BoltStore, error) { - // Try to connect - handle, err := bolt.Open(options.Path, dbFileMode, options.BoltOptions) - if err != nil { - return nil, err - } - handle.NoSync = options.NoSync - - // Create the new store - store := &BoltStore{ - conn: handle, - path: options.Path, - } - - // If the store was opened read-only, don't try and create buckets - if !options.readOnly() { - // Set up our buckets - if err := store.initialize(); err != nil { - store.Close() - return nil, err - } - } - return store, nil -} - -// initialize is used to set up all of the buckets. -func (b *BoltStore) initialize() error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Create all the buckets - if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil { - return err - } - if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil { - return err - } - - return tx.Commit() -} - -// Close is used to gracefully close the DB connection. -func (b *BoltStore) Close() error { - return b.conn.Close() -} - -// FirstIndex returns the first known index from the Raft log. -func (b *BoltStore) FirstIndex() (uint64, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return 0, err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - if first, _ := curs.First(); first != nil { - return bytesToUint64(first), nil - } - - return 0, nil -} - -// LastIndex returns the last known index from the Raft log. -func (b *BoltStore) LastIndex() (uint64, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return 0, err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - if last, _ := curs.Last(); last != nil { - return bytesToUint64(last), nil - } - return 0, nil -} - -// GetLog is used to retrieve a log from BoltDB at a given index. -func (b *BoltStore) GetLog(idx uint64, log *Log) error { - tx, err := b.conn.Begin(false) - if err != nil { - return err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbLogs) - val := bucket.Get(uint64ToBytes(idx)) - - if val == nil { - return ErrKeyNotFound - } - return utils.DecodeMsgPack(val, log) -} - -// StoreLog is used to store a single raft log. -func (b *BoltStore) StoreLog(log *Log) error { - return b.StoreLogs([]*Log{log}) -} - -// StoreLogs is used to store a set of raft logs. -func (b *BoltStore) StoreLogs(logs []*Log) error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - for _, log := range logs { - key := uint64ToBytes(log.Index) - val, err := utils.EncodeMsgPack(log) - if err != nil { - return err - } - bucket := tx.Bucket(dbLogs) - if err := bucket.Put(key, val.Bytes()); err != nil { - return err - } - } - - return tx.Commit() -} - -// DeleteRange is used to delete logs within a given range inclusively. 
-func (b *BoltStore) DeleteRange(min, max uint64) error { - minKey := uint64ToBytes(min) - - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() { - // Handle out-of-range log index - if bytesToUint64(k) > max { - break - } - - // Delete in-range log index - if err := curs.Delete(); err != nil { - return err - } - } - - return tx.Commit() -} - -// Set is used to set a key/value set outside of the raft log. -func (b *BoltStore) Set(k, v []byte) error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbConf) - if err := bucket.Put(k, v); err != nil { - return err - } - - return tx.Commit() -} - -// Get is used to retrieve a value from the k/v store by key. -func (b *BoltStore) Get(k []byte) ([]byte, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return nil, err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbConf) - val := bucket.Get(k) - - if val == nil { - return nil, ErrKeyNotFound - } - return append([]byte(nil), val...), nil -} - -// SetUint64 is like Set, but handles uint64 values. -func (b *BoltStore) SetUint64(key []byte, val uint64) error { - return b.Set(key, uint64ToBytes(val)) -} - -// GetUint64 is like Get, but handles uint64 values. -func (b *BoltStore) GetUint64(key []byte) (uint64, error) { - val, err := b.Get(key) - if err != nil { - return 0, err - } - return bytesToUint64(val), nil -} - -// Sync performs an fsync on the database file handle. This is not necessary -// under normal operation unless NoSync is enabled, in which this forces the -// database file to sync against the disk. -func (b *BoltStore) Sync() error { - return b.conn.Sync() -} diff --git a/kayak/boltdb_store_test.go b/kayak/boltdb_store_test.go deleted file mode 100644 index 994f12857..000000000 --- a/kayak/boltdb_store_test.go +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright 2018 HashiCorp. - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "io/ioutil" - "os" - "testing" - "time" - - "github.com/coreos/bbolt" - . 
"github.com/smartystreets/goconvey/convey" -) - -func testBoltStore(t testing.TB) *BoltStore { - fh, err := ioutil.TempFile("", "bolt") - if err != nil { - t.Fatalf("err: %s", err) - } - os.Remove(fh.Name()) - - // Successfully creates and returns a store - store, err := NewBoltStore(fh.Name()) - if err != nil { - t.Fatalf("err: %s", err) - } - - return store -} - -func testLog(idx uint64, data string) *Log { - return &Log{ - Data: []byte(data), - Index: idx, - } -} - -func TestBoltStore_Implements(t *testing.T) { - Convey("test bolt store implements", t, func() { - var store interface{} = &BoltStore{} - var ok bool - _, ok = store.(StableStore) - So(ok, ShouldBeTrue) - _, ok = store.(LogStore) - So(ok, ShouldBeTrue) - }) -} - -func TestBoltOptionsTimeout(t *testing.T) { - Convey("test bolt options timeout", t, func() { - fh, err := ioutil.TempFile("", "bolt") - So(err, ShouldBeNil) - os.Remove(fh.Name()) - defer os.Remove(fh.Name()) - options := Options{ - Path: fh.Name(), - BoltOptions: &bolt.Options{ - Timeout: time.Second / 10, - }, - } - store, err := NewBoltStoreWithOptions(options) - So(err, ShouldBeNil) - defer store.Close() - // trying to open it again should timeout - doneCh := make(chan error, 1) - go func() { - _, err := NewBoltStoreWithOptions(options) - doneCh <- err - }() - select { - case err := <-doneCh: - So(err, ShouldNotBeNil) - So(err.Error(), ShouldEqual, "timeout") - case <-time.After(5 * time.Second): - Print("Gave up waiting for timeout response") - } - }) -} - -func TestBoltOptionsReadOnly(t *testing.T) { - Convey("test bolt options readonly", t, func() { - var err error - fh, err := ioutil.TempFile("", "bolt") - So(err, ShouldBeNil) - defer os.Remove(fh.Name()) - store, err := NewBoltStore(fh.Name()) - So(err, ShouldBeNil) - // Create the log - log := testLog(1, "log1") - // Attempt to store the log - err = store.StoreLog(log) - So(err, ShouldBeNil) - store.Close() - options := Options{ - Path: fh.Name(), - BoltOptions: &bolt.Options{ - Timeout: time.Second / 10, - ReadOnly: true, - }, - } - roStore, err := NewBoltStoreWithOptions(options) - So(err, ShouldBeNil) - defer roStore.Close() - result := new(Log) - err = roStore.GetLog(1, result) - So(err, ShouldBeNil) - - // Ensure the log comes back the same - So(result, ShouldResemble, log) - // Attempt to store the log, should fail on a read-only store/ - err = roStore.StoreLog(log) - So(err, ShouldEqual, bolt.ErrDatabaseReadOnly) - }) -} - -func TestNewBoltStore(t *testing.T) { - Convey("TestNewBoltStore", t, func() { - var err error - fh, err := ioutil.TempFile("", "bolt") - So(err, ShouldBeNil) - os.Remove(fh.Name()) - defer os.Remove(fh.Name()) - - // Successfully creates and returns a store - store, err := NewBoltStore(fh.Name()) - So(err, ShouldBeNil) - - // Ensure the file was created - So(store.path, ShouldEqual, fh.Name()) - _, err = os.Stat(fh.Name()) - So(err, ShouldBeNil) - - // Close the store so we can open again - err = store.Close() - So(err, ShouldBeNil) - - // Ensure our tables were created - db, err := bolt.Open(fh.Name(), dbFileMode, nil) - So(err, ShouldBeNil) - tx, err := db.Begin(true) - So(err, ShouldBeNil) - _, err = tx.CreateBucket([]byte(dbLogs)) - So(err, ShouldEqual, bolt.ErrBucketExists) - _, err = tx.CreateBucket([]byte(dbConf)) - So(err, ShouldEqual, bolt.ErrBucketExists) - }) -} - -func TestBoltStore_FirstIndex(t *testing.T) { - Convey("FirstIndex", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Should get 0 index on empty log - var 
err error - idx, err := store.FirstIndex() - So(err, ShouldBeNil) - So(idx, ShouldEqual, uint64(0)) - - // Set a mock raft log - logs := []*Log{ - testLog(1, "log1"), - testLog(2, "log2"), - testLog(3, "log3"), - } - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Fetch the first Raft index - idx, err = store.FirstIndex() - So(err, ShouldBeNil) - So(idx, ShouldEqual, uint64(1)) - }) -} - -func TestBoltStore_LastIndex(t *testing.T) { - Convey("LastIndex", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Should get 0 index on empty log - var err error - idx, err := store.LastIndex() - So(err, ShouldBeNil) - So(idx, ShouldEqual, uint64(0)) - - // Set a mock raft log - logs := []*Log{ - testLog(1, "log1"), - testLog(2, "log2"), - testLog(3, "log3"), - } - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Fetch the last Raft index - idx, err = store.LastIndex() - So(err, ShouldBeNil) - So(idx, ShouldEqual, uint64(3)) - }) -} - -func TestBoltStore_GetLog(t *testing.T) { - Convey("GetLog", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - log := new(Log) - - // Should return an error on non-existent log - var err error - err = store.GetLog(1, log) - So(err, ShouldEqual, ErrKeyNotFound) - - // Set a mock raft log - logs := []*Log{ - testLog(1, "log1"), - testLog(2, "log2"), - testLog(3, "log3"), - } - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Should return th/e proper log - err = store.GetLog(2, log) - So(err, ShouldBeNil) - So(log, ShouldResemble, logs[1]) - }) -} - -func TestBoltStore_SetLog(t *testing.T) { - Convey("SetLog", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create the log - log := testLog(1, "log1") - - // Attempt to store the log - var err error - err = store.StoreLog(log) - So(err, ShouldBeNil) - - // Retrieve the log again - result := new(Log) - err = store.GetLog(1, result) - So(err, ShouldBeNil) - - // Ensure the log comes back the same - So(result, ShouldResemble, log) - }) -} - -func TestBoltStore_SetLogs(t *testing.T) { - Convey("SetLogs", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create a set of logs - logs := []*Log{ - testLog(1, "log1"), - testLog(2, "log2"), - } - - // Attempt to store the logs - var err error - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Ensure we stored them all - result1, result2 := new(Log), new(Log) - err = store.GetLog(1, result1) - So(err, ShouldBeNil) - So(result1, ShouldResemble, logs[0]) - err = store.GetLog(2, result2) - So(err, ShouldBeNil) - So(result2, ShouldResemble, logs[1]) - }) -} - -func TestBoltStore_DeleteRange(t *testing.T) { - Convey("DeleteRange", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create a set of logs - log1 := testLog(1, "log1") - log2 := testLog(2, "log2") - log3 := testLog(3, "log3") - logs := []*Log{log1, log2, log3} - - // Attempt to store the logs - var err error - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Attempt to delete a range of logs - err = store.DeleteRange(1, 2) - So(err, ShouldBeNil) - - // Ensure the logs were deleted - err = store.GetLog(1, new(Log)) - So(err, ShouldEqual, ErrKeyNotFound) - err = store.GetLog(2, new(Log)) - So(err, ShouldEqual, ErrKeyNotFound) - }) -} - -func TestBoltStore_Set_Get(t *testing.T) { - Convey("Set_Get", t, func() { - store := testBoltStore(t) - defer 
store.Close() - defer os.Remove(store.path) - - // Returns error on non-existent key - var err error - _, err = store.Get([]byte("bad")) - So(err, ShouldEqual, ErrKeyNotFound) - - k, v := []byte("hello"), []byte("world") - - // Try to set a k/v pair - err = store.Set(k, v) - So(err, ShouldBeNil) - - // Try to read it back - val, err := store.Get(k) - So(err, ShouldBeNil) - So(val, ShouldResemble, v) - }) -} - -func TestBoltStore_SetUint64_GetUint64(t *testing.T) { - Convey("SetUint64_GetUint64", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Returns error on non-existent key - var err error - _, err = store.GetUint64([]byte("bad")) - So(err, ShouldEqual, ErrKeyNotFound) - - k, v := []byte("abc"), uint64(123) - - // Attempt to set the k/v pair - err = store.SetUint64(k, v) - So(err, ShouldBeNil) - - // Read back the value - val, err := store.GetUint64(k) - So(err, ShouldBeNil) - So(val, ShouldEqual, v) - }) -} diff --git a/kayak/caller.go b/kayak/caller.go new file mode 100644 index 000000000..27c2ac634 --- /dev/null +++ b/kayak/caller.go @@ -0,0 +1,22 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +// Caller defines the rpc caller, supports mocks for the default rpc.PersistCaller. +type Caller interface { + Call(method string, req interface{}, resp interface{}) error +} diff --git a/kayak/doc.go b/kayak/doc.go index fc2851c61..ea76a4746 100644 --- a/kayak/doc.go +++ b/kayak/doc.go @@ -14,8 +14,5 @@ * limitations under the License. */ -/* -Package kayak is a simple configurable multi-purpose consensus sdk. -The storage implementations contains code refactored from original hashicorp/raft and hashicorp/raft-boltdb repository. -*/ +// Package kayak implements a configurable consistency consensus middleware. package kayak diff --git a/kayak/inmem_store_test.go b/kayak/inmem_store_test.go deleted file mode 100644 index 9eee8dadb..000000000 --- a/kayak/inmem_store_test.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright 2018 HashiCorp. - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "sync" -) - -// MockInmemStore implements the LogStore and StableStore interface. -// It should NOT EVER be used for production. It is used only for -// unit tests. Use the MDBStore implementation instead. 
-type MockInmemStore struct { - l sync.RWMutex - lowIndex uint64 - highIndex uint64 - logs map[uint64]*Log - kv map[string][]byte - kvInt map[string]uint64 -} - -// NewMockInmemStore returns a new in-memory backend. Do not ever -// use for production. Only for testing. -func NewMockInmemStore() *MockInmemStore { - i := &MockInmemStore{ - logs: make(map[uint64]*Log), - kv: make(map[string][]byte), - kvInt: make(map[string]uint64), - } - return i -} - -// FirstIndex implements the LogStore interface. -func (i *MockInmemStore) FirstIndex() (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.lowIndex, nil -} - -// LastIndex implements the LogStore interface. -func (i *MockInmemStore) LastIndex() (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.highIndex, nil -} - -// GetLog implements the LogStore interface. -func (i *MockInmemStore) GetLog(index uint64, log *Log) error { - i.l.RLock() - defer i.l.RUnlock() - l, ok := i.logs[index] - if !ok { - return ErrKeyNotFound - } - *log = *l - return nil -} - -// StoreLog implements the LogStore interface. -func (i *MockInmemStore) StoreLog(log *Log) error { - return i.StoreLogs([]*Log{log}) -} - -// StoreLogs implements the LogStore interface. -func (i *MockInmemStore) StoreLogs(logs []*Log) error { - i.l.Lock() - defer i.l.Unlock() - for _, l := range logs { - i.logs[l.Index] = l - if i.lowIndex == 0 { - i.lowIndex = l.Index - } - if l.Index > i.highIndex { - i.highIndex = l.Index - } - } - return nil -} - -// DeleteRange implements the LogStore interface. -func (i *MockInmemStore) DeleteRange(min, max uint64) error { - i.l.Lock() - defer i.l.Unlock() - for j := min; j <= max; j++ { - delete(i.logs, j) - } - if min <= i.lowIndex { - i.lowIndex = max + 1 - } - if max >= i.highIndex { - i.highIndex = min - 1 - } - if i.lowIndex > i.highIndex { - i.lowIndex = 0 - i.highIndex = 0 - } - return nil -} - -// Set implements the StableStore interface. -func (i *MockInmemStore) Set(key []byte, val []byte) error { - i.l.Lock() - defer i.l.Unlock() - i.kv[string(key)] = val - return nil -} - -// Get implements the StableStore interface. -func (i *MockInmemStore) Get(key []byte) ([]byte, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.kv[string(key)], nil -} - -// SetUint64 implements the StableStore interface. -func (i *MockInmemStore) SetUint64(key []byte, val uint64) error { - i.l.Lock() - defer i.l.Unlock() - i.kvInt[string(key)] = val - return nil -} - -// GetUint64 implements the StableStore interface. -func (i *MockInmemStore) GetUint64(key []byte) (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.kvInt[string(key)], nil -} diff --git a/kayak/mock_Config_test.go b/kayak/mock_Config_test.go deleted file mode 100644 index f50b067f2..000000000 --- a/kayak/mock_Config_test.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by mockery v1.0.0. DO NOT EDIT. 
-package kayak - -import mock "github.com/stretchr/testify/mock" - -// MockConfig is an autogenerated mock type for the Config type -type MockConfig struct { - mock.Mock -} - -// GetRuntimeConfig provides a mock function with given fields: -func (_m *MockConfig) GetRuntimeConfig() *RuntimeConfig { - ret := _m.Called() - - var r0 *RuntimeConfig - if rf, ok := ret.Get(0).(func() *RuntimeConfig); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*RuntimeConfig) - } - } - - return r0 -} diff --git a/kayak/mock_LogStore_test.go b/kayak/mock_LogStore_test.go deleted file mode 100644 index 0cbaf7aba..000000000 --- a/kayak/mock_LogStore_test.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by mockery v1.0.0. DO NOT EDIT. -package kayak - -import mock "github.com/stretchr/testify/mock" - -// MockLogStore is an autogenerated mock type for the LogStore type -type MockLogStore struct { - mock.Mock -} - -// DeleteRange provides a mock function with given fields: min, max -func (_m *MockLogStore) DeleteRange(min uint64, max uint64) error { - ret := _m.Called(min, max) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, uint64) error); ok { - r0 = rf(min, max) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// FirstIndex provides a mock function with given fields: -func (_m *MockLogStore) FirstIndex() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetLog provides a mock function with given fields: index, l -func (_m *MockLogStore) GetLog(index uint64, l *Log) error { - ret := _m.Called(index, l) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, *Log) error); ok { - r0 = rf(index, l) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// LastIndex provides a mock function with given fields: -func (_m *MockLogStore) LastIndex() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StoreLog provides a mock function with given fields: l -func (_m *MockLogStore) StoreLog(l *Log) error { - ret := _m.Called(l) - - var r0 error - if rf, ok := ret.Get(0).(func(*Log) error); ok { - r0 = rf(l) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreLogs provides a mock function with given fields: logs -func (_m *MockLogStore) StoreLogs(logs []*Log) error { - ret := _m.Called(logs) - - var r0 error - if rf, ok := ret.Get(0).(func([]*Log) error); ok { - r0 = rf(logs) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/kayak/mock_Runner_test.go 
b/kayak/mock_Runner_test.go deleted file mode 100644 index 34ccefb6f..000000000 --- a/kayak/mock_Runner_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package kayak - -import mock "github.com/stretchr/testify/mock" - -// MockRunner is an autogenerated mock type for the Runner type -type MockRunner struct { - mock.Mock -} - -// Apply provides a mock function with given fields: data -func (_m *MockRunner) Apply(data []byte) (uint64, error) { - ret := _m.Called(data) - - var r0 uint64 - if rf, ok := ret.Get(0).(func([]byte) uint64); ok { - r0 = rf(data) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(data) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Init provides a mock function with given fields: config, peers, logs, stable, transport -func (_m *MockRunner) Init(config Config, peers *Peers, logs LogStore, stable StableStore, transport Transport) error { - ret := _m.Called(config, peers, logs, stable, transport) - - var r0 error - if rf, ok := ret.Get(0).(func(Config, *Peers, LogStore, StableStore, Transport) error); ok { - r0 = rf(config, peers, logs, stable, transport) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Shutdown provides a mock function with given fields: wait -func (_m *MockRunner) Shutdown(wait bool) error { - ret := _m.Called(wait) - - var r0 error - if rf, ok := ret.Get(0).(func(bool) error); ok { - r0 = rf(wait) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdatePeers provides a mock function with given fields: peers -func (_m *MockRunner) UpdatePeers(peers *Peers) error { - ret := _m.Called(peers) - - var r0 error - if rf, ok := ret.Get(0).(func(*Peers) error); ok { - r0 = rf(peers) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/kayak/mock_StableStore_test.go b/kayak/mock_StableStore_test.go deleted file mode 100644 index 353e65d87..000000000 --- a/kayak/mock_StableStore_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by mockery v1.0.0. DO NOT EDIT. 
-package kayak - -import mock "github.com/stretchr/testify/mock" - -// MockStableStore is an autogenerated mock type for the StableStore type -type MockStableStore struct { - mock.Mock -} - -// Get provides a mock function with given fields: key -func (_m *MockStableStore) Get(key []byte) ([]byte, error) { - ret := _m.Called(key) - - var r0 []byte - if rf, ok := ret.Get(0).(func([]byte) []byte); ok { - r0 = rf(key) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetUint64 provides a mock function with given fields: key -func (_m *MockStableStore) GetUint64(key []byte) (uint64, error) { - ret := _m.Called(key) - - var r0 uint64 - if rf, ok := ret.Get(0).(func([]byte) uint64); ok { - r0 = rf(key) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Set provides a mock function with given fields: key, val -func (_m *MockStableStore) Set(key []byte, val []byte) error { - ret := _m.Called(key, val) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, []byte) error); ok { - r0 = rf(key, val) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetUint64 provides a mock function with given fields: key, val -func (_m *MockStableStore) SetUint64(key []byte, val uint64) error { - ret := _m.Called(key, val) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, uint64) error); ok { - r0 = rf(key, val) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/kayak/mock_Worker_test.go b/kayak/mock_Worker_test.go deleted file mode 100644 index 07b94afd6..000000000 --- a/kayak/mock_Worker_test.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by mockery v1.0.0. DO NOT EDIT. 
-package kayak - -import ( - "context" - - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/stretchr/testify/mock" -) - -// MockWorker is an autogenerated mock type for the Worker type -type MockWorker struct { - mock.Mock -} - -// Commit provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Commit(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Prepare provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Rollback provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/kayak/mock_kayak_test.go b/kayak/mock_kayak_test.go deleted file mode 100644 index 08b4121c2..000000000 --- a/kayak/mock_kayak_test.go +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "context" - "crypto/rand" - "errors" - "fmt" - "os" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" -) - -// common mocks -type MockTransportRouter struct { - reqSeq uint64 - transports map[proto.NodeID]*MockTransport - transportLock sync.Mutex -} - -type MockTransport struct { - nodeID proto.NodeID - router *MockTransportRouter - queue chan Request - waitQueue chan *MockResponse - giveUp map[uint64]bool -} - -type MockRequest struct { - transport *MockTransport - ctx context.Context - RequestID uint64 - NodeID proto.NodeID - Method string - Log *Log -} - -type MockResponse struct { - ResponseID uint64 - Data []byte - Error error -} - -type MockTwoPCWorker struct { - nodeID proto.NodeID - state string - data int64 - total int64 -} - -var ( - _ twopc.Worker = &MockTwoPCWorker{} -) - -func (m *MockTransportRouter) getTransport(nodeID proto.NodeID) *MockTransport { - m.transportLock.Lock() - defer m.transportLock.Unlock() - - if _, ok := m.transports[nodeID]; !ok { - m.transports[nodeID] = &MockTransport{ - nodeID: nodeID, - router: m, - queue: make(chan Request, 1000), - waitQueue: make(chan *MockResponse, 1000), - giveUp: make(map[uint64]bool), - } - } - - return m.transports[nodeID] -} - -func (m *MockTransportRouter) ResetTransport(nodeID proto.NodeID) { - m.transportLock.Lock() - defer m.transportLock.Unlock() - - if _, ok := m.transports[nodeID]; ok { - // reset - delete(m.transports, nodeID) - } -} - -func (m *MockTransportRouter) ResetAll() { - m.transportLock.Lock() - defer m.transportLock.Unlock() - - m.transports = make(map[proto.NodeID]*MockTransport) -} - -func (m *MockTransportRouter) getReqID() uint64 { - return atomic.AddUint64(&m.reqSeq, 1) -} - -func (m *MockTransport) Init() error { - return nil -} - -func (m *MockTransport) Request(ctx context.Context, nodeID proto.NodeID, method string, log *Log) ([]byte, error) { - return m.router.getTransport(nodeID).sendRequest(&MockRequest{ - RequestID: m.router.getReqID(), - NodeID: m.nodeID, - Method: method, - Log: log, - ctx: ctx, - }) -} - -func (m *MockTransport) Process() <-chan Request { - return m.queue -} - -func (m *MockTransport) Shutdown() error { - return nil -} - -func (m *MockTransport) sendRequest(req Request) ([]byte, error) { - r := req.(*MockRequest) - r.transport = m - - if log.GetLevel() >= log.DebugLevel { - fmt.Println() - } - log.Debugf("[%v] [%v] -> [%v] request %v", r.RequestID, r.NodeID, req.GetPeerNodeID(), r.GetLog()) - m.queue <- r - - for { - select { - case <-r.ctx.Done(): - // deadline reached - log.Debugf("[%v] [%v] -> [%v] request timeout", - r.RequestID, r.NodeID, req.GetPeerNodeID()) - m.giveUp[r.RequestID] = true - return nil, r.ctx.Err() - case res := <-m.waitQueue: - if res.ResponseID != r.RequestID { - // put back to queue - if !m.giveUp[res.ResponseID] { - m.waitQueue <- res - } else { - delete(m.giveUp, res.ResponseID) - } - } else { - log.Debugf("[%v] [%v] -> [%v] response %v: %v", - r.RequestID, req.GetPeerNodeID(), r.NodeID, res.Data, res.Error) - return res.Data, res.Error - } - } - } -} - -func (m *MockRequest) GetPeerNodeID() proto.NodeID { - return m.NodeID -} - -func (m *MockRequest) GetMethod() string { - return m.Method -} - -func (m *MockRequest) GetLog() *Log { - return m.Log -} - -func (m *MockRequest) SendResponse(v []byte, err error) error { - m.transport.waitQueue <- &MockResponse{ - ResponseID: m.RequestID, - Data: v, - Error: err, - } - - return nil -} - -func (w *MockTwoPCWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - // test prepare - if w.state != "" { - return errors.New("invalid state") - } - - value, ok 
:= wb.(int64) - if !ok { - return errors.New("invalid data") - } - - w.state = "prepared" - w.data = value - - return nil -} - -func (w *MockTwoPCWorker) Commit(ctx context.Context, wb twopc.WriteBatch) error { - // test commit - if w.state != "prepared" { - return errors.New("invalid state") - } - - if !reflect.DeepEqual(wb, w.data) { - return errors.New("commit data not same as last") - } - - w.total += w.data - w.state = "" - - return nil -} - -func (w *MockTwoPCWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - // test rollback - if w.state != "prepared" { - return errors.New("invalid state") - } - - if !reflect.DeepEqual(wb, w.data) { - return errors.New("commit data not same as last") - } - - w.data = 0 - w.state = "" - - return nil -} - -func (w *MockTwoPCWorker) GetTotal() int64 { - return w.total -} - -func (w *MockTwoPCWorker) GetState() string { - return w.state -} - -type CallCollector struct { - l sync.Mutex - callOrder []string -} - -func (c *CallCollector) Append(call string) { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = append(c.callOrder, call) -} - -func (c *CallCollector) Get() []string { - c.l.Lock() - defer c.l.Unlock() - return c.callOrder[:] -} - -func (c *CallCollector) Reset() { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = c.callOrder[:0] -} - -func testPeersFixture(term uint64, servers []*Server) *Peers { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - - newServers := make([]*Server, 0, len(servers)) - var leaderNode *Server - - for _, s := range servers { - newS := &Server{ - Role: s.Role, - ID: s.ID, - PubKey: pubKey, - } - newServers = append(newServers, newS) - if newS.Role == proto.Leader { - leaderNode = newS - } - } - - peers := &Peers{ - Term: term, - Leader: leaderNode, - Servers: servers, - PubKey: pubKey, - } - - peers.Sign(privKey) - - return peers -} - -func testLogFixture(data []byte) (log *Log) { - log = &Log{ - Index: uint64(1), - Term: uint64(1), - Data: data, - } - - log.ComputeHash() - - return -} - -// test mock library itself -func TestMockTransport(t *testing.T) { - Convey("test transport with request timeout", t, func() { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) - defer cancel() - - var err error - var response []byte - response, err = mockRouter.getTransport("a").Request( - ctx, "b", "Test", testLogFixture([]byte("happy"))) - - So(response, ShouldBeNil) - So(err, ShouldNotBeNil) - }) - - Convey("test transport with successful request", t, func(c C) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - testLog := testLogFixture([]byte("happy")) - var wg sync.WaitGroup - - wg.Add(1) - - go func() { - defer wg.Done() - select { - case req := <-mockRouter.getTransport("d").Process(): - c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("c")) - c.So(req.GetMethod(), ShouldEqual, "Test") - c.So(req.GetLog(), ShouldResemble, testLog) - req.SendResponse([]byte("happy too"), nil) - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - response, err = mockRouter.getTransport("c").Request( - context.Background(), "d", "Test", testLog) - - c.So(err, ShouldBeNil) - 
c.So(response, ShouldResemble, []byte("happy too")) - }() - - wg.Wait() - }) - - Convey("test transport with concurrent request", t, FailureContinues, func(c C) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - testLog := testLogFixture([]byte("happy")) - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - response, err = mockRouter.getTransport("e").Request( - context.Background(), "g", "test1", testLog) - - c.So(err, ShouldBeNil) - c.So(response, ShouldResemble, []byte("happy e test1")) - }() - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - response, err = mockRouter.getTransport("f").Request( - context.Background(), "g", "test2", testLog) - - c.So(err, ShouldBeNil) - c.So(response, ShouldResemble, []byte("happy f test2")) - }() - - wg.Add(1) - go func() { - defer wg.Done() - - for i := 0; i < 2; i++ { - select { - case req := <-mockRouter.getTransport("g").Process(): - c.So(req.GetPeerNodeID(), ShouldBeIn, []proto.NodeID{"e", "f"}) - c.So(req.GetMethod(), ShouldBeIn, []string{"test1", "test2"}) - c.So(req.GetLog(), ShouldResemble, testLog) - req.SendResponse([]byte(fmt.Sprintf("happy %s %s", req.GetPeerNodeID(), req.GetMethod())), nil) - } - } - }() - - wg.Wait() - }) - - Convey("test transport with piped request", t, FailureContinues, func(c C) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - var wg sync.WaitGroup - - randReq := testLogFixture([]byte("happy")) - randResp := make([]byte, 4) - rand.Read(randResp) - - t.Logf("test with request %d, response %d", randReq, randResp) - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - var req Request - - select { - case req = <-mockRouter.getTransport("j").Process(): - c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("i")) - c.So(req.GetMethod(), ShouldEqual, "pass1") - } - - response, err = mockRouter.getTransport("j").Request( - context.Background(), "k", "pass2", req.GetLog()) - - c.So(err, ShouldBeNil) - req.SendResponse(response, nil) - }() - - wg.Add(1) - go func() { - defer wg.Done() - select { - case req := <-mockRouter.getTransport("k").Process(): - c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("j")) - c.So(req.GetMethod(), ShouldEqual, "pass2") - c.So(req.GetLog(), ShouldResemble, randReq) - req.SendResponse(randResp, nil) - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - - response, err = mockRouter.getTransport("i").Request( - context.Background(), "j", "pass1", randReq) - - c.So(err, ShouldBeNil) - c.So(response, ShouldResemble, randResp) - }() - - wg.Wait() - }) -} - -func init() { - // set logger level by env - if os.Getenv("DEBUG") != "" { - log.SetLevel(log.DebugLevel) - } -} diff --git a/kayak/runtime.go b/kayak/runtime.go index 68213698b..8c763ceda 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -17,163 +17,920 @@ package kayak import ( + "context" + "encoding/binary" "fmt" - "path/filepath" + "io" + "math" + "sync" + "sync/atomic" + "time" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" ) const ( - // FileStorePath is the default log store filename - FileStorePath = "kayak.db" + // commit channel window size + commitWindow = 10 + // prepare window + trackerWindow = 10 ) -// 
Runtime defines common init/shutdown logic for different consensus protocol runner. +// Runtime defines the main kayak Runtime. type Runtime struct { - config *RuntimeConfig - runnerConfig Config - peers *Peers - isLeader bool - logStore *BoltStore + /// Indexes + // index for next log. + nextIndexLock sync.Mutex + nextIndex uint64 + // lastCommit, last commit log index + lastCommit uint64 + // pendingPrepares, prepares needs to be committed/rollback + pendingPrepares map[uint64]bool + pendingPreparesLock sync.RWMutex + + /// Runtime entities + // current node id. + nodeID proto.NodeID + // instance identifies kayak in multi-instance environment + // e.g. use database id for SQLChain scenario. + instanceID string + // wal defines the wal for kayak. + wal kt.Wal + // underlying handler + sh kt.Handler + + /// Peers info + // peers defines the server peers. + peers *proto.Peers + // cached role of current node in peers, calculated from peers info. + role proto.ServerRole + // cached followers in peers, calculated from peers info. + followers []proto.NodeID + // peers lock for peers update logic. + peersLock sync.RWMutex + // calculated min follower nodes for prepare. + minPreparedFollowers int + // calculated min follower nodes for commit. + minCommitFollowers int + + /// RPC related + // callerMap caches the caller for peering nodes. + callerMap sync.Map // map[proto.NodeID]Caller + // service name for mux service. + serviceName string + // rpc method for coordination requests. + rpcMethod string + // tracks the outgoing rpc requests. + rpcTrackCh chan *rpcTracker + + //// Parameters + // prepare threshold defines the minimum node count requirement for prepare operation. + prepareThreshold float64 + // commit threshold defines the minimum node count requirement for commit operation. + commitThreshold float64 + // prepare timeout defines the max allowed time for prepare operation. + prepareTimeout time.Duration + // commit timeout defines the max allowed time for commit operation. + commitTimeout time.Duration + // channel for awaiting commits. + commitCh chan *commitReq + + /// Sub-routines management. + started uint32 + stopCh chan struct{} + wg sync.WaitGroup } -// NewRuntime creates new runtime. -func NewRuntime(config Config, peers *Peers) (*Runtime, error) { - if config == nil || peers == nil { - return nil, ErrInvalidConfig +// commitReq defines the commit operation input. +type commitReq struct { + ctx context.Context + data interface{} + index uint64 + lastCommit uint64 + log *kt.Log + result chan *commitResult +} + +// followerCommitResult defines the commit operation result. +type commitResult struct { + start time.Time + dbCost time.Duration + result interface{} + err error + rpc *rpcTracker +} + +// NewRuntime creates new kayak Runtime. 
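+// Construction is driven entirely by kt.RuntimeConfig. A minimal wiring
+// sketch, limited to the fields exercised by the tests in this change
+// (handler, wal, peers and nodeID are placeholders; kt.RuntimeConfig may
+// carry further options):
+//
+//	rt, err := kayak.NewRuntime(&kt.RuntimeConfig{
+//		Handler:          handler, // kt.Handler: Check/EncodePayload/DecodePayload/Commit
+//		Wal:              wal,     // kt.Wal write-ahead log, e.g. kl.NewMemWal()
+//		Peers:            peers,   // signed *proto.Peers, verified during init
+//		NodeID:           nodeID,  // must appear in peers, else kt.ErrNotInPeer
+//		PrepareThreshold: 1.0,     // fraction of nodes required for prepare
+//		CommitThreshold:  1.0,     // fraction of nodes required for commit
+//		PrepareTimeout:   time.Second,
+//		CommitTimeout:    10 * time.Second,
+//		ServiceName:      "Test",  // mux service name for coordination RPCs
+//		MethodName:       "Call",  // rpcMethod becomes "Test.Call"
+//	})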
+func NewRuntime(cfg *kt.RuntimeConfig) (rt *Runtime, err error) { + if cfg == nil { + err = errors.Wrap(kt.ErrInvalidConfig, "nil config") + return + } + + peers := cfg.Peers + + if peers == nil { + err = errors.Wrap(kt.ErrInvalidConfig, "nil peers") + return + } + + // verify peers + if err = peers.Verify(); err != nil { + err = errors.Wrap(err, "verify peers during kayak init failed") + return + } + + followers := make([]proto.NodeID, 0, len(peers.Servers)) + exists := false + var role proto.ServerRole + + for _, v := range peers.Servers { + if !v.IsEqual(&peers.Leader) { + followers = append(followers, v) + } + + if v.IsEqual(&cfg.NodeID) { + exists = true + if v.IsEqual(&peers.Leader) { + role = proto.Leader + } else { + role = proto.Follower + } + } + } + + if !exists { + err = errors.Wrapf(kt.ErrNotInPeer, "node %v not in peers %v", cfg.NodeID, peers) + return + } + + // calculate fan-out count according to threshold and peers info + minPreparedFollowers := int(math.Max(math.Ceil(cfg.PrepareThreshold*float64(len(peers.Servers))), 1) - 1) + minCommitFollowers := int(math.Max(math.Ceil(cfg.CommitThreshold*float64(len(peers.Servers))), 1) - 1) + + rt = &Runtime{ + // indexes + pendingPrepares: make(map[uint64]bool, commitWindow*2), + + // handler and logs + sh: cfg.Handler, + wal: cfg.Wal, + instanceID: cfg.InstanceID, + + // peers + peers: cfg.Peers, + nodeID: cfg.NodeID, + followers: followers, + role: role, + minPreparedFollowers: minPreparedFollowers, + minCommitFollowers: minCommitFollowers, + + // rpc related + serviceName: cfg.ServiceName, + rpcMethod: fmt.Sprintf("%v.%v", cfg.ServiceName, cfg.MethodName), + rpcTrackCh: make(chan *rpcTracker, trackerWindow), + + // commits related + prepareThreshold: cfg.PrepareThreshold, + prepareTimeout: cfg.PrepareTimeout, + commitThreshold: cfg.CommitThreshold, + commitTimeout: cfg.CommitTimeout, + commitCh: make(chan *commitReq, commitWindow), + + // stop coordinator + stopCh: make(chan struct{}), + } + + // read from pool to rebuild uncommitted log map + if err = rt.readLogs(); err != nil { + return + } + + return +} + +// Start starts the Runtime. +func (r *Runtime) Start() (err error) { + if !atomic.CompareAndSwapUint32(&r.started, 0, 1) { + return + } + + // start commit cycle + r.goFunc(r.commitCycle) + // start rpc tracker collector + // TODO(): + + return +} + +// Shutdown waits for the Runtime to stop. +func (r *Runtime) Shutdown() (err error) { + if !atomic.CompareAndSwapUint32(&r.started, 1, 2) { + return + } + + select { + case <-r.stopCh: + default: + close(r.stopCh) + } + r.wg.Wait() + + return +} + +// Apply defines entry for Leader node. 
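+// It drives one round of the two-phase protocol: the request is checked and
+// encoded, a prepare log is written locally and fanned out to the followers,
+// and once at least minPreparedFollowers acknowledge within prepareTimeout
+// the commit is enqueued on commitCh. A prepare timeout or any follower
+// prepare error instead falls through to the ROLLBACK label below, which
+// writes and asynchronously broadcasts a rollback log. Leader-side usage
+// sketch (ctx bounds the whole round, including the pipelined commit):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//	defer cancel()
+//	result, logIndex, err := rt.Apply(ctx, req)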
+func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{}, logIndex uint64, err error) { + var commitFuture <-chan *commitResult + + var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmCommitEnqueue, tmLeaderRollback, + tmRollback, tmCommitDequeue, tmLeaderCommit, tmCommit time.Time + var dbCost time.Duration + + defer func() { + fields := log.Fields{ + "r": logIndex, + } + if !tmLeaderPrepare.Before(tmStart) { + fields["lp"] = tmLeaderPrepare.Sub(tmStart).Nanoseconds() + } + if !tmFollowerPrepare.Before(tmLeaderPrepare) { + fields["fp"] = tmFollowerPrepare.Sub(tmLeaderPrepare).Nanoseconds() + } + if !tmLeaderRollback.Before(tmFollowerPrepare) { + fields["lr"] = tmLeaderRollback.Sub(tmFollowerPrepare).Nanoseconds() + } + if !tmRollback.Before(tmLeaderRollback) { + fields["fr"] = tmRollback.Sub(tmLeaderRollback).Nanoseconds() + } + if !tmCommitEnqueue.Before(tmFollowerPrepare) { + fields["eq"] = tmCommitEnqueue.Sub(tmFollowerPrepare).Nanoseconds() + } + if !tmCommitDequeue.Before(tmCommitEnqueue) { + fields["dq"] = tmCommitDequeue.Sub(tmCommitEnqueue).Nanoseconds() + } + if !tmLeaderCommit.Before(tmCommitDequeue) { + fields["lc"] = tmLeaderCommit.Sub(tmCommitDequeue).Nanoseconds() + } + if !tmCommit.Before(tmLeaderCommit) { + fields["fc"] = tmCommit.Sub(tmLeaderCommit).Nanoseconds() + } + if dbCost > 0 { + fields["dc"] = dbCost.Nanoseconds() + } + if !tmCommit.Before(tmStart) { + fields["t"] = tmCommit.Sub(tmStart).Nanoseconds() + } else if !tmRollback.Before(tmStart) { + fields["t"] = tmRollback.Sub(tmStart).Nanoseconds() + } + log.WithFields(fields).WithError(err).Info("kayak leader apply") + }() + + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + if r.role != proto.Leader { + // not leader + err = kt.ErrNotLeader + return } - // config authentication check - if !peers.Verify() { - return nil, ErrInvalidConfig + tmStart = time.Now() + + // check prepare in leader + if err = r.doCheck(req); err != nil { + err = errors.Wrap(err, "leader verify log") + return + } + + // encode request + var encBuf []byte + if encBuf, err = r.sh.EncodePayload(req); err != nil { + err = errors.Wrap(err, "encode kayak payload failed") + return + } + + // create prepare request + var prepareLog *kt.Log + if prepareLog, err = r.leaderLogPrepare(encBuf); err != nil { + // serve error, leader could not write logs, change leader in block producer + // TODO(): CHANGE LEADER + return } - // peers config verification - serverInPeers := false - runtime := &Runtime{ - config: config.GetRuntimeConfig(), - peers: peers, - runnerConfig: config, + // Leader pending map handling. 
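+	// Mirror the follower-side bookkeeping: mark the prepare as in-flight in
+	// pendingPrepares now, and rely on the deferred markPrepareFinished to
+	// clear the entry when this round ends, on both the commit and the
+	// rollback paths.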
+ r.markPendingPrepare(prepareLog.Index) + defer r.markPrepareFinished(prepareLog.Index) + + tmLeaderPrepare = time.Now() + + // send prepare to all nodes + prepareTracker := r.rpc(prepareLog, r.minPreparedFollowers) + prepareCtx, prepareCtxCancelFunc := context.WithTimeout(ctx, r.prepareTimeout) + defer prepareCtxCancelFunc() + prepareErrors, prepareDone, _ := prepareTracker.get(prepareCtx) + if !prepareDone { + // timeout, rollback + err = kt.ErrPrepareTimeout + goto ROLLBACK } - for _, s := range peers.Servers { - if s.ID == runtime.config.LocalID { - serverInPeers = true + // collect errors + if err = r.errorSummary(prepareErrors); err != nil { + goto ROLLBACK + } + + tmFollowerPrepare = time.Now() + + commitFuture = r.leaderCommitResult(ctx, req, prepareLog) + + tmCommitEnqueue = time.Now() - if s.Role == proto.Leader { - runtime.isLeader = true + select { + case cResult := <-commitFuture: + if cResult != nil { + logIndex = prepareLog.Index + result = cResult.result + err = cResult.err + + tmCommitDequeue = cResult.start + dbCost = cResult.dbCost + tmLeaderCommit = time.Now() + + // wait until context deadline or commit done + if cResult.rpc != nil { + cResult.rpc.get(ctx) + } + } else { + log.Fatal("IMPOSSIBLE BRANCH") + select { + case <-ctx.Done(): + err = errors.Wrap(ctx.Err(), "process commit timeout") + goto ROLLBACK + default: } } + case <-ctx.Done(): + // pipeline commit timeout + logIndex = prepareLog.Index + err = errors.Wrap(ctx.Err(), "enqueue commit timeout") + goto ROLLBACK + } + + tmCommit = time.Now() + + return + +ROLLBACK: + // rollback local + var rollbackLog *kt.Log + var logErr error + if rollbackLog, logErr = r.leaderLogRollback(prepareLog.Index); logErr != nil { + // serve error, construct rollback log failed, internal error + // TODO(): CHANGE LEADER + return + } + + tmLeaderRollback = time.Now() + + // async send rollback to all nodes + r.rpc(rollbackLog, 0) + + tmRollback = time.Now() + + return +} + +// FollowerApply defines entry for follower node. +func (r *Runtime) FollowerApply(l *kt.Log) (err error) { + if l == nil { + err = errors.Wrap(kt.ErrInvalidLog, "log is nil") + return + } + + var tmStart, tmEnd time.Time + + defer func() { + log.WithFields(log.Fields{ + "t": l.Type.String(), + "i": l.Index, + "c": tmEnd.Sub(tmStart).Nanoseconds(), + }).WithError(err).Info("kayak follower apply") + }() + + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + if r.role == proto.Leader { + // not follower + err = kt.ErrNotFollower + return } - if !serverInPeers { - return nil, ErrInvalidConfig + // verify log structure + switch l.Type { + case kt.LogPrepare: + err = r.followerPrepare(l) + case kt.LogRollback: + err = r.followerRollback(l) + case kt.LogCommit: + err = r.followerCommit(l) + case kt.LogBarrier: + // support barrier for log truncation and peer update + fallthrough + case kt.LogNoop: + // do nothing + err = r.followerNoop(l) } - return runtime, nil + if err == nil { + r.updateNextIndex(l) + } + + return +} + +// UpdatePeers defines entry for peers update logic. +func (r *Runtime) UpdatePeers(peers *proto.Peers) (err error) { + r.peersLock.Lock() + defer r.peersLock.Unlock() + + return +} + +func (r *Runtime) leaderLogPrepare(data []byte) (*kt.Log, error) { + // just write new log + return r.newLog(kt.LogPrepare, data) +} + +func (r *Runtime) leaderLogRollback(i uint64) (*kt.Log, error) { + // just write new log + return r.newLog(kt.LogRollback, r.uint64ToBytes(i)) } -// Init defines the common init logic. 
-func (r *Runtime) Init() (err error) { - // init log store - var logStore *BoltStore +func (r *Runtime) doCheck(req interface{}) (err error) { + if err = r.sh.Check(req); err != nil { + err = errors.Wrap(err, "verify log") + return + } + + return +} - if logStore, err = NewBoltStore(filepath.Join(r.config.RootDir, FileStorePath)); err != nil { - return fmt.Errorf("new bolt store: %s", err.Error()) +func (r *Runtime) followerPrepare(l *kt.Log) (err error) { + // decode + var req interface{} + if req, err = r.sh.DecodePayload(l.Data); err != nil { + err = errors.Wrap(err, "decode kayak payload failed") + return } - // call transport init - if err = r.config.Transport.Init(); err != nil { + if err = r.doCheck(req); err != nil { return } - // call runner init - if err = r.config.Runner.Init(r.runnerConfig, r.peers, logStore, logStore, r.config.Transport); err != nil { - logStore.Close() - return fmt.Errorf("%s runner init: %s", r.config.LocalID, err.Error()) + // write log + if err = r.wal.Write(l); err != nil { + err = errors.Wrap(err, "write follower prepare log failed") + return } - r.logStore = logStore - return nil + r.markPendingPrepare(l.Index) + + return } -// Shutdown defines common shutdown logic. -func (r *Runtime) Shutdown() (err error) { - if err = r.config.Runner.Shutdown(true); err != nil { - return fmt.Errorf("%s runner shutdown: %s", r.config.LocalID, err.Error()) +func (r *Runtime) followerRollback(l *kt.Log) (err error) { + var prepareLog *kt.Log + if _, prepareLog, err = r.getPrepareLog(l); err != nil || prepareLog == nil { + err = errors.Wrap(err, "get original request in rollback failed") + return + } + + // check if prepare already processed + if r.checkIfPrepareFinished(prepareLog.Index) { + err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") + return + } + + // write wal + if err = r.wal.Write(l); err != nil { + err = errors.Wrap(err, "write follower rollback log failed") + } + + r.markPrepareFinished(l.Index) + + return +} + +func (r *Runtime) followerCommit(l *kt.Log) (err error) { + var prepareLog *kt.Log + var lastCommit uint64 + if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { + err = errors.Wrap(err, "get original request in commit failed") + return + } + + // check if prepare already processed + if r.checkIfPrepareFinished(prepareLog.Index) { + err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") + return + } + + cResult := <-r.followerCommitResult(context.Background(), l, prepareLog, lastCommit) + if cResult != nil { + err = cResult.err + } + + r.markPrepareFinished(l.Index) + + return +} + +func (r *Runtime) leaderCommitResult(ctx context.Context, reqPayload interface{}, prepareLog *kt.Log) (res chan *commitResult) { + // decode log and send to commit channel to process + res = make(chan *commitResult, 1) + + if prepareLog == nil { + res <- &commitResult{ + err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), + } + return + } + + // decode prepare log + req := &commitReq{ + ctx: ctx, + data: reqPayload, + index: prepareLog.Index, + result: res, + } + + select { + case <-ctx.Done(): + case r.commitCh <- req: + } + + return +} + +func (r *Runtime) followerCommitResult(ctx context.Context, commitLog *kt.Log, prepareLog *kt.Log, lastCommit uint64) (res chan *commitResult) { + // decode log and send to commit channel to process + res = make(chan *commitResult, 1) + + if prepareLog == nil { + res <- &commitResult{ + err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), + } + return } - 
if err = r.config.Transport.Shutdown(); err != nil { + myLastCommit := atomic.LoadUint64(&r.lastCommit) + + // check committed index + if lastCommit < myLastCommit { + // leader pushed a early index before commit + log.WithFields(log.Fields{ + "head": myLastCommit, + "supplied": lastCommit, + }).Warning("invalid last commit log") + res <- &commitResult{ + err: errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index"), + } + return + } + + // decode prepare log + var logReq interface{} + var err error + if logReq, err = r.sh.DecodePayload(prepareLog.Data); err != nil { + res <- &commitResult{ + err: errors.Wrap(err, "decode log payload failed"), + } return } - if r.logStore != nil { - if err = r.logStore.Close(); err != nil { - return fmt.Errorf("shutdown bolt store: %s", err.Error()) + req := &commitReq{ + ctx: ctx, + data: logReq, + index: prepareLog.Index, + lastCommit: lastCommit, + result: res, + log: commitLog, + } + + select { + case <-ctx.Done(): + case r.commitCh <- req: + } + + return +} + +func (r *Runtime) commitCycle() { + // TODO(): panic recovery + for { + var cReq *commitReq + + select { + case <-r.stopCh: + return + case cReq = <-r.commitCh: + } + + if cReq != nil { + r.doCommit(cReq) } + } +} + +func (r *Runtime) doCommit(req *commitReq) { + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + resp := &commitResult{ + start: time.Now(), + } + + if r.role == proto.Leader { + resp.dbCost, resp.rpc, resp.result, resp.err = r.leaderDoCommit(req) + req.result <- resp + } else { + r.followerDoCommit(req) + } +} + +func (r *Runtime) leaderDoCommit(req *commitReq) (dbCost time.Duration, tracker *rpcTracker, result interface{}, err error) { + if req.log != nil { + // mis-use follower commit for leader + log.Fatal("INVALID EXISTING LOG FOR LEADER COMMIT") + return + } + + // create leader log + var l *kt.Log + var logData []byte + + logData = append(logData, r.uint64ToBytes(req.index)...) + logData = append(logData, r.uint64ToBytes(atomic.LoadUint64(&r.lastCommit))...) - r.logStore = nil + if l, err = r.newLog(kt.LogCommit, logData); err != nil { + // serve error, leader could not write log + return } - return nil + // not wrapping underlying handler commit error + tmStartDB := time.Now() + result, err = r.sh.Commit(req.data) + dbCost = time.Now().Sub(tmStartDB) + + // mark last commit + atomic.StoreUint64(&r.lastCommit, l.Index) + + // send commit + tracker = r.rpc(l, r.minCommitFollowers) + + // TODO(): text log for rpc errors + + // TODO(): mark uncommitted nodes and remove from peers + + return } -// Apply defines common process logic. 
-func (r *Runtime) Apply(data []byte) (offset uint64, err error) { - // validate if myself is leader - if !r.isLeader { - return 0, ErrNotLeader +func (r *Runtime) followerDoCommit(req *commitReq) (err error) { + if req.log == nil { + log.Fatal("NO LOG FOR FOLLOWER COMMIT") + return + } + + // check for last commit availability + myLastCommit := atomic.LoadUint64(&r.lastCommit) + if req.lastCommit != myLastCommit { + // TODO(): need counter for retries, infinite commit re-order would cause troubles + go func(req *commitReq) { + r.commitCh <- req + }(req) + return } - offset, err = r.config.Runner.Apply(data) - if err != nil { - return 0, err + // write log first + if err = r.wal.Write(req.log); err != nil { + err = errors.Wrap(err, "write follower commit log failed") + return } + // do commit, not wrapping underlying handler commit error + _, err = r.sh.Commit(req.data) + + // mark last commit + atomic.StoreUint64(&r.lastCommit, req.log.Index) + + req.result <- &commitResult{err: err} + return } -// GetLog fetches runtime log produced by runner. -func (r *Runtime) GetLog(offset uint64) (data []byte, err error) { - var l Log - if err = r.logStore.GetLog(offset, &l); err != nil { +func (r *Runtime) getPrepareLog(l *kt.Log) (lastCommitIndex uint64, pl *kt.Log, err error) { + var prepareIndex uint64 + + // decode prepare index + if prepareIndex, err = r.bytesToUint64(l.Data); err != nil { + err = errors.Wrap(err, "log does not contain valid prepare index") return } - data = l.Data + // decode commit index + if len(l.Data) >= 16 { + lastCommitIndex, _ = r.bytesToUint64(l.Data[8:]) + } + + pl, err = r.wal.Get(prepareIndex) return } -// UpdatePeers defines common peers update logic. -func (r *Runtime) UpdatePeers(peers *Peers) error { - // Verify peers - if !peers.Verify() { - return ErrInvalidConfig +func (r *Runtime) newLog(logType kt.LogType, data []byte) (l *kt.Log, err error) { + // allocate index + r.nextIndexLock.Lock() + i := r.nextIndex + r.nextIndex++ + r.nextIndexLock.Unlock() + l = &kt.Log{ + LogHeader: kt.LogHeader{ + Index: i, + Type: logType, + Producer: r.nodeID, + }, + Data: data, + } + + // error write will be a fatal error, cause to node to fail fast + if err = r.wal.Write(l); err != nil { + log.Fatalf("WRITE LOG FAILED: %v", err) } - // Check if myself is still in peers - inPeers := false - isLeader := false + return +} - for _, s := range peers.Servers { - if s.ID == r.config.LocalID { - inPeers = true - isLeader = s.Role == proto.Leader +func (r *Runtime) readLogs() (err error) { + // load logs, only called during init + var l *kt.Log + + for { + if l, err = r.wal.Read(); err != nil && err != io.EOF { + err = errors.Wrap(err, "load previous logs in wal failed") + return + } else if err == io.EOF { + err = nil break } + + switch l.Type { + case kt.LogPrepare: + // record in pending prepares + r.pendingPrepares[l.Index] = true + case kt.LogCommit: + // record last commit + var lastCommit uint64 + var prepareLog *kt.Log + if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { + err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") + return + } + if lastCommit != r.lastCommit { + err = errors.Wrapf(err, + "last commit record in wal mismatched (expected: %v, actual: %v)", r.lastCommit, lastCommit) + return + } + if !r.pendingPrepares[prepareLog.Index] { + err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") + return + } + r.lastCommit = l.Index + // resolve previous prepared + delete(r.pendingPrepares, 
prepareLog.Index) + case kt.LogRollback: + var prepareLog *kt.Log + if _, prepareLog, err = r.getPrepareLog(l); err != nil { + err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") + return + } + if !r.pendingPrepares[prepareLog.Index] { + err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") + return + } + // resolve previous prepared + delete(r.pendingPrepares, prepareLog.Index) + case kt.LogBarrier: + case kt.LogNoop: + default: + err = errors.Wrapf(kt.ErrInvalidLog, "invalid log type: %v", l.Type) + return + } + + // record nextIndex + r.updateNextIndex(l) + } + + return +} + +func (r *Runtime) updateNextIndex(l *kt.Log) { + r.nextIndexLock.Lock() + defer r.nextIndexLock.Unlock() + + if r.nextIndex < l.Index+1 { + r.nextIndex = l.Index + 1 + } +} + +func (r *Runtime) checkIfPrepareFinished(index uint64) (finished bool) { + r.pendingPreparesLock.RLock() + defer r.pendingPreparesLock.RUnlock() + + return !r.pendingPrepares[index] +} + +func (r *Runtime) markPendingPrepare(index uint64) { + r.pendingPreparesLock.Lock() + defer r.pendingPreparesLock.Unlock() + + r.pendingPrepares[index] = true +} + +func (r *Runtime) markPrepareFinished(index uint64) { + r.pendingPreparesLock.Lock() + defer r.pendingPreparesLock.Unlock() + + delete(r.pendingPrepares, index) +} + +func (r *Runtime) errorSummary(errs map[proto.NodeID]error) error { + failNodes := make(map[proto.NodeID]error) + + for s, err := range errs { + if err != nil { + failNodes[s] = err + } } - if !inPeers { - // shutdown - return r.Shutdown() + if len(failNodes) == 0 { + return nil } - if err := r.config.Runner.UpdatePeers(peers); err != nil { - return fmt.Errorf("update peers to %s: %s", peers, err.Error()) + return errors.Wrapf(kt.ErrPrepareFailed, "fail on nodes: %v", failNodes) +} + +/// rpc related +func (r *Runtime) rpc(l *kt.Log, minCount int) (tracker *rpcTracker) { + req := &kt.RPCRequest{ + Instance: r.instanceID, + Log: l, } - r.isLeader = isLeader + tracker = newTracker(r, req, minCount) + tracker.send() + + // TODO(): track this rpc + + // TODO(): log remote errors + + return +} + +func (r *Runtime) getCaller(id proto.NodeID) Caller { + var caller Caller = rpc.NewPersistentCaller(id) + rawCaller, _ := r.callerMap.LoadOrStore(id, caller) + return rawCaller.(Caller) +} + +func (r *Runtime) goFunc(f func()) { + r.wg.Add(1) + go func() { + defer r.wg.Done() + f() + }() +} + +/// utils +func (r *Runtime) uint64ToBytes(i uint64) (res []byte) { + res = make([]byte, 8) + binary.BigEndian.PutUint64(res, i) + return +} + +func (r *Runtime) bytesToUint64(b []byte) (uint64, error) { + if len(b) < 8 { + return 0, kt.ErrInvalidLog + } + return binary.BigEndian.Uint64(b), nil +} - return nil +//// future extensions, barrier, noop log placeholder etc. +func (r *Runtime) followerNoop(l *kt.Log) (err error) { + return r.wal.Write(l) } diff --git a/kayak/runtime_inject_test.go b/kayak/runtime_inject_test.go new file mode 100644 index 000000000..1f729770e --- /dev/null +++ b/kayak/runtime_inject_test.go @@ -0,0 +1,24 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import "github.com/CovenantSQL/CovenantSQL/proto" + +// SetCaller injects caller for test purpose. +func (r *Runtime) SetCaller(id proto.NodeID, c Caller) { + r.callerMap.Store(id, c) +} diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index b0735744a..101883bab 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -14,354 +14,655 @@ * limitations under the License. */ -package kayak +package kayak_test import ( - "errors" - "io/ioutil" + "bytes" + "context" + "database/sql" + "encoding/binary" + "fmt" + "math/rand" + "net" + "net/rpc" "os" + "sync/atomic" "testing" "time" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/storage" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/jordwest/mock-conn" + "github.com/pkg/errors" . "github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" ) -func testConfig(rootDir string, nodeID proto.NodeID) Config { - config := &MockConfig{} - log.SetLevel(log.FatalLevel) - - runtimeConfig := &RuntimeConfig{ - RootDir: rootDir, - LocalID: nodeID, - Runner: &MockRunner{}, - Transport: &MockTransport{}, - ProcessTimeout: time.Microsecond * 800, - AutoBanCount: 100, +func init() { + rand.Seed(time.Now().UnixNano()) +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func RandStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] } + return string(b) +} - config.On("GetRuntimeConfig").Return(runtimeConfig) +type sqliteStorage struct { + st *storage.Storage + dsn string +} - return config +type queryStructure struct { + ConnID uint64 + SeqNo uint64 + Timestamp int64 + Queries []storage.Query } -func TestNewRuntime(t *testing.T) { - Convey("new runtime", t, func() { - config := testConfig(".", "leader") - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) +func newSQLiteStorage(dsn string) (s *sqliteStorage, err error) { + s = &sqliteStorage{} + s.st, err = storage.New(dsn) + s.dsn = dsn + return +} - Convey("missing arguments", func() { - var r *Runtime - var err error +func (s *sqliteStorage) EncodePayload(request interface{}) (data []byte, err error) { + var buf *bytes.Buffer + if buf, err = utils.EncodeMsgPack(request); err != nil { + err = errors.Wrap(err, "encode payload failed") + return + } - r, err = NewRuntime(nil, nil) - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) + data = buf.Bytes() + return +} - r, err = NewRuntime(config, nil) - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) +func (s *sqliteStorage) DecodePayload(data []byte) (request interface{}, err 
error) { + var req *queryStructure + if err = utils.DecodeMsgPack(data, &req); err != nil { + err = errors.Wrap(err, "decode payload failed") + return + } - r, err = NewRuntime(nil, peers) - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - }) + request = req + return +} - Convey("invalid peer", func() { - newPeers := peers.Clone() - // change peer signature - newPeers.Term = 3 +func (s *sqliteStorage) Check(data interface{}) (err error) { + // no check + return nil +} - r, err := NewRuntime(config, &newPeers) - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - }) +func (s *sqliteStorage) Commit(data interface{}) (result interface{}, err error) { + var d *queryStructure + var ok bool + if d, ok = data.(*queryStructure); !ok { + err = errors.New("invalid data") + return + } - Convey("server not in peers", func() { - newConfig := testConfig(".", "test2") + result, err = s.st.Exec(context.Background(), d.Queries) - r, err := NewRuntime(newConfig, peers) + return +} - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - }) +func (s *sqliteStorage) Query(ctx context.Context, queries []storage.Query) (columns []string, types []string, + data [][]interface{}, err error) { + return s.st.Query(ctx, queries) +} - Convey("success", func() { - r, err := NewRuntime(config, peers) +func (s *sqliteStorage) Close() { + if s.st != nil { + s.st.Close() + } +} - So(r, ShouldNotBeNil) - So(err, ShouldBeNil) - So(r.isLeader, ShouldBeTrue) - }) +type fakeMux struct { + mux map[proto.NodeID]*fakeService +} - Convey("success with follower", func() { - newConfig := testConfig(".", "follower1") - r, err := NewRuntime(newConfig, peers) +func newFakeMux() *fakeMux { + return &fakeMux{ + mux: make(map[proto.NodeID]*fakeService), + } +} - So(r, ShouldNotBeNil) - So(err, ShouldBeNil) - So(r.isLeader, ShouldBeFalse) - }) - }) +func (m *fakeMux) register(nodeID proto.NodeID, s *fakeService) { + m.mux[nodeID] = s +} + +func (m *fakeMux) get(nodeID proto.NodeID) *fakeService { + return m.mux[nodeID] +} + +type fakeService struct { + rt *kayak.Runtime + s *rpc.Server +} + +func newFakeService(rt *kayak.Runtime) (fs *fakeService) { + fs = &fakeService{ + rt: rt, + s: rpc.NewServer(), + } + + fs.s.RegisterName("Test", fs) + + return +} + +func (s *fakeService) Call(req *kt.RPCRequest, resp *interface{}) (err error) { + return s.rt.FollowerApply(req.Log) +} + +func (s *fakeService) serveConn(c net.Conn) { + s.s.ServeCodec(utils.GetMsgPackServerCodec(c)) +} + +type fakeCaller struct { + m *fakeMux + target proto.NodeID +} + +func newFakeCaller(m *fakeMux, nodeID proto.NodeID) *fakeCaller { + return &fakeCaller{ + m: m, + target: nodeID, + } } -func TestRuntimeAll(t *testing.T) { - Convey("init", t, func() { - d, err := ioutil.TempDir("", "kayak_test") +func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err error) { + fakeConn := mock_conn.NewConn() + + go c.m.get(c.target).serveConn(fakeConn.Server) + client := rpc.NewClientWithCodec(utils.GetMsgPackClientCodec(fakeConn.Client)) + defer client.Close() + + return client.Call(method, req, resp) +} + +func TestRuntime(t *testing.T) { + Convey("runtime test", t, func(c C) { + lvl := log.GetLevel() + log.SetLevel(log.FatalLevel) + defer log.SetLevel(lvl) + db1, err := newSQLiteStorage("test1.db") + So(err, ShouldBeNil) + defer func() { + db1.Close() + os.Remove("test1.db") + }() + db2, err := newSQLiteStorage("test2.db") So(err, ShouldBeNil) - if 
err == nil { - defer os.RemoveAll(d) + defer func() { + db2.Close() + os.Remove("test2.db") + }() + + node1 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + node2 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") + + peers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Leader: node1, + Servers: []proto.NodeID{ + node1, + node2, + }, + }, } - config := testConfig(d, "leader") - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + err = peers.Sign(privKey) + So(err, ShouldBeNil) - r, err := NewRuntime(config, peers) + wal1 := kl.NewMemWal() + defer wal1.Close() + cfg1 := &kt.RuntimeConfig{ + Handler: db1, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: wal1, + NodeID: node1, + ServiceName: "Test", + MethodName: "Call", + } + rt1, err := kayak.NewRuntime(cfg1) So(err, ShouldBeNil) - runner := config.GetRuntimeConfig().Runner.(*MockRunner) + wal2 := kl.NewMemWal() + defer wal2.Close() + cfg2 := &kt.RuntimeConfig{ + Handler: db2, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: wal2, + NodeID: node2, + ServiceName: "Test", + MethodName: "Call", + } + rt2, err := kayak.NewRuntime(cfg2) + So(err, ShouldBeNil) - Convey("runner init failed", func() { - unknownErr := errors.New("unknown error") - runner.On("Init", - mock.Anything, // config - mock.Anything, // peers - mock.Anything, // logStore - mock.Anything, // stableStore - mock.Anything, // transport - ).Return(unknownErr) + m := newFakeMux() + fs1 := newFakeService(rt1) + m.register(node1, fs1) + fs2 := newFakeService(rt2) + m.register(node2, fs2) - err := r.Init() + rt1.SetCaller(node2, newFakeCaller(m, node2)) + rt2.SetCaller(node1, newFakeCaller(m, node1)) - So(err, ShouldNotBeNil) - So(r.logStore, ShouldBeNil) - }) + err = rt1.Start() + So(err, ShouldBeNil) + defer rt1.Shutdown() - Convey("runner init success", func() { - runner.On("Init", - mock.Anything, // config - mock.Anything, // peers - mock.Anything, // logStore - mock.Anything, // stableStore - mock.Anything, // transport - ).Return(nil) - runner.On("Shutdown", mock.Anything). 
- Return(nil) - - var err error - err = r.Init() - So(err, ShouldBeNil) - So(r.logStore, ShouldNotBeNil) + err = rt2.Start() + So(err, ShouldBeNil) + defer rt2.Shutdown() - // run process - runner.On("Apply", mock.Anything).Return(uint64(1), nil) + q1 := &queryStructure{ + Queries: []storage.Query{ + {Pattern: "CREATE TABLE IF NOT EXISTS test (t1 text, t2 text, t3 text)"}, + }, + } + So(err, ShouldBeNil) - _, err = r.Apply([]byte("test")) - So(err, ShouldBeNil) + r1 := RandStringRunes(333) + r2 := RandStringRunes(333) + r3 := RandStringRunes(333) - // test get log - var l Log - l.Data = []byte("test") - l.Index = uint64(1) - err = r.logStore.StoreLog(&l) - So(err, ShouldBeNil) + q2 := &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, + }, + }, + } - data, err := r.GetLog(1) - So(err, ShouldBeNil) - So(data, ShouldResemble, []byte("test")) + rt1.Apply(context.Background(), q1) + rt2.Apply(context.Background(), q2) + rt1.Apply(context.Background(), q2) + db1.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT * FROM test"}, + }) + + var count uint64 + atomic.StoreUint64(&count, 1) + + for i := 0; i != 1000; i++ { + atomic.AddUint64(&count, 1) + q := &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, + }, + }, + } - // call shutdowns - err = r.Shutdown() + _, _, err = rt1.Apply(context.Background(), q) So(err, ShouldBeNil) + } + + // test rollback + q := &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INVALID QUERY", + }, + }, + } + _, _, err = rt1.Apply(context.Background(), q) + So(err, ShouldNotBeNil) + + // test timeout + q = &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, + }, + }, + } + cancelCtx, cancelCtxFunc := context.WithCancel(context.Background()) + cancelCtxFunc() + _, _, err = rt1.Apply(cancelCtx, q) + So(err, ShouldNotBeNil) + + total := atomic.LoadUint64(&count) + _, _, d1, _ := db1.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT COUNT(1) FROM test"}, + }) + So(d1, ShouldHaveLength, 1) + So(d1[0], ShouldHaveLength, 1) + So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) + + _, _, d2, _ := db2.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT COUNT(1) FROM test"}, }) + So(d2, ShouldHaveLength, 1) + So(d2[0], ShouldHaveLength, 1) + So(fmt.Sprint(d2[0][0]), ShouldResemble, fmt.Sprint(total)) }) + Convey("trivial cases", t, func() { + node1 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + node2 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") + node3 := proto.NodeID("000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8") + + peers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Leader: node1, + Servers: []proto.NodeID{ + node1, + node2, + }, + }, + } - Convey("init success with follower", t, func() { - d, err := ioutil.TempDir("", "kayak_test") + _, err := kayak.NewRuntime(nil) + So(err, ShouldNotBeNil) + _, err = kayak.NewRuntime(&kt.RuntimeConfig{}) + So(err, ShouldNotBeNil) + _, err = kayak.NewRuntime(&kt.RuntimeConfig{ + Peers: peers, + }) + So(err, ShouldNotBeNil) + + privKey, _, 
err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + err = peers.Sign(privKey) So(err, ShouldBeNil) - if err == nil { - defer os.RemoveAll(d) - } - config := testConfig(d, "follower1") - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", + _, err = kayak.NewRuntime(&kt.RuntimeConfig{ + Peers: peers, + NodeID: node3, + }) + So(err, ShouldNotBeNil) + }) + Convey("test log loading", t, func() { + w, err := kl.NewLevelDBWal("testLoad.db") + defer os.RemoveAll("testLoad.db") + So(err, ShouldBeNil) + err = w.Write(&kt.Log{ + LogHeader: kt.LogHeader{ + Index: 0, + Type: kt.LogPrepare, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), }, - { - Role: proto.Follower, - ID: "follower1", + Data: []byte("happy1"), + }) + So(err, ShouldBeNil) + err = w.Write(&kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Type: kt.LogPrepare, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), }, - { - Role: proto.Follower, - ID: "follower2", + Data: []byte("happy1"), + }) + So(err, ShouldBeNil) + data := make([]byte, 16) + binary.BigEndian.PutUint64(data, 0) // prepare log index + binary.BigEndian.PutUint64(data[8:], 0) // last commit index + err = w.Write(&kt.Log{ + LogHeader: kt.LogHeader{ + Index: 2, + Type: kt.LogCommit, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), + }, + Data: data, + }) + So(err, ShouldBeNil) + data = make([]byte, 8) + binary.BigEndian.PutUint64(data, 1) // prepare log index + err = w.Write(&kt.Log{ + LogHeader: kt.LogHeader{ + Index: 3, + Type: kt.LogRollback, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), }, + Data: data, }) + So(err, ShouldBeNil) + w.Close() + + w, err = kl.NewLevelDBWal("testLoad.db") + So(err, ShouldBeNil) + defer w.Close() + + node1 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + peers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Leader: node1, + Servers: []proto.NodeID{node1}, + }, + } - r, err := NewRuntime(config, peers) + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + err = peers.Sign(privKey) So(err, ShouldBeNil) - runner := config.GetRuntimeConfig().Runner.(*MockRunner) - runner.On("Init", - mock.Anything, // config - mock.Anything, // peers - mock.Anything, // logStore - mock.Anything, // stableStore - mock.Anything, // transport - ).Return(nil) - runner.On("Shutdown", mock.Anything).
- Return(nil) - runner.On("Apply", mock.Anything).Return(uint64(1), nil) - - err = r.Init() + cfg := &kt.RuntimeConfig{ + Handler: nil, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: w, + NodeID: node1, + ServiceName: "Test", + MethodName: "Call", + } + rt, err := kayak.NewRuntime(cfg) So(err, ShouldBeNil) - defer r.Shutdown() - _, err = r.Apply([]byte("test")) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrNotLeader) + So(rt.Start(), ShouldBeNil) + So(func() { rt.Start() }, ShouldNotPanic) + + So(rt.Shutdown(), ShouldBeNil) + So(func() { rt.Shutdown() }, ShouldNotPanic) }) +} - Convey("init success with peers update", t, func() { - d, err := ioutil.TempDir("", "kayak_test") +func BenchmarkRuntime(b *testing.B) { + Convey("runtime test", b, func(c C) { + log.SetLevel(log.DebugLevel) + f, err := os.OpenFile("test.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) So(err, ShouldBeNil) - if err == nil { - defer os.RemoveAll(d) - } + log.SetOutput(f) + defer f.Close() - config := testConfig(d, "leader") - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", + db1, err := newSQLiteStorage("test1.db") + So(err, ShouldBeNil) + defer func() { + db1.Close() + os.Remove("test1.db") + }() + db2, err := newSQLiteStorage("test2.db") + So(err, ShouldBeNil) + defer func() { + db2.Close() + os.Remove("test2.db") + }() + + node1 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + node2 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") + + peers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Leader: node1, + Servers: []proto.NodeID{ + node1, + node2, + }, }, - }) + } - r, err := NewRuntime(config, peers) + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + err = peers.Sign(privKey) So(err, ShouldBeNil) - runner := config.GetRuntimeConfig().Runner.(*MockRunner) - runner.On("Init", - mock.Anything, // config - mock.Anything, // peers - mock.Anything, // logStore - mock.Anything, // stableStore - mock.Anything, // transport - ).Return(nil) - runner.On("Shutdown", mock.Anything).Return(nil) - runner.On("UpdatePeers", mock.Anything).Return(nil) - - err = r.Init() + wal1 := kl.NewMemWal() + defer wal1.Close() + cfg1 := &kt.RuntimeConfig{ + Handler: db1, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: wal1, + NodeID: node1, + ServiceName: "Test", + MethodName: "Call", + } + rt1, err := kayak.NewRuntime(cfg1) So(err, ShouldBeNil) - defer r.Shutdown() - Convey("invalid peers", func() { - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) + wal2 := kl.NewMemWal() + defer wal2.Close() + cfg2 := &kt.RuntimeConfig{ + Handler: db2, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: wal2, + NodeID: node2, + ServiceName: "Test", + MethodName: "Call", + } + rt2, err := kayak.NewRuntime(cfg2) + So(err, ShouldBeNil) - newPeers.Term = 5 + m := newFakeMux() + fs1 := newFakeService(rt1) + m.register(node1, fs1) + fs2 := newFakeService(rt2) + m.register(node2, fs2) - // not valid - err := 
r.UpdatePeers(newPeers) + rt1.SetCaller(node2, newFakeCaller(m, node2)) + rt2.SetCaller(node1, newFakeCaller(m, node1)) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - }) + err = rt1.Start() + So(err, ShouldBeNil) + defer rt1.Shutdown() - Convey("change leader", func() { - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Follower, - ID: "leader", - }, - { - Role: proto.Leader, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) + err = rt2.Start() + So(err, ShouldBeNil) + defer rt2.Shutdown() - // valid - err := r.UpdatePeers(newPeers) + q1 := &queryStructure{ + Queries: []storage.Query{ + {Pattern: "CREATE TABLE IF NOT EXISTS test (t1 text, t2 text, t3 text)"}, + }, + } + So(err, ShouldBeNil) - So(err, ShouldBeNil) - So(r.isLeader, ShouldBeFalse) - }) + r1 := RandStringRunes(333) + r2 := RandStringRunes(333) + r3 := RandStringRunes(333) - Convey("dropped peer", func() { - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Leader, - ID: "follower1", - }, + q2 := &queryStructure{ + Queries: []storage.Query{ { - Role: proto.Follower, - ID: "follower2", + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, }, - }) + }, + } - // valid - err := r.UpdatePeers(newPeers) + rt1.Apply(context.Background(), q1) + rt2.Apply(context.Background(), q2) + rt1.Apply(context.Background(), q2) + db1.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT * FROM test"}, + }) - So(err, ShouldBeNil) - runner.AssertCalled(t, "Shutdown", true) + b.ResetTimer() + + var count uint64 + atomic.StoreUint64(&count, 1) + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + atomic.AddUint64(&count, 1) + q := &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, + }, + }, + } + _ = err + //c.So(err, ShouldBeNil) + + _, _, err = rt1.Apply(context.Background(), q) + //c.So(err, ShouldBeNil) + } + }) + + b.StopTimer() + + total := atomic.LoadUint64(&count) + _, _, d1, _ := db1.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT COUNT(1) FROM test"}, }) + So(d1, ShouldHaveLength, 1) + So(d1[0], ShouldHaveLength, 1) + So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) + + //_, _, d2, _ := db2.Query(context.Background(), []storage.Query{ + // {Pattern: "SELECT COUNT(1) FROM test"}, + //}) + //So(d2, ShouldHaveLength, 1) + //So(d2[0], ShouldHaveLength, 1) + //So(fmt.Sprint(d2[0][0]), ShouldResemble, fmt.Sprint(total)) + + b.StartTimer() }) } diff --git a/kayak/tracker.go b/kayak/tracker.go new file mode 100644 index 000000000..986a9198b --- /dev/null +++ b/kayak/tracker.go @@ -0,0 +1,160 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/CovenantSQL/CovenantSQL/proto" +) + +// rpcTracker defines the rpc call tracker +// that supports tracking of the rpc results. +type rpcTracker struct { + // related runtime + r *Runtime + // target nodes, a copy of current followers + nodes []proto.NodeID + // rpc method + method string + // rpc request + req interface{} + // minimum response count + minCount int + // responses + errLock sync.RWMutex + errors map[proto.NodeID]error + // scoreboard + complete int + sent uint32 + doneOnce sync.Once + doneCh chan struct{} + wg sync.WaitGroup + closed uint32 +} + +func newTracker(r *Runtime, req interface{}, minCount int) (t *rpcTracker) { + // copy nodes + nodes := append([]proto.NodeID(nil), r.followers...) + + if minCount > len(nodes) { + minCount = len(nodes) + } + if minCount < 0 { + minCount = 0 + } + + t = &rpcTracker{ + r: r, + nodes: nodes, + method: r.rpcMethod, + req: req, + minCount: minCount, + errors: make(map[proto.NodeID]error, len(nodes)), + doneCh: make(chan struct{}), + } + + return +} + +func (t *rpcTracker) send() { + if !atomic.CompareAndSwapUint32(&t.sent, 0, 1) { + return + } + + for i := range t.nodes { + t.wg.Add(1) + go t.callSingle(i) + } + + if t.minCount == 0 { + t.done() + } +} + +func (t *rpcTracker) callSingle(idx int) { + defer t.wg.Done() + err := t.r.getCaller(t.nodes[idx]).Call(t.method, t.req, nil) + t.errLock.Lock() + defer t.errLock.Unlock() + t.errors[t.nodes[idx]] = err + t.complete++ + + if t.complete >= t.minCount { + t.done() + } +} + +func (t *rpcTracker) done() { + t.doneOnce.Do(func() { + if t.doneCh != nil { + select { + case <-t.doneCh: + default: + close(t.doneCh) + } + } + }) +} + +func (t *rpcTracker) get(ctx context.Context) (errors map[proto.NodeID]error, meets bool, finished bool) { + for { + select { + case <-t.doneCh: + meets = true + default: + } + + select { + case <-ctx.Done(): + case <-t.doneCh: + meets = true + } + + break + } + + t.errLock.RLock() + defer t.errLock.RUnlock() + + errors = make(map[proto.NodeID]error) + + for s, e := range t.errors { + errors[s] = e + } + + if !meets && len(errors) >= t.minCount { + meets = true + } + + if len(errors) == len(t.nodes) { + finished = true + } + + return +} + +func (t *rpcTracker) close() { + if !atomic.CompareAndSwapUint32(&t.closed, 0, 1) { + return + } + + t.wg.Wait() + t.done() +} diff --git a/kayak/tracker_test.go b/kayak/tracker_test.go new file mode 100644 index 000000000..6ffccce6b --- /dev/null +++ b/kayak/tracker_test.go @@ -0,0 +1,103 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "context" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/pkg/errors" + .
"github.com/smartystreets/goconvey/convey" +) + +type fakeTrackerCaller struct { + c C +} + +func (c *fakeTrackerCaller) Call(method string, req interface{}, resp interface{}) (err error) { + time.Sleep(time.Millisecond * 500) + c.c.So(method, ShouldEqual, "test") + if req != 1 { + err = errors.New("invalid result") + } + return +} + +func TestTracker(t *testing.T) { + Convey("test tracker", t, func(c C) { + nodeID1 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") + nodeID2 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + r := &Runtime{ + rpcMethod: "test", + followers: []proto.NodeID{ + nodeID1, + nodeID2, + }, + } + r.SetCaller(nodeID1, &fakeTrackerCaller{c: c}) + r.SetCaller(nodeID2, &fakeTrackerCaller{c: c}) + t1 := newTracker(r, 1, 0) + t1.send() + _, meets, _ := t1.get(context.Background()) + So(meets, ShouldBeTrue) + + t2 := newTracker(r, 1, 1) + t2.send() + r2, meets, _ := t2.get(context.Background()) + So(r2, ShouldNotBeEmpty) + So(meets, ShouldBeTrue) + + t3 := newTracker(r, 1, 1) + t3.send() + ctx1, cancelCtx1 := context.WithTimeout(context.Background(), time.Millisecond*1) + defer cancelCtx1() + r3, meets, finished := t3.get(ctx1) + So(r3, ShouldBeEmpty) + So(meets, ShouldBeFalse) + So(finished, ShouldBeFalse) + + r3, meets, finished = t3.get(context.Background()) + So(r3, ShouldNotBeEmpty) + So(meets, ShouldBeTrue) + + t4 := newTracker(r, 1, 2) + t4.send() + r4, meets, finished := t4.get(context.Background()) + So(r4, ShouldHaveLength, 2) + So(meets, ShouldBeTrue) + So(finished, ShouldBeTrue) + + t5 := newTracker(r, 2, 2) + t5.send() + ctx2, cancelCtx2 := context.WithTimeout(context.Background(), time.Millisecond*1) + defer cancelCtx2() + r5, meets, finished := t5.get(ctx2) + So(r5, ShouldBeEmpty) + So(meets, ShouldBeFalse) + So(finished, ShouldBeFalse) + + r5, meets, finished = t5.get(context.Background()) + So(r5, ShouldHaveLength, 2) + So(meets, ShouldBeTrue) + So(finished, ShouldBeTrue) + + t5.close() + So(t5.closed, ShouldEqual, 1) + }) +} diff --git a/kayak/transport/doc.go b/kayak/transport/doc.go deleted file mode 100644 index c9307e311..000000000 --- a/kayak/transport/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* -Package transport implements applicable transport implementations for kayak runtime. -*/ -package transport diff --git a/kayak/transport/etls_transport.go b/kayak/transport/etls_transport.go deleted file mode 100644 index 59b104f18..000000000 --- a/kayak/transport/etls_transport.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "sync" - - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/rpc" -) - -// ETLSTransportConfig defines a transport config with transport id and rpc service related config. -type ETLSTransportConfig struct { - NodeID proto.NodeID - TransportID string - TransportService *ETLSTransportService - ServiceName string -} - -// ETLSTransport defines kayak transport using ETLS rpc as transport layer. -type ETLSTransport struct { - *ETLSTransportConfig - queue chan kayak.Request -} - -// ETLSTransportService defines kayak rpc endpoint to be registered to rpc server. -type ETLSTransportService struct { - ServiceName string - serviceMap sync.Map -} - -// ETLSTransportRequest defines kayak rpc request entity. -type ETLSTransportRequest struct { - proto.Envelope - TransportID string - NodeID proto.NodeID - Method string - Log *kayak.Log - Response []byte - Error error - respAvailable chan struct{} - respInit sync.Once -} - -// ETLSTransportResponse defines kayak rpc response entity. -type ETLSTransportResponse struct { - proto.Envelope - Data []byte -} - -// NewETLSTransport creates new transport and bind to transport service with specified transport id. -func NewETLSTransport(config *ETLSTransportConfig) (t *ETLSTransport) { - t = &ETLSTransport{ - ETLSTransportConfig: config, - queue: make(chan kayak.Request, 100), - } - - return -} - -// Init implements kayak.Transport.Init. -func (e *ETLSTransport) Init() error { - e.TransportService.register(e) - return nil -} - -// Request implements kayak.Transport.Request. -func (e *ETLSTransport) Request(ctx context.Context, - nodeID proto.NodeID, method string, log *kayak.Log) (response []byte, err error) { - req := &ETLSTransportRequest{ - TransportID: e.TransportID, - NodeID: e.NodeID, - Method: method, - Log: log, - } - resp := &ETLSTransportResponse{} - - if err = rpc.NewCaller().CallNodeWithContext(ctx, nodeID, e.ServiceName+".Call", req, resp); err != nil { - return - } - - response = resp.Data - - return -} - -// Process implements kayak.Transport.Process. -func (e *ETLSTransport) Process() <-chan kayak.Request { - // get response from remote request - return e.queue -} - -// Shutdown implements kayak.Transport.Shutdown. -func (e *ETLSTransport) Shutdown() error { - e.TransportService.deRegister(e) - return nil -} - -func (e *ETLSTransport) enqueue(req *ETLSTransportRequest) { - e.queue <- req -} - -// GetPeerNodeID implements kayak.Request.GetPeerNodeID. -func (r *ETLSTransportRequest) GetPeerNodeID() proto.NodeID { - return r.NodeID -} - -// GetMethod implements kayak.Request.GetMethod. -func (r *ETLSTransportRequest) GetMethod() string { - return r.Method -} - -// GetLog implements kayak.Request.GetLog. -func (r *ETLSTransportRequest) GetLog() *kayak.Log { - return r.Log -} - -// SendResponse implements kayak.Request.SendResponse. 
-func (r *ETLSTransportRequest) SendResponse(resp []byte, err error) error { - // send response with transport id - r.respInit.Do(r.initChan) - select { - case <-r.respAvailable: - return kayak.ErrInvalidRequest - default: - r.Response = resp - r.Error = err - close(r.respAvailable) - } - return nil -} - -func (r *ETLSTransportRequest) initChan() { - r.respAvailable = make(chan struct{}) -} - -func (r *ETLSTransportRequest) getResponse() ([]byte, error) { - r.respInit.Do(r.initChan) - <-r.respAvailable - return r.Response, r.Error -} - -// Call is the rpc entry of ETLS transport. -func (s *ETLSTransportService) Call(req *ETLSTransportRequest, resp *ETLSTransportResponse) error { - // verify - // TODO(xq262144): unified NodeID types in project - if req.Envelope.NodeID.String() != string(req.NodeID) { - return kayak.ErrInvalidRequest - } - - var t interface{} - var trans *ETLSTransport - var ok bool - - if t, ok = s.serviceMap.Load(req.TransportID); !ok { - return kayak.ErrInvalidRequest - } - - if trans, ok = t.(*ETLSTransport); !ok { - return kayak.ErrInvalidRequest - } - - trans.enqueue(req) - obj, err := req.getResponse() - - if resp != nil { - resp.Data = obj - } - - return err -} - -func (s *ETLSTransportService) register(t *ETLSTransport) { - // register transport to service map - s.serviceMap.Store(t.TransportID, t) -} - -func (s *ETLSTransportService) deRegister(t *ETLSTransport) { - // de-register transport from service map - s.serviceMap.Delete(t.TransportID) -} diff --git a/kayak/transport/etls_transport_test.go b/kayak/transport/etls_transport_test.go deleted file mode 100644 index 61eb385a8..000000000 --- a/kayak/transport/etls_transport_test.go +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "crypto/rand" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sync" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -type mockRes struct { - nodeID proto.NodeID - service *ETLSTransportService - transport *ETLSTransport - server *rpc.Server - listenAddr string -} - -func testWithNewNode() (mock *mockRes, err error) { - // mock etls transport without kms server - mock = &mockRes{} - addr := "127.0.0.1:0" - - // random node id - randBytes := make([]byte, 4) - rand.Read(randBytes) - mock.nodeID = proto.NodeID(hash.THashH(randBytes).String()) - kms.SetLocalNodeIDNonce(mock.nodeID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - mock.service = &ETLSTransportService{} - mock.transport = NewETLSTransport(&ETLSTransportConfig{ - NodeID: mock.nodeID, - TransportID: "test", - TransportService: mock.service, - ServiceName: "Kayak", - }) - mock.server, err = rpc.NewServerWithService(rpc.ServiceMap{"Kayak": mock.service}) - if err != nil { - return - } - _, testFile, _, _ := runtime.Caller(0) - privKeyPath := filepath.Join(filepath.Dir(testFile), "../../test/node_standalone/private.key") - if err = mock.server.InitRPCServer(addr, privKeyPath, []byte("")); err != nil { - return - } - mock.listenAddr = mock.server.Listener.Addr().String() - route.SetNodeAddrCache(mock.nodeID.ToRawNodeID(), mock.listenAddr) - var nonce *cpuminer.Uint256 - if nonce, err = kms.GetLocalNonce(); err != nil { - return - } - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - if err = kms.SetPublicKey(mock.nodeID, *nonce, pubKey); err != nil { - return - } - - log.Infof("fake node with node id: %v", mock.nodeID) - return -} - -func initKMS() (err error) { - var f *os.File - f, err = ioutil.TempFile("", "keystore_") - f.Close() - os.Remove(f.Name()) - route.InitKMS(f.Name()) - - // flag as test - kms.Unittest = true - - return -} - -func TestETLSTransport(t *testing.T) { - Convey("full test", t, FailureContinues, func(c C) { - var err error - - err = initKMS() - So(err, ShouldBeNil) - - mock1, err := testWithNewNode() - So(err, ShouldBeNil) - mock2, err := testWithNewNode() - So(err, ShouldBeNil) - - var wgServer, wgRequest sync.WaitGroup - - // start server - wgServer.Add(1) - go func() { - defer wgServer.Done() - mock1.server.Serve() - }() - - wgServer.Add(1) - go func() { - defer wgServer.Done() - mock2.server.Serve() - }() - - // init transport - err = mock1.transport.Init() - So(err, ShouldBeNil) - err = mock2.transport.Init() - So(err, ShouldBeNil) - - testLog := testLogFixture([]byte("test request")) - - // make request issuer as node 1 - kms.SetLocalNodeIDNonce(mock1.nodeID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - - wgRequest.Add(1) - go func() { - defer wgRequest.Done() - res, err := mock1.transport.Request(context.Background(), mock2.nodeID, "test method", testLog) - c.So(err, ShouldBeNil) - c.So(res, ShouldResemble, []byte("test response")) - }() - - wgRequest.Add(1) - go func() { - defer wgRequest.Done() - select { - case req := <-mock2.transport.Process(): - c.So(req.GetLog(), ShouldResemble, testLog) - c.So(req.GetMethod(), ShouldEqual, "test method") - c.So(req.GetPeerNodeID(), ShouldEqual, mock1.nodeID) - req.SendResponse([]byte("test response"), nil) - } - }() - - wgRequest.Wait() - - // shutdown transport - err = mock1.transport.Shutdown() - So(err, ShouldBeNil) - err = mock2.transport.Shutdown() - So(err, ShouldBeNil) - - // stop - mock1.server.Listener.Close() - mock1.server.Stop() - mock2.server.Listener.Close() - mock2.server.Stop() - - wgServer.Wait() - }) -} - -func 
TestETLSIntegration(t *testing.T) { - type createMockRes struct { - runner *kayak.TwoPCRunner - transport *ETLSTransport - worker *MockWorker - config *kayak.TwoPCConfig - runtime *kayak.Runtime - etlsMock *mockRes - } - - // create mock returns basic arguments to prepare for a server - createMock := func(etlsMock *mockRes, peers *kayak.Peers) (res *createMockRes) { - res = &createMockRes{} - log.SetLevel(log.FatalLevel) - d, _ := ioutil.TempDir("", "kayak_test") - - // etls mock res - res.etlsMock = etlsMock - // runner instance - res.runner = kayak.NewTwoPCRunner() - // transport for this instance - res.transport = res.etlsMock.transport - // underlying worker - res.worker = &MockWorker{} - // runner config including timeout settings, commit log storage, local server id - res.config = &kayak.TwoPCConfig{ - RuntimeConfig: kayak.RuntimeConfig{ - RootDir: d, - LocalID: etlsMock.nodeID, - Runner: res.runner, - Transport: res.transport, - ProcessTimeout: time.Millisecond * 800, - }, - Storage: res.worker, - } - res.runtime, _ = kayak.NewRuntime(res.config, peers) - go func() { - res.etlsMock.server.Serve() - }() - return - } - // cleanup log storage after execution - cleanupDir := func(c *createMockRes) { - os.RemoveAll(c.config.RuntimeConfig.RootDir) - } - - Convey("integration test", t, FailureContinues, func(c C) { - var err error - - err = initKMS() - So(err, ShouldBeNil) - - lNodeEtls, err := testWithNewNode() - So(err, ShouldBeNil) - f1NodeEtls, err := testWithNewNode() - So(err, ShouldBeNil) - f2NodeEtls, err := testWithNewNode() - So(err, ShouldBeNil) - - // peers is a simple 3-node peer configuration - peers := testPeersFixture(1, []*kayak.Server{ - { - Role: proto.Leader, - ID: lNodeEtls.nodeID, - }, - { - Role: proto.Follower, - ID: f1NodeEtls.nodeID, - }, - { - Role: proto.Follower, - ID: f2NodeEtls.nodeID, - }, - }) - - lMock := createMock(lNodeEtls, peers) - f1Mock := createMock(f1NodeEtls, peers) - f2Mock := createMock(f2NodeEtls, peers) - defer cleanupDir(lMock) - defer cleanupDir(f1Mock) - defer cleanupDir(f2Mock) - - // init - err = lMock.runtime.Init() - So(err, ShouldBeNil) - err = f1Mock.runtime.Init() - So(err, ShouldBeNil) - err = f2Mock.runtime.Init() - So(err, ShouldBeNil) - - // payload to send - testPayload := []byte("test data") - - // make request issuer as leader node - kms.SetLocalNodeIDNonce(lMock.config.LocalID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - - // underlying worker mock, prepare/commit/rollback with be received the decoded data - callOrder := &CallCollector{} - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f1Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_commit") - }) - f2Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_commit") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_prepare") - }) - lMock.worker.On("Commit", mock.Anything, testPayload). 
- Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_commit") - }) - - // process the encoded data - _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - "l_prepare", - "f_commit", - "f_commit", - "l_commit", - }) - - // process the encoded data again - callOrder.Reset() - _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - "l_prepare", - "f_commit", - "f_commit", - "l_commit", - }) - - // shutdown - lMock.runtime.Shutdown() - f1Mock.runtime.Shutdown() - f2Mock.runtime.Shutdown() - - // stop server - lNodeEtls.server.Listener.Close() - f1NodeEtls.server.Listener.Close() - f2NodeEtls.server.Listener.Close() - lNodeEtls.server.Stop() - f1NodeEtls.server.Stop() - f2NodeEtls.server.Stop() - }) -} diff --git a/kayak/transport/network_transport.go b/kayak/transport/network_transport.go deleted file mode 100644 index 5ebade2f3..000000000 --- a/kayak/transport/network_transport.go +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "io" - "net" - "net/rpc" - "net/rpc/jsonrpc" - "sync" - - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -// ConnWithPeerNodeID defines interface support getting remote peer ID. -type ConnWithPeerNodeID interface { - net.Conn - - GetPeerNodeID() proto.NodeID -} - -// StreamLayer is the underlying network connection layer. -type StreamLayer interface { - Accept() (ConnWithPeerNodeID, error) - Dial(context.Context, proto.NodeID) (ConnWithPeerNodeID, error) -} - -// NetworkRequest is the request object hand off inter node request. -type NetworkRequest struct { - NodeID proto.NodeID - Method string - Log *kayak.Log - Response []byte - Error error - respAvailable chan struct{} - respInit sync.Once -} - -// ClientCodecBuilder is the client codec builder. -type ClientCodecBuilder func(io.ReadWriteCloser) rpc.ClientCodec - -// ServerCodecBuilder is the server codec builder. -type ServerCodecBuilder func(closer io.ReadWriteCloser) rpc.ServerCodec - -// NetworkResponse is the response object hand off inter node response. -type NetworkResponse struct { - Response []byte -} - -// NetworkTransport support customized stream layer integration with kayak transport. -type NetworkTransport struct { - config *NetworkTransportConfig - shutdownCh chan struct{} - queue chan kayak.Request -} - -// NetworkTransportConfig defines NetworkTransport config object. -type NetworkTransportConfig struct { - NodeID proto.NodeID - StreamLayer StreamLayer - - ClientCodec ClientCodecBuilder - ServerCodec ServerCodecBuilder -} - -// NetworkTransportRequestProxy defines a rpc proxy method exported to golang net/rpc. 
-type NetworkTransportRequestProxy struct { - transport *NetworkTransport - conn ConnWithPeerNodeID - server *rpc.Server -} - -// NewConfig returns new transport config. -func NewConfig(nodeID proto.NodeID, streamLayer StreamLayer) (c *NetworkTransportConfig) { - return NewConfigWithCodec(nodeID, streamLayer, jsonrpc.NewClientCodec, jsonrpc.NewServerCodec) -} - -// NewConfigWithCodec returns new transport config with custom codec. -func NewConfigWithCodec(nodeID proto.NodeID, streamLayer StreamLayer, - clientCodec ClientCodecBuilder, serverCodec ServerCodecBuilder) (c *NetworkTransportConfig) { - return &NetworkTransportConfig{ - NodeID: nodeID, - StreamLayer: streamLayer, - ClientCodec: clientCodec, - ServerCodec: serverCodec, - } -} - -// NewRequest returns new request entity. -func NewRequest(nodeID proto.NodeID, method string, log *kayak.Log) (r *NetworkRequest) { - return &NetworkRequest{ - NodeID: nodeID, - Method: method, - Log: log, - } -} - -// NewResponse returns response returns new response entity. -func NewResponse() (r *NetworkResponse) { - return &NetworkResponse{} -} - -// NewTransport returns new network transport. -func NewTransport(config *NetworkTransportConfig) (t *NetworkTransport) { - t = &NetworkTransport{ - config: config, - shutdownCh: make(chan struct{}), - queue: make(chan kayak.Request, 100), - } - - return -} - -// NewRequestProxy returns request proxy object hand-off golang net/rpc. -func NewRequestProxy(transport *NetworkTransport, conn ConnWithPeerNodeID) (rp *NetworkTransportRequestProxy) { - rp = &NetworkTransportRequestProxy{ - transport: transport, - conn: conn, - server: rpc.NewServer(), - } - - rp.server.RegisterName("Service", rp) - - return -} - -// GetPeerNodeID implements kayak.Request.GetPeerNodeID. -func (r *NetworkRequest) GetPeerNodeID() proto.NodeID { - return r.NodeID -} - -// GetMethod implements kayak.Request.GetMethod. -func (r *NetworkRequest) GetMethod() string { - return r.Method -} - -// GetLog implements kayak.Request.GetLog. -func (r *NetworkRequest) GetLog() *kayak.Log { - return r.Log -} - -// SendResponse implements kayak.Request.SendResponse. -func (r *NetworkRequest) SendResponse(resp []byte, err error) error { - r.respInit.Do(r.initChan) - select { - case <-r.respAvailable: - return kayak.ErrInvalidRequest - default: - r.Response = resp - r.Error = err - close(r.respAvailable) - } - return nil -} - -func (r *NetworkRequest) getResponse() ([]byte, error) { - r.respInit.Do(r.initChan) - <-r.respAvailable - return r.Response, r.Error -} - -func (r *NetworkRequest) initChan() { - r.respAvailable = make(chan struct{}) -} - -func (r *NetworkResponse) set(v []byte) { - r.Response = v -} - -func (r *NetworkResponse) get() []byte { - return r.Response -} - -// Init implements kayak.Transport.Init method. -func (t *NetworkTransport) Init() error { - go t.run() - return nil -} - -// Request implements kayak.Transport.Request method. 
-func (t *NetworkTransport) Request(ctx context.Context, nodeID proto.NodeID, - method string, log *kayak.Log) (response []byte, err error) { - conn, err := t.config.StreamLayer.Dial(ctx, nodeID) - - if err != nil { - return - } - - // check node id - if conn.GetPeerNodeID() != nodeID { - // err creating connection - return nil, kayak.ErrInvalidRequest - } - - client := rpc.NewClientWithCodec(t.config.ClientCodec(conn)) - req := NewRequest(t.config.NodeID, method, log) - res := NewResponse() - err = client.Call("Service.Call", req, res) - - return res.get(), err -} - -// Process implements kayak.Transport.Process method. -func (t *NetworkTransport) Process() <-chan kayak.Request { - return t.queue -} - -// Shutdown implements kayak.Transport.Shutdown method. -func (t *NetworkTransport) Shutdown() error { - select { - case <-t.shutdownCh: - default: - close(t.shutdownCh) - } - return nil -} - -func (t *NetworkTransport) enqueue(req *NetworkRequest) { - t.queue <- req -} - -// Call hand-off request from remote rpc server. -func (p *NetworkTransportRequestProxy) Call(req *NetworkRequest, res *NetworkResponse) error { - // verify node id - if p.conn.GetPeerNodeID() != req.NodeID { - return kayak.ErrInvalidRequest - } - - p.transport.enqueue(req) - obj, err := req.getResponse() - res.set(obj) - return err -} - -func (p *NetworkTransportRequestProxy) serve() { - p.server.ServeCodec(p.transport.config.ServerCodec(p.conn)) -} - -func (t *NetworkTransport) run() { - for { - select { - case <-t.shutdownCh: - return - default: - conn, err := t.config.StreamLayer.Accept() - if err != nil { - continue - } - - go t.handleConn(conn) - } - } -} - -func (t *NetworkTransport) handleConn(conn ConnWithPeerNodeID) { - NewRequestProxy(t, conn).serve() -} diff --git a/kayak/transport/network_transport_test.go b/kayak/transport/network_transport_test.go deleted file mode 100644 index 9ed230f68..000000000 --- a/kayak/transport/network_transport_test.go +++ /dev/null @@ -1,454 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "io/ioutil" - "os" - "sync" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/jordwest/mock-conn" - . 
"github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -type TestConn struct { - *mock_conn.End - peerNodeID proto.NodeID -} - -type TestStreamRouter struct { - sync.Mutex - streamMap map[proto.NodeID]*TestStream -} - -type TestStream struct { - nodeID proto.NodeID - router *TestStreamRouter - queue chan *TestConn -} - -func NewTestStreamRouter() *TestStreamRouter { - return &TestStreamRouter{ - streamMap: make(map[proto.NodeID]*TestStream), - } -} - -func NewTestStream(nodeID proto.NodeID, router *TestStreamRouter) *TestStream { - return &TestStream{ - nodeID: nodeID, - router: router, - queue: make(chan *TestConn), - } -} - -func NewSocketPair(fromNode proto.NodeID, toNode proto.NodeID) (clientConn *TestConn, serverConn *TestConn) { - conn := mock_conn.NewConn() - clientConn = NewTestConn(conn.Server, toNode) - serverConn = NewTestConn(conn.Client, fromNode) - return -} - -func NewTestConn(endpoint *mock_conn.End, peerNodeID proto.NodeID) *TestConn { - return &TestConn{ - End: endpoint, - peerNodeID: peerNodeID, - } -} - -func (r *TestStreamRouter) Get(id proto.NodeID) *TestStream { - r.Lock() - defer r.Unlock() - - if _, ok := r.streamMap[id]; !ok { - r.streamMap[id] = NewTestStream(id, r) - } - - return r.streamMap[id] -} - -func (c *TestConn) GetPeerNodeID() proto.NodeID { - return c.peerNodeID -} - -func (s *TestStream) Accept() (conn ConnWithPeerNodeID, err error) { - select { - case conn := <-s.queue: - return conn, nil - } -} - -func (s *TestStream) Dial(ctx context.Context, nodeID proto.NodeID) (conn ConnWithPeerNodeID, err error) { - clientConn, serverConn := NewSocketPair(s.nodeID, nodeID) - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case s.router.Get(nodeID).queue <- serverConn: - } - - return clientConn, nil -} - -// MockWorker is an autogenerated mock type for the Worker type -type MockWorker struct { - mock.Mock -} - -// Commit provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Commit(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Prepare provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Rollback provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(ctx, wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type CallCollector struct { - l sync.Mutex - callOrder []string -} - -func (c *CallCollector) Append(call string) { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = append(c.callOrder, call) -} - -func (c *CallCollector) Get() []string { - c.l.Lock() - defer c.l.Unlock() - return c.callOrder[:] -} - -func (c *CallCollector) Reset() { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = c.callOrder[:0] -} - -func testPeersFixture(term uint64, servers []*kayak.Server) *kayak.Peers { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 
0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - - newServers := make([]*kayak.Server, 0, len(servers)) - var leaderNode *kayak.Server - - for _, s := range servers { - newS := &kayak.Server{ - Role: s.Role, - ID: s.ID, - PubKey: pubKey, - } - newServers = append(newServers, newS) - if newS.Role == proto.Leader { - leaderNode = newS - } - } - - peers := &kayak.Peers{ - Term: term, - Leader: leaderNode, - Servers: servers, - PubKey: pubKey, - } - - peers.Sign(privKey) - - return peers -} - -func testLogFixture(data []byte) (log *kayak.Log) { - log = &kayak.Log{ - Index: uint64(1), - Term: uint64(1), - Data: data, - } - - log.ComputeHash() - - return -} - -func TestConnPair(t *testing.T) { - Convey("test transport", t, FailureContinues, func(c C) { - router := NewTestStreamRouter() - stream1 := router.Get("id1") - stream2 := router.Get("id2") - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - var err error - clientConn, err := stream1.Dial(context.Background(), "id2") - c.So(err, ShouldBeNil) - _, err = clientConn.Write([]byte("test")) - c.So(err, ShouldBeNil) - clientConn.Close() - }() - - wg.Add(1) - go func() { - defer wg.Done() - var err error - serverConn, err := stream2.Accept() - c.So(err, ShouldBeNil) - buffer, err := ioutil.ReadAll(serverConn) - c.So(err, ShouldBeNil) - c.So(buffer, ShouldResemble, []byte("test")) - }() - - wg.Wait() - }) -} - -func TestTransport(t *testing.T) { - Convey("test transport", t, FailureContinues, func(c C) { - router := NewTestStreamRouter() - stream1 := router.Get("id1") - stream2 := router.Get("id2") - config1 := NewConfig("id1", stream1) - config2 := NewConfig("id2", stream2) - t1 := NewTransport(config1) - t2 := NewTransport(config2) - testLog := testLogFixture([]byte("test request")) - - var err error - - // init - err = t1.Init() - So(err, ShouldBeNil) - err = t2.Init() - So(err, ShouldBeNil) - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - res, err := t1.Request(context.Background(), "id2", "test method", testLog) - c.So(err, ShouldBeNil) - c.So(res, ShouldResemble, []byte("test response")) - }() - - wg.Add(1) - go func() { - defer wg.Done() - select { - case req := <-t2.Process(): - c.So(req.GetLog(), ShouldResemble, testLog) - c.So(req.GetMethod(), ShouldEqual, "test method") - c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("id1")) - req.SendResponse([]byte("test response"), nil) - } - }() - - wg.Wait() - - // shutdown transport - err = t1.Shutdown() - So(err, ShouldBeNil) - err = t2.Shutdown() - So(err, ShouldBeNil) - }) -} - -func TestIntegration(t *testing.T) { - type createMockRes struct { - runner *kayak.TwoPCRunner - transport *NetworkTransport - worker *MockWorker - config *kayak.TwoPCConfig - runtime *kayak.Runtime - } - - // router is a dummy channel based local rpc transport router - mockRouter := NewTestStreamRouter() - - // peers is a simple 3-node peer configuration - peers := testPeersFixture(1, []*kayak.Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - // create mock returns basic arguments to prepare for a server - createMock := func(nodeID proto.NodeID) (res *createMockRes) { - res = &createMockRes{} - log.SetLevel(log.FatalLevel) - d, _ := ioutil.TempDir("", "kayak_test") - - // runner instance - res.runner = kayak.NewTwoPCRunner() - // transport for this 
instance - res.transport = NewTransport(NewConfig(nodeID, mockRouter.Get(nodeID))) - // underlying worker - res.worker = &MockWorker{} - // runner config including timeout settings, commit log storage, local server id - res.config = &kayak.TwoPCConfig{ - RuntimeConfig: kayak.RuntimeConfig{ - RootDir: d, - LocalID: nodeID, - Runner: res.runner, - Transport: res.transport, - ProcessTimeout: time.Millisecond * 800, - }, - Storage: res.worker, - } - res.runtime, _ = kayak.NewRuntime(res.config, peers) - return - } - // cleanup log storage after execution - cleanupDir := func(c *createMockRes) { - os.RemoveAll(c.config.RuntimeConfig.RootDir) - } - - Convey("integration test", t, FailureContinues, func(c C) { - var err error - - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower2") - defer cleanupDir(lMock) - defer cleanupDir(f1Mock) - defer cleanupDir(f2Mock) - - // init - err = lMock.runtime.Init() - So(err, ShouldBeNil) - err = f1Mock.runtime.Init() - So(err, ShouldBeNil) - err = f2Mock.runtime.Init() - So(err, ShouldBeNil) - - // payload to send - testPayload := []byte("test data") - - // underlying worker mock, prepare/commit/rollback with be received the decoded data - callOrder := &CallCollector{} - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f1Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_commit") - }) - f2Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_commit") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_prepare") - }) - lMock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_commit") - }) - - // process the encoded data - _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - "l_prepare", - "f_commit", - "f_commit", - "l_commit", - }) - - // process the encoded data again - callOrder.Reset() - _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - "l_prepare", - "f_commit", - "f_commit", - "l_commit", - }) - - // shutdown - lMock.runtime.Shutdown() - f1Mock.runtime.Shutdown() - f2Mock.runtime.Shutdown() - }) -} diff --git a/kayak/twopc_runner.go b/kayak/twopc_runner.go deleted file mode 100644 index 3f9c75069..000000000 --- a/kayak/twopc_runner.go +++ /dev/null @@ -1,741 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kayak - -import ( - "context" - "fmt" - "runtime/trace" - "sync" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -var ( - // current term stored in local meta - keyCurrentTerm = []byte("CurrentTerm") - - // committed index store in local meta - keyCommittedIndex = []byte("CommittedIndex") -) - -// TwoPCConfig is a RuntimeConfig implementation organizing two phase commit mutation. -type TwoPCConfig struct { - RuntimeConfig - - // Storage is the underlying twopc Storage - Storage twopc.Worker -} - -type logProcessResult struct { - offset uint64 - err error -} - -// TwoPCRunner is a Runner implementation organizing two phase commit mutation. -type TwoPCRunner struct { - config *TwoPCConfig - peers *Peers - logStore LogStore - stableStore StableStore - transport Transport - - // Current term/log state - currentTerm uint64 - lastLogIndex uint64 - lastLogTerm uint64 - lastLogHash *hash.Hash - - // Server role - leader *Server - role proto.ServerRole - - // Shutdown channel to exit, protected to prevent concurrent exits - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex - - // Lock/events - processReq chan []byte - processRes chan logProcessResult - updatePeersLock sync.Mutex - updatePeersReq chan *Peers - updatePeersRes chan error - - currentState ServerState - stateLock sync.Mutex - currentContext context.Context - - // Tracks running goroutines - routinesGroup sync.WaitGroup -} - -// TwoPCWorkerWrapper wraps remote runner as worker. -type TwoPCWorkerWrapper struct { - runner *TwoPCRunner - nodeID proto.NodeID -} - -// NewTwoPCRunner create a two pc runner. -func NewTwoPCRunner() *TwoPCRunner { - return &TwoPCRunner{ - shutdownCh: make(chan struct{}), - processReq: make(chan []byte), - processRes: make(chan logProcessResult), - updatePeersReq: make(chan *Peers), - updatePeersRes: make(chan error), - } -} - -// GetRuntimeConfig implements Config.GetRuntimeConfig. -func (tpc *TwoPCConfig) GetRuntimeConfig() *RuntimeConfig { - return &tpc.RuntimeConfig -} - -// Init implements Runner.Init. 
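// A minimal, self-contained sketch (miniRunner is illustrative, not part of this
// package) of the channel-serialization pattern TwoPCRunner relies on: Apply
// sends work to the single run() goroutine (started by Init via r.goFunc(r.run))
// through processReq and blocks on processRes, so all log/state mutation happens
// on one goroutine without explicit locks.
type miniRunner struct {
	processReq chan []byte
	processRes chan error
	shutdownCh chan struct{}
}

func (m *miniRunner) run() {
	for {
		select {
		case <-m.shutdownCh:
			return
		case data := <-m.processReq:
			// state is owned exclusively by this goroutine; requests are serialized
			m.processRes <- m.handle(data)
		}
	}
}

func (m *miniRunner) handle(data []byte) error { return nil } // stand-in for processNewLog

func (m *miniRunner) Apply(data []byte) error {
	m.processReq <- data
	return <-m.processRes
}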
-func (r *TwoPCRunner) Init(config Config, peers *Peers, logs LogStore, stable StableStore, transport Transport) error { - if _, ok := config.(*TwoPCConfig); !ok { - return ErrInvalidConfig - } - - if peers == nil || logs == nil || stable == nil || transport == nil { - return ErrInvalidConfig - } - - r.config = config.(*TwoPCConfig) - r.peers = peers - r.logStore = logs - r.stableStore = stable - r.transport = transport - r.setState(Idle) - - // restore from log/stable store - if err := r.tryRestore(); err != nil { - return err - } - - // set init peers and update term - if err := r.initState(); err != nil { - return err - } - - r.goFunc(r.run) - - return nil -} - -func (r *TwoPCRunner) tryRestore() error { - // Init term, committedIndex, storage - var err error - var lastTerm uint64 - - lastTerm, err = r.stableStore.GetUint64(keyCurrentTerm) - if err != nil && err != ErrKeyNotFound { - return fmt.Errorf("get last term failed: %s", err.Error()) - } - - if r.peers.Term < lastTerm { - // invalid config, term older than current context - // suggest rebuild local config - return ErrInvalidConfig - } - - var lastCommitted uint64 - lastCommitted, err = r.stableStore.GetUint64(keyCommittedIndex) - if err != nil && err != ErrKeyNotFound { - return fmt.Errorf("last committed index not found: %s", err.Error()) - } - - var lastCommittedLog Log - if lastCommitted > 0 { - if err = r.logStore.GetLog(lastCommitted, &lastCommittedLog); err != nil { - return fmt.Errorf("failed to get last log at index %d: %s", lastCommitted, err.Error()) - } - } - - // committed index term check - if r.peers.Term < lastCommittedLog.Term { - return fmt.Errorf("invalid last committed log term, peers: %d, local committed: %d", - r.peers.Term, lastCommittedLog.Term) - } - - // assert index related log validation - if lastCommitted != lastCommittedLog.Index { - // invalid log - return fmt.Errorf("invalid last committed log index, index: %d, log: %d", - lastCommitted, lastCommittedLog.Index) - } - - // get last index - var lastIndex uint64 - lastIndex, err = r.logStore.LastIndex() - if err != nil { - return fmt.Errorf("failed to get last index: %s", err.Error()) - } - - if lastIndex > lastCommitted { - // uncommitted log found, print warning - log.WithFields(log.Fields{ - "uncommitted": lastIndex, - "committed": lastCommitted, - }).Warning("truncating local uncommitted log") - - // truncate local uncommitted logs - r.logStore.DeleteRange(lastCommitted+1, lastIndex) - } - - if err = r.reValidateLocalLogs(); err != nil { - return err - } - - if err = r.restoreUnderlying(); err != nil { - return err - } - - r.currentTerm = r.peers.Term - r.lastLogTerm = lastCommittedLog.Term - r.lastLogIndex = lastCommitted - if lastCommittedLog.Index != 0 { - r.lastLogHash = &lastCommittedLog.Hash - } else { - r.lastLogHash = nil - } - - return nil -} - -func (r *TwoPCRunner) initState() error { - if !r.peers.Verify() { - return ErrInvalidConfig - } - - // set leader and node role - r.leader = r.peers.Leader - - for _, s := range r.peers.Servers { - if s.ID == r.config.LocalID { - r.role = s.Role - break - } - } - - // update peers term - return r.stableStore.SetUint64(keyCurrentTerm, r.peers.Term) -} - -func (r *TwoPCRunner) reValidateLocalLogs() error { - // TODO(xq262144): maybe re-validating local log hashes - return nil -} - -func (r *TwoPCRunner) restoreUnderlying() error { - // TODO(xq262144): restore underlying from snapshot and replaying local logs - return nil -} - -// UpdatePeers implements Runner.UpdatePeers. 
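// UpdatePeers below accepts a new configuration only if its term advances and
// its signature verifies; a hypothetical restatement of those gates:
//   switch {
//   case peers.Term == r.peers.Term: // same term: no-op, return nil
//   case peers.Term < r.peers.Term:  // lower term, possibly spoofed: ErrInvalidConfig
//   case !peers.Verify():            // broken signature: ErrInvalidConfig
//   default:                         // hand off to run() via updatePeersReq
//   }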
-func (r *TwoPCRunner) UpdatePeers(peers *Peers) error { - r.updatePeersLock.Lock() - defer r.updatePeersLock.Unlock() - - // wait for transaction completion - // TODO(xq262144): support transaction timeout - - if peers.Term == r.peers.Term { - // same term, ignore - return nil - } - - if peers.Term < r.peers.Term { - // lower term, maybe spoofing request - return ErrInvalidConfig - } - - // validate peers structure - if !peers.Verify() { - return ErrInvalidConfig - } - - r.updatePeersReq <- peers - return <-r.updatePeersRes -} - -// Apply implements Runner.Apply. -func (r *TwoPCRunner) Apply(data []byte) (uint64, error) { - // check leader privilege - if r.role != proto.Leader { - return 0, ErrNotLeader - } - - //TODO(auxten): need throughput optimization - r.processReq <- data - res := <-r.processRes - - return res.offset, res.err -} - -// Shutdown implements Runner.Shutdown. -func (r *TwoPCRunner) Shutdown(wait bool) error { - r.shutdownLock.Lock() - defer r.shutdownLock.Unlock() - - if !r.shutdown { - close(r.shutdownCh) - r.shutdown = true - r.setState(Shutdown) - if wait { - r.routinesGroup.Wait() - } - } - - return nil -} - -func (r *TwoPCRunner) run() { - for { - select { - case <-r.shutdownCh: - // TODO(xq262144): cleanup logic - return - case data := <-r.processReq: - r.processRes <- r.processNewLog(data) - case request := <-r.transport.Process(): - r.processRequest(request) - // TODO(xq262144): support timeout logic for auto rollback prepared transaction on leader change - case peersUpdate := <-r.safeForPeersUpdate(): - r.processPeersUpdate(peersUpdate) - } - } -} - -func (r *TwoPCRunner) safeForPeersUpdate() chan *Peers { - if r.getState() == Idle { - return r.updatePeersReq - } - - return nil -} - -func (r *TwoPCRunner) processNewLog(data []byte) (res logProcessResult) { - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "processNewLog") - defer task.End() - defer trace.StartRegion(ctx, "processNewLogRegion").End() - - // build Log - l := &Log{ - Index: r.lastLogIndex + 1, - Term: r.currentTerm, - Data: data, - LastHash: r.lastLogHash, - } - - // compute hash - l.ComputeHash() - - localPrepare := func(ctx context.Context) error { - // prepare local prepare node - if err := r.config.Storage.Prepare(ctx, l.Data); err != nil { - return err - } - - // write log to storage - return r.logStore.StoreLog(l) - } - - localRollback := func(ctx context.Context) error { - // prepare local rollback node - r.logStore.DeleteRange(r.lastLogIndex+1, l.Index) - return r.config.Storage.Rollback(ctx, l.Data) - } - - localCommit := func(ctx context.Context) (err error) { - err = r.config.Storage.Commit(ctx, l.Data) - - r.stableStore.SetUint64(keyCommittedIndex, l.Index) - r.lastLogHash = &l.Hash - r.lastLogIndex = l.Index - r.lastLogTerm = l.Term - - return - } - - // build 2PC workers - if len(r.peers.Servers) > 1 { - nodes := make([]twopc.Worker, 0, len(r.peers.Servers)-1) - - for _, s := range r.peers.Servers { - if s.ID != r.config.LocalID { - nodes = append(nodes, NewTwoPCWorkerWrapper(r, s.ID)) - } - } - - // start coordination - c := twopc.NewCoordinator(twopc.NewOptionsWithCallback( - r.config.ProcessTimeout, - nil, - localPrepare, // after all remote nodes prepared - localRollback, // before all remote nodes rollback - localCommit, // after all remote nodes commit - )) - - res.err = c.Put(nodes, l) - res.offset = r.lastLogIndex - } else { - // single node short cut - // init context - ctx, cancel := context.WithTimeout(context.Background(), r.config.ProcessTimeout) - defer 
cancel() - - if err := localPrepare(ctx); err != nil { - localRollback(ctx) - res.err = err - return - } - - // Commit myself - // return commit err but still commit - res.err = localCommit(ctx) - res.offset = r.lastLogIndex - } - - return -} - -func (r *TwoPCRunner) setState(state ServerState) { - r.stateLock.Lock() - defer r.stateLock.Unlock() - r.currentState = state -} - -func (r *TwoPCRunner) getState() ServerState { - r.stateLock.Lock() - defer r.stateLock.Unlock() - return r.currentState -} - -func (r *TwoPCRunner) processRequest(req Request) { - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "processRequest") - defer task.End() - defer trace.StartRegion(ctx, "processRequestRegion").End() - - // verify call from leader - if err := r.verifyLeader(req); err != nil { - req.SendResponse(nil, err) - return - } - - switch req.GetMethod() { - case "Prepare": - r.processPrepare(req) - case "Commit": - r.processCommit(req) - case "Rollback": - r.processRollback(req) - default: - req.SendResponse(nil, ErrInvalidRequest) - } -} - -func (r *TwoPCRunner) processPeersUpdate(peersUpdate *Peers) { - // update peers - var err error - if err = r.stableStore.SetUint64(keyCurrentTerm, peersUpdate.Term); err == nil { - r.peers = peersUpdate - r.currentTerm = peersUpdate.Term - - // change role - r.leader = r.peers.Leader - - notFound := true - - for _, s := range r.peers.Servers { - if s.ID == r.config.LocalID { - r.role = s.Role - notFound = false - break - } - } - - if notFound { - // shutdown - r.Shutdown(false) - } - } - - r.updatePeersRes <- err -} - -func (r *TwoPCRunner) verifyLeader(req Request) error { - // TODO(xq262144): verify call from current leader or from new leader containing new peers info - if req.GetPeerNodeID() != r.peers.Leader.ID { - // not our leader - return ErrInvalidRequest - } - - return nil -} - -func (r *TwoPCRunner) verifyLog(req Request) (log *Log, err error) { - log = req.GetLog() - - if log == nil { - err = ErrInvalidLog - return - } - - if !log.VerifyHash() { - err = ErrInvalidLog - return - } - - return -} - -func (r *TwoPCRunner) processPrepare(req Request) { - req.SendResponse(nil, func() (err error) { - // already in transaction, try abort previous - if r.getState() != Idle { - // TODO(xq262144): has running transaction - // TODO(xq262144): abort previous or failed current - } - - // init context - var cancelFunc context.CancelFunc - r.currentContext, cancelFunc = context.WithTimeout(context.Background(), r.config.ProcessTimeout) - _ = cancelFunc - - // get log - var l *Log - if l, err = r.verifyLog(req); err != nil { - return - } - - // check log index existence - var lastIndex uint64 - if lastIndex, err = r.logStore.LastIndex(); err != nil || lastIndex >= l.Index { - // already prepared or failed - return - } - - // check prepare hash with last log hash - if l.LastHash != nil && lastIndex == 0 { - // invalid - return ErrInvalidLog - } - - if lastIndex > 0 { - var lastLog Log - if err = r.logStore.GetLog(lastIndex, &lastLog); err != nil { - return - } - - if !l.LastHash.IsEqual(&lastLog.Hash) { - return ErrInvalidLog - } - } - - // prepare on storage - if err = r.config.Storage.Prepare(r.currentContext, l.Data); err != nil { - return - } - - // write log to storage - if err = r.logStore.StoreLog(l); err != nil { - return - } - - // set state to prepared - r.setState(Prepared) - - return nil - }()) -} - -func (r *TwoPCRunner) processCommit(req Request) { - // commit log - req.SendResponse(nil, func() (err error) { - // TODO(xq262144): check current 
running transaction index - if r.getState() != Prepared { - // not prepared, failed directly - return ErrInvalidRequest - } - - // get log - var l *Log - if l, err = r.verifyLog(req); err != nil { - return - } - - var lastIndex uint64 - if lastIndex, err = r.logStore.LastIndex(); err != nil { - return - } else if lastIndex < l.Index { - // not logged, need re-prepare - return ErrInvalidLog - } - - if r.lastLogIndex+1 != l.Index { - // not at the head of the commit position - return ErrInvalidLog - } - - // get log - var lastLog Log - if err = r.logStore.GetLog(l.Index, &lastLog); err != nil { - return - } - - // commit on storage - // return err but still commit local index - err = r.config.Storage.Commit(r.currentContext, l.Data) - - // commit log - r.stableStore.SetUint64(keyCommittedIndex, l.Index) - r.lastLogHash = &lastLog.Hash - r.lastLogIndex = lastLog.Index - r.lastLogTerm = lastLog.Term - - // set state to idle - r.setState(Idle) - - return - }()) -} - -func (r *TwoPCRunner) processRollback(req Request) { - // rollback log - req.SendResponse(nil, func() (err error) { - // TODO(xq262144): check current running transaction index - if r.getState() != Prepared { - // not prepared, failed directly - return ErrInvalidRequest - } - - // get log - var l *Log - if l, err = r.verifyLog(req); err != nil { - return - } - - var lastIndex uint64 - if lastIndex, err = r.logStore.LastIndex(); err != nil { - return - } else if lastIndex < l.Index { - // not logged, no rollback required, maybe previous initiated rollback - return - } - - if r.lastLogIndex+1 != l.Index { - // not at the head of the commit position - return ErrInvalidLog - } - - // get log - var lastLog Log - if err = r.logStore.GetLog(l.Index, &lastLog); err != nil { - return - } - - // rollback on storage - if err = r.config.Storage.Rollback(r.currentContext, l.Data); err != nil { - return - } - - // rewind log, can be failed, since committedIndex is not updated - r.logStore.DeleteRange(r.lastLogIndex+1, l.Index) - - // set state to idle - r.setState(Idle) - - return - }()) -} - -// Start a goroutine and properly handle the race between a routine -// starting and incrementing, and exiting and decrementing. -func (r *TwoPCRunner) goFunc(f func()) { - r.routinesGroup.Add(1) - go func() { - defer r.routinesGroup.Done() - f() - }() -} - -// NewTwoPCWorkerWrapper returns a wrapper for remote worker. -func NewTwoPCWorkerWrapper(runner *TwoPCRunner, nodeID proto.NodeID) *TwoPCWorkerWrapper { - return &TwoPCWorkerWrapper{ - nodeID: nodeID, - runner: runner, - } -} - -// Prepare implements twopc.Worker.Prepare. -func (tpww *TwoPCWorkerWrapper) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - // extract log - l, ok := wb.(*Log) - if !ok { - return ErrInvalidLog - } - - return tpww.callRemote(ctx, "Prepare", l) -} - -// Commit implements twopc.Worker.Commit. -func (tpww *TwoPCWorkerWrapper) Commit(ctx context.Context, wb twopc.WriteBatch) error { - // extract log - l, ok := wb.(*Log) - if !ok { - return ErrInvalidLog - } - - return tpww.callRemote(ctx, "Commit", l) -} - -// Rollback implements twopc.Worker.Rollback. 
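// The Prepare/Commit wrappers above and Rollback below are driven by
// twopc.Coordinator (see processNewLog). The coordinator's internals are not
// shown in this diff; the hypothetical driver here only restates the contract
// the wrappers implement: prepare everywhere first, commit only if every
// prepare succeeded, otherwise roll back whatever was already prepared.
func twoPhase(ctx context.Context, workers []twopc.Worker, wb twopc.WriteBatch) error {
	for i, w := range workers {
		if err := w.Prepare(ctx, wb); err != nil {
			// best-effort rollback of the workers that had already prepared
			for _, prepared := range workers[:i] {
				_ = prepared.Rollback(ctx, wb)
			}
			return err
		}
	}
	var firstErr error
	for _, w := range workers {
		// commit errors are recorded but do not stop the remaining commits
		if err := w.Commit(ctx, wb); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}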
-func (tpww *TwoPCWorkerWrapper) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - // extract log - l, ok := wb.(*Log) - if !ok { - return ErrInvalidLog - } - - return tpww.callRemote(ctx, "Rollback", l) -} - -func (tpww *TwoPCWorkerWrapper) callRemote(ctx context.Context, method string, log *Log) (err error) { - // TODO(xq262144): handle retry - _, err = tpww.runner.transport.Request(ctx, tpww.nodeID, method, log) - return -} - -func nestedTimeoutCtx(ctx context.Context, timeout time.Duration, process func(context.Context) error) error { - nestedCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - return process(nestedCtx) -} - -var ( - _ Config = &TwoPCConfig{} - _ Runner = &TwoPCRunner{} - _ twopc.Worker = &TwoPCWorkerWrapper{} -) diff --git a/kayak/twopc_runner_test.go b/kayak/twopc_runner_test.go deleted file mode 100644 index c7fd5e839..000000000 --- a/kayak/twopc_runner_test.go +++ /dev/null @@ -1,992 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "context" - "errors" - "sync" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . "github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -func TestTwoPCRunner_Init(t *testing.T) { - // invalid config - Convey("test invalid config", t, func() { - runner := NewTwoPCRunner() - config := &MockConfig{} - err := runner.Init(config, nil, nil, nil, nil) - So(err, ShouldNotBeNil) - }) - - Convey("test nil parameters", t, func() { - runner := NewTwoPCRunner() - config := &TwoPCConfig{} - err := runner.Init(config, nil, nil, nil, nil) - So(err, ShouldNotBeNil) - }) - - Convey("test sign broken peers", t, func() { - runner := NewTwoPCRunner() - log.SetLevel(log.FatalLevel) - config := &TwoPCConfig{ - RuntimeConfig: RuntimeConfig{}, - } - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "happy", - }, - }) - // change term to invalidate signature - peers.Term = 2 - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - mockLogStore := &MockLogStore{} - mockStableStore := &MockStableStore{} - mockTransport := mockRouter.getTransport("happy") - testLog := &Log{ - Term: 1, - Index: 1, - } - testLog.ComputeHash() - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockStableStore.On("SetUint64", keyCurrentTerm, uint64(2)).Return(nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). 
- Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = *testLog - }) - mockLogStore.On("LastIndex").Return(uint64(2), nil) - mockLogStore.On("DeleteRange", - mock.AnythingOfType("uint64"), mock.AnythingOfType("uint64")).Return(nil) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("test log restore", t, func() { - runner := NewTwoPCRunner() - log.SetLevel(log.FatalLevel) - config := &TwoPCConfig{ - RuntimeConfig: RuntimeConfig{}, - } - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "happy", - }, - }) - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - mockLogStore := &MockLogStore{} - mockStableStore := &MockStableStore{} - mockTransport := mockRouter.getTransport("happy") - unknownErr := errors.New("unknown error") - - Convey("failed getting currentTerm from log", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(0), unknownErr) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("currentTerm in log older than term in peers", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(2), nil) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("get last committed index failed", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(0), unknownErr) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("get last committed log data failed", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockLogStore.On("GetLog", uint64(1), mock.Anything).Return(unknownErr) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("last committed log with higher term than peers", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = Log{ - Term: 2, - Index: 1, - } - }) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("last committed log not equal to index field", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = Log{ - Term: 1, - Index: 2, - } - }) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("get last index failed", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). 
- Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = Log{ - Term: 1, - Index: 1, - } - }) - mockLogStore.On("LastIndex").Return(uint64(0), unknownErr) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("last index overlaps committed index", func() { - testLog := &Log{ - Term: 1, - Index: 1, - } - testLog.ComputeHash() - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockStableStore.On("SetUint64", keyCurrentTerm, uint64(1)).Return(nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = *testLog - }) - mockLogStore.On("LastIndex").Return(uint64(2), nil) - mockLogStore.On("DeleteRange", - mock.AnythingOfType("uint64"), mock.AnythingOfType("uint64")).Return(nil) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - mockLogStore.AssertCalled(t, "DeleteRange", uint64(2), uint64(2)) - - So(err, ShouldBeNil) - So(runner.currentTerm, ShouldEqual, uint64(1)) - So(runner.lastLogTerm, ShouldEqual, uint64(1)) - So(runner.lastLogIndex, ShouldEqual, 1) - So(runner.lastLogHash, ShouldNotBeNil) - So(runner.lastLogHash.IsEqual(&testLog.Hash), ShouldBeTrue) - }) - }) -} - -func TestTwoPCRunner_Apply(t *testing.T) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - - type createMockRes struct { - runner *TwoPCRunner - transport *MockTransport - worker *MockWorker - config *TwoPCConfig - logStore *MockLogStore - stableStore *MockStableStore - } - - createMock := func(nodeID proto.NodeID) (res *createMockRes) { - res = &createMockRes{} - log.SetLevel(log.FatalLevel) - res.runner = NewTwoPCRunner() - res.transport = mockRouter.getTransport(nodeID) - res.worker = &MockWorker{} - res.config = &TwoPCConfig{ - RuntimeConfig: RuntimeConfig{ - RootDir: "test_dir", - LocalID: nodeID, - Runner: res.runner, - Transport: res.transport, - ProcessTimeout: time.Millisecond * 300, - }, - Storage: res.worker, - } - res.logStore = &MockLogStore{} - res.stableStore = &MockStableStore{} - - // init with no log and no term info - res.stableStore.On("GetUint64", keyCurrentTerm).Return(uint64(0), nil) - res.stableStore.On("GetUint64", keyCommittedIndex).Return(uint64(0), nil) - res.stableStore.On("SetUint64", keyCurrentTerm, uint64(1)).Return(nil) - res.logStore.On("LastIndex").Return(uint64(0), nil) - return - } - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower", - }, - }) - - Convey("call process on no leader", t, func() { - mockRouter.ResetAll() - mockRes := createMock("follower") - - err := mockRes.runner.Init(mockRes.config, peers, mockRes.logStore, mockRes.stableStore, mockRes.transport) - - So(err, ShouldBeNil) - So(mockRes.runner.role, ShouldEqual, proto.Follower) - So(mockRes.runner.leader.ID, ShouldEqual, proto.NodeID("leader")) - - // try call process - testPayload := []byte("test data") - _, err = mockRes.runner.Apply(testPayload) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrNotLeader) - }) - - Convey("call process on leader with single node", t, func() { - mockRouter.ResetAll() - - // change server id to leader and set peers to single node - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - }) - mockRes := 
createMock("leader") - - err := mockRes.runner.Init(mockRes.config, peers, mockRes.logStore, mockRes.stableStore, mockRes.transport) - - So(err, ShouldBeNil) - So(mockRes.runner.role, ShouldEqual, proto.Leader) - So(mockRes.runner.leader.ID, ShouldEqual, proto.NodeID("leader")) - - Convey("commit", func() { - testPayload := []byte("test data") - - // mock worker - callOrder := &CallCollector{} - mockRes.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - mockRes.logStore.On("StoreLog", mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("store_log") - }) - mockRes.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - mockRes.stableStore.On("SetUint64", keyCommittedIndex, uint64(1)). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("update_committed") - }) - - // try call process - var offset uint64 - offset, err = mockRes.runner.Apply(testPayload) - So(err, ShouldBeNil) - So(offset, ShouldEqual, uint64(1)) - - // test call orders - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "store_log", - "commit", - "update_committed", - }) - }) - - Convey("rollback", func() { - testPayload := []byte("test data") - - // mock worker - callOrder := &CallCollector{} - unknownErr := errors.New("unknown error") - mockRes.worker.On("Prepare", mock.Anything, testPayload). - Return(unknownErr).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - mockRes.logStore.On("StoreLog", mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("store_log") - }) - mockRes.worker.On("Rollback", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("rollback") - }) - mockRes.logStore.On("DeleteRange", uint64(1), uint64(1)). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("truncate_log") - }) - - // try call process - _, err = mockRes.runner.Apply(testPayload) - So(err, ShouldNotBeNil) - - // no log should be written to local log store after failed preparing - mockRes.logStore.AssertNotCalled(t, "StoreLog", mock.AnythingOfType("*kayak.Log")) - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "truncate_log", - "rollback", - }) - }) - - Convey("prepare timeout", FailureContinues, func(c C) { - testPayload := []byte("test data") - unknownErr := errors.New("unknown error") - mockRes.worker.On("Prepare", mock.Anything, testPayload). - Return(unknownErr).After(time.Millisecond * 400).Run(func(args mock.Arguments) { - ctx := args.Get(0).(context.Context) - c.So(ctx.Err(), ShouldNotBeNil) - }) - mockRes.worker.On("Rollback", mock.Anything, testPayload).Return(nil) - mockRes.logStore.On("DeleteRange", uint64(1), uint64(1)).Return(nil) - - // try call process - _, err = mockRes.runner.Apply(testPayload) - - So(err, ShouldNotBeNil) - }) - - Convey("commit timeout", FailureContinues, func(c C) { - testPayload := []byte("test data") - unknownErr := errors.New("unknown error") - mockRes.worker.On("Prepare", mock.Anything, testPayload). - Return(nil) - mockRes.logStore.On("StoreLog", mock.AnythingOfType("*kayak.Log")). - Return(nil) - mockRes.worker.On("Commit", mock.Anything, testPayload). 
- Return(unknownErr).After(time.Millisecond * 400).Run(func(args mock.Arguments) { - ctx := args.Get(0).(context.Context) - c.So(ctx.Err(), ShouldNotBeNil) - }) - mockRes.stableStore.On("SetUint64", keyCommittedIndex, uint64(1)). - Return(nil) - - // try call process - _, err = mockRes.runner.Apply(testPayload) - - So(err, ShouldNotBeNil) - }) - - Convey("rollback timeout", FailureContinues, func(c C) { - testPayload := []byte("test data") - prepareErr := errors.New("prepare error") - rollbackErr := errors.New("rollback error") - mockRes.worker.On("Prepare", mock.Anything, testPayload). - Return(prepareErr) - mockRes.logStore.On("StoreLog", mock.AnythingOfType("*kayak.Log")). - Return(nil) - mockRes.worker.On("Rollback", mock.Anything, testPayload). - Return(rollbackErr).After(time.Millisecond * 400).Run(func(args mock.Arguments) { - ctx := args.Get(0).(context.Context) - c.So(ctx.Err(), ShouldNotBeNil) - }) - mockRes.logStore.On("DeleteRange", uint64(1), uint64(1)).Return(nil) - - // try call process - _, err = mockRes.runner.Apply(testPayload) - - // rollback error is ignored - So(err, ShouldNotBeNil) - So(err, ShouldEqual, prepareErr) - }) - }) - - Convey("call process on leader with multiple nodes", t, func(c C) { - mockRouter.ResetAll() - - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - initMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - store := NewMockInmemStore() - err := r.runner.Init(r.config, peers, store, store, r.transport) - So(err, ShouldBeNil) - } - } - - Convey("commit", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower2") - - // init - initMock(lMock, f1Mock, f2Mock) - - testPayload := []byte("test data") - - callOrder := &CallCollector{} - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f1Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_commit") - }) - f2Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_commit") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_prepare") - }) - lMock.worker.On("Commit", mock.Anything, testPayload). 
- Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_commit") - }) - - // try call process - _, err := lMock.runner.Apply(testPayload) - - So(err, ShouldBeNil) - - // test call orders - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - "l_prepare", - "f_commit", - "f_commit", - "l_commit", - }) - - lastLogHash := lMock.runner.lastLogHash - lastLogIndex := lMock.runner.lastLogIndex - lastLogTerm := lMock.runner.lastLogTerm - - So(lastLogHash, ShouldNotBeNil) - So(lastLogIndex, ShouldEqual, uint64(1)) - So(lastLogTerm, ShouldEqual, uint64(1)) - - // check with log - var firstLog Log - err = lMock.runner.logStore.GetLog(1, &firstLog) - So(err, ShouldBeNil) - - So(firstLog.LastHash, ShouldBeNil) - So(lastLogHash.IsEqual(&firstLog.Hash), ShouldBeTrue) - So(lastLogIndex, ShouldResemble, firstLog.Index) - So(lastLogTerm, ShouldResemble, firstLog.Term) - - // commit second log - callOrder.Reset() - - _, err = lMock.runner.Apply(testPayload) - - So(err, ShouldBeNil) - - // test call orders - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - "l_prepare", - "f_commit", - "f_commit", - "l_commit", - }) - - // check with log - var secondLog Log - err = lMock.runner.logStore.GetLog(2, &secondLog) - So(err, ShouldBeNil) - - So(secondLog.LastHash.IsEqual(lastLogHash), ShouldBeTrue) - }) - - Convey("rollback", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower2") - - // init - initMock(lMock, f1Mock, f2Mock) - - testPayload := []byte("test data") - - callOrder := &CallCollector{} - unknownErr := errors.New("unknown error") - // f1 prepare with error - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(unknownErr).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f1Mock.worker.On("Rollback", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_rollback") - }) - // f2 prepare with no error - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_prepare") - }) - f2Mock.worker.On("Rollback", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("f_rollback") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_prepare") - }) - lMock.worker.On("Rollback", mock.Anything, testPayload). 
- Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("l_rollback") - }) - - // try call process - _, err := lMock.runner.Apply(testPayload) - - So(err, ShouldNotBeNil) - So(err, ShouldEqual, unknownErr) - - // test call orders - // prepare failed, so no l_prepare is called - // since one prepare failed, only one f_rollback with be triggered - So(callOrder.Get(), ShouldResemble, []string{ - "f_prepare", - "f_prepare", - //"l_prepare", - "l_rollback", - "f_rollback", - //"f_rollback", - }) - }) - }) - - Convey("sybil test", t, func() { - mockRouter.ResetAll() - - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - initMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - err := r.runner.Init(r.config, peers, r.logStore, r.stableStore, r.transport) - So(err, ShouldBeNil) - } - } - - Convey("request from non-leader", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower1") - - // init - initMock(lMock, f1Mock, f2Mock) - - // fake request - testPayload := []byte("test data") - fakeLog := &Log{ - Term: 1, - Index: 1, - Data: testPayload, - } - - var err error - var rv []byte - rv, err = f1Mock.transport.Request( - context.Background(), - f2Mock.config.LocalID, - "Prepare", - fakeLog, - ) - - So(rv, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err.Error(), ShouldEqual, ErrInvalidRequest.Error()) - }) - - Convey("send invalid request", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - - // init - initMock(lMock, f1Mock) - - // fake request - var err error - var rv []byte - rv, err = lMock.transport.Request( - context.Background(), - f1Mock.config.LocalID, - "invalid request", - nil, - ) - - So(rv, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err.Error(), ShouldEqual, ErrInvalidRequest.Error()) - }) - - Convey("log could not be decoded", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - - // init - initMock(lMock, f1Mock) - - var err error - var rv []byte - rv, err = lMock.transport.Request( - context.Background(), - f1Mock.config.LocalID, - "Prepare", - nil, - ) - - So(rv, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidLog) - }) - }) -} - -func TestTwoPCRunner_UpdatePeers(t *testing.T) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - - type createMockRes struct { - runner *TwoPCRunner - transport *MockTransport - worker *MockWorker - config *TwoPCConfig - logStore *MockLogStore - stableStore *MockStableStore - } - - createMock := func(nodeID proto.NodeID) (res *createMockRes) { - res = &createMockRes{} - log.SetLevel(log.FatalLevel) - res.runner = NewTwoPCRunner() - res.transport = mockRouter.getTransport(nodeID) - res.worker = &MockWorker{} - res.config = &TwoPCConfig{ - RuntimeConfig: RuntimeConfig{ - RootDir: "test_dir", - LocalID: nodeID, - Runner: res.runner, - Transport: res.transport, - ProcessTimeout: time.Millisecond * 800, - }, - Storage: res.worker, - } - res.logStore = &MockLogStore{} - res.stableStore = &MockStableStore{} - - // init with no log and no term info - res.stableStore.On("GetUint64", keyCurrentTerm).Return(uint64(0), nil) - res.stableStore.On("GetUint64", keyCommittedIndex).Return(uint64(0), nil) - res.stableStore.On("SetUint64", keyCurrentTerm, uint64(2)).Return(nil) - res.logStore.On("LastIndex").Return(uint64(0), 
nil) - return - } - peers := testPeersFixture(2, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - initMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - err := r.runner.Init(r.config, peers, r.logStore, r.stableStore, r.transport) - So(err, ShouldBeNil) - } - } - testMock := func(peers *Peers, testFunc func(*createMockRes, error), mocks ...*createMockRes) { - wg := new(sync.WaitGroup) - - for _, r := range mocks { - wg.Add(1) - go func(m *createMockRes) { - defer wg.Done() - err := m.runner.UpdatePeers(peers) - if testFunc != nil { - testFunc(m, err) - } - }(r) - } - - wg.Wait() - } - - Convey("update peers with invalid configuration", t, func() { - mockRouter.ResetAll() - - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower2") - - // init - initMock(lMock, f1Mock, f2Mock) - - Convey("same peers term", FailureContinues, func(c C) { - newPeers := testPeersFixture(2, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - testFunc := func(_ *createMockRes, err error) { - c.So(err, ShouldBeNil) - } - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - }) - - Convey("invalid peers term", FailureContinues, func(c C) { - newPeers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - testFunc := func(_ *createMockRes, err error) { - c.So(err, ShouldNotBeNil) - c.So(err, ShouldEqual, ErrInvalidConfig) - } - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - }) - - Convey("invalid peers signature", FailureContinues, func(c C) { - newPeers := testPeersFixture(4, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - newPeers.Term = 3 - - testFunc := func(_ *createMockRes, err error) { - c.So(err, ShouldNotBeNil) - c.So(err, ShouldEqual, ErrInvalidConfig) - } - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - }) - - Convey("peers update success", FailureContinues, func(c C) { - updateMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - r.stableStore.On("SetUint64", keyCurrentTerm, uint64(3)).Return(nil) - } - } - - updateMock(lMock, f1Mock, f2Mock) - - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - testFunc := func(r *createMockRes, err error) { - c.So(err, ShouldBeNil) - c.So(r.runner.currentTerm, ShouldEqual, uint64(3)) - c.So(r.runner.peers, ShouldResemble, newPeers) - r.stableStore.AssertCalled(t, "SetUint64", keyCurrentTerm, uint64(3)) - } - - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - }) - - Convey("peers update include leader change", FailureContinues, func(c C) { - updateMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - r.stableStore.On("SetUint64", keyCurrentTerm, uint64(3)).Return(nil) - } - } - - updateMock(lMock, f1Mock, f2Mock) - - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Follower, - ID: "leader", - }, - { - Role: proto.Leader, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - 
testFunc := func(r *createMockRes, err error) { - c.So(err, ShouldBeNil) - c.So(r.runner.currentTerm, ShouldEqual, uint64(3)) - c.So(r.runner.peers, ShouldResemble, newPeers) - - switch r.config.LocalID { - case "leader": - c.So(r.runner.role, ShouldEqual, proto.Follower) - case "follower1": - c.So(r.runner.role, ShouldEqual, proto.Leader) - case "follower2": - c.So(r.runner.role, ShouldEqual, proto.Follower) - } - - r.stableStore.AssertCalled(t, "SetUint64", keyCurrentTerm, uint64(3)) - } - - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - - // test call process - testPayload := []byte("test data") - _, err := lMock.runner.Apply(testPayload) - - // no longer leader - So(err, ShouldNotBeNil) - }) - - Convey("peers update with shutdown", FailureContinues, func(c C) { - updateMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - r.stableStore.On("SetUint64", keyCurrentTerm, uint64(3)).Return(nil) - } - } - - updateMock(lMock, f1Mock, f2Mock) - - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - }) - - testFunc := func(r *createMockRes, err error) { - c.So(err, ShouldBeNil) - c.So(r.runner.currentTerm, ShouldEqual, uint64(3)) - c.So(r.runner.peers, ShouldResemble, newPeers) - r.stableStore.AssertCalled(t, "SetUint64", keyCurrentTerm, uint64(3)) - } - - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - - So(f2Mock.runner.currentState, ShouldEqual, Shutdown) - }) - }) -} diff --git a/kayak/types.go b/kayak/types.go deleted file mode 100644 index e71aa5872..000000000 --- a/kayak/types.go +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/binary" - "fmt" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -//go:generate hsp -//hsp:ignore RuntimeConfig - -// Log entries are replicated to all members of the Kayak cluster -// and form the heart of the replicated state machine. -type Log struct { - // Index holds the index of the log entry. - Index uint64 - - // Term holds the election term of the log entry. - Term uint64 - - // Data holds the log entry's type-specific data. - Data []byte - - // LastHash is log entry hash - LastHash *hash.Hash - - // Hash is current log entry hash - Hash hash.Hash -} - -// ComputeHash updates Hash. -func (l *Log) ComputeHash() { - l.Hash.SetBytes(hash.DoubleHashB(l.Serialize())) -} - -// VerifyHash validates hash field. -func (l *Log) VerifyHash() bool { - h := hash.DoubleHashH(l.Serialize()) - return h.IsEqual(&l.Hash) -} - -// Serialize transform log structure to bytes. 
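// Worked example of the byte layout Serialize (below) produces, assuming a log
// entry with Index=1, Term=1, Data="ab" and a nil LastHash:
//   8 bytes little-endian Index | 8 bytes Term | 8 bytes data length |
//   2 bytes data | 1 NUL byte standing in for the missing LastHash
// so:
//   l := &Log{Index: 1, Term: 1, Data: []byte("ab")}
//   len(l.Serialize()) == 8 + 8 + 8 + 2 + 1 == 27
// ComputeHash and VerifyHash both operate on exactly these serialized bytes.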
-func (l *Log) Serialize() []byte { - if l == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, l.Index) - binary.Write(buf, binary.LittleEndian, l.Term) - binary.Write(buf, binary.LittleEndian, uint64(len(l.Data))) - buf.Write(l.Data) - if l.LastHash != nil { - buf.Write(l.LastHash[:]) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - -// LogStore is used to provide an interface for storing -// and retrieving logs in a durable fashion. -type LogStore interface { - // FirstIndex returns the first index written. 0 for no entries. - FirstIndex() (uint64, error) - - // LastIndex returns the last index written. 0 for no entries. - LastIndex() (uint64, error) - - // GetLog gets a log entry at a given index. - GetLog(index uint64, l *Log) error - - // StoreLog stores a log entry. - StoreLog(l *Log) error - - // StoreLogs stores multiple log entries. - StoreLogs(logs []*Log) error - - // DeleteRange deletes a range of log entries. The range is inclusive. - DeleteRange(min, max uint64) error -} - -// StableStore is used to provide stable storage -// of key configurations to ensure safety. -type StableStore interface { - Set(key []byte, val []byte) error - - // Get returns the value for key, or an empty byte slice if key was not found. - Get(key []byte) ([]byte, error) - - SetUint64(key []byte, val uint64) error - - // GetUint64 returns the uint64 value for key, or 0 if key was not found. - GetUint64(key []byte) (uint64, error) -} - -// ServerState define the state of node to be checked by commit/peers update logic. -type ServerState int - -// Note: Don't renumber these, since the numbers are written into the log. -const ( - // Idle indicates no running transaction. - Idle ServerState = iota - - // Prepared indicates in-flight transaction prepared. - Prepared - - // Shutdown state - Shutdown -) - -func (s ServerState) String() string { - switch s { - case Idle: - return "Idle" - case Prepared: - return "Prepared" - } - return "Unknown" -} - -// Server tracks the information about a single server in a configuration. -type Server struct { - // Suffrage determines whether the server gets a vote. - Role proto.ServerRole - // ID is a unique string identifying this server for all time. - ID proto.NodeID - // Public key - PubKey *asymmetric.PublicKey -} - -func (s *Server) String() string { - return fmt.Sprintf("Server id:%s role:%s pubKey:%s", - s.ID, s.Role, - base64.StdEncoding.EncodeToString(s.PubKey.Serialize())) -} - -// Serialize server struct to bytes. -func (s *Server) Serialize() []byte { - if s == nil { - return []byte{'\000'} - } - - buffer := new(bytes.Buffer) - binary.Write(buffer, binary.LittleEndian, s.Role) - binary.Write(buffer, binary.LittleEndian, uint64(len(s.ID))) - buffer.WriteString(string(s.ID)) - if s.PubKey != nil { - buffer.Write(s.PubKey.Serialize()) - } else { - buffer.WriteRune('\000') - } - - return buffer.Bytes() -} - -// Peers defines peer configuration. -type Peers struct { - Term uint64 - Leader *Server - Servers []*Server - PubKey *asymmetric.PublicKey - Signature *asymmetric.Signature -} - -// Clone makes a deep copy of a Peers. -func (c *Peers) Clone() (copy Peers) { - copy.Term = c.Term - copy.Leader = c.Leader - copy.Servers = append(copy.Servers, c.Servers...) - copy.PubKey = c.PubKey - copy.Signature = c.Signature - return -} - -// Serialize peers struct to bytes. 
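// Typical lifecycle of the Serialize/Sign/Verify trio defined below, as
// exercised by testPeersFixture in the transport tests (sketch; key material elided):
//   priv, pub := asymmetric.PrivKeyFromBytes(testPriv)
//   peers := &Peers{Term: term, Leader: leaderNode, Servers: servers, PubKey: pub}
//   _ = peers.Sign(priv) // hashes Serialize() output and attaches the signature
//   ok := peers.Verify() // recomputes the hash and checks it against PubKey
// Mutating any signed field afterwards (e.g. bumping Term, as the
// "test sign broken peers" case does) makes Verify return false.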
-func (c *Peers) Serialize() []byte { - if c == nil { - return []byte{'\000'} - } - - buffer := new(bytes.Buffer) - binary.Write(buffer, binary.LittleEndian, c.Term) - binary.Write(buffer, binary.LittleEndian, c.Leader.Serialize()) - binary.Write(buffer, binary.LittleEndian, uint64(len(c.Servers))) - for _, s := range c.Servers { - binary.Write(buffer, binary.LittleEndian, s.Serialize()) - } - if c.PubKey != nil { - buffer.Write(c.PubKey.Serialize()) - } else { - buffer.WriteRune('\000') - } - return buffer.Bytes() -} - -// Sign generates signature. -func (c *Peers) Sign(signer *asymmetric.PrivateKey) error { - c.PubKey = signer.PubKey() - h := hash.THashB(c.Serialize()) - sig, err := signer.Sign(h) - - if err != nil { - return fmt.Errorf("sign peer configuration failed: %s", err.Error()) - } - - c.Signature = sig - - return nil -} - -// Verify verify signature. -func (c *Peers) Verify() bool { - h := hash.THashB(c.Serialize()) - - return c.Signature.Verify(h, c.PubKey) -} - -func (c *Peers) String() string { - return fmt.Sprintf("Peers term:%v nodesCnt:%v leader:%s signature:%s", - c.Term, len(c.Servers), c.Leader.ID, - base64.StdEncoding.EncodeToString(c.Signature.Serialize())) -} - -// Find finds the index of the server with the specified key in the server list. -func (c *Peers) Find(key proto.NodeID) (index int32, found bool) { - if c.Servers != nil { - for i, s := range c.Servers { - if s.ID == key { - index = int32(i) - found = true - break - } - } - } - - return -} - -// RuntimeConfig defines minimal configuration fields for consensus runner. -type RuntimeConfig struct { - // RootDir is the root dir for runtime - RootDir string - - // LocalID is the unique ID for this server across all time. - LocalID proto.NodeID - - // Runner defines the runner type - Runner Runner - - // Transport defines the dialer type - Transport Transport - - // ProcessTimeout defines whole process timeout - ProcessTimeout time.Duration - - // AutoBanCount defines how many times a nodes will be banned from execution - AutoBanCount uint32 -} - -// Config interface for abstraction. -type Config interface { - // Get config returns runtime config - GetRuntimeConfig() *RuntimeConfig -} - -// Request defines a transport request payload. -type Request interface { - GetPeerNodeID() proto.NodeID - GetMethod() string - GetLog() *Log - SendResponse([]byte, error) error -} - -// Transport adapter for abstraction. -type Transport interface { - Init() error - - // Request - Request(ctx context.Context, nodeID proto.NodeID, method string, log *Log) ([]byte, error) - - // Process - Process() <-chan Request - - Shutdown() error -} - -// Runner adapter for different consensus protocols including Eventual Consistency/2PC/3PC. -type Runner interface { - // Init defines setup logic. - Init(config Config, peers *Peers, logs LogStore, stable StableStore, transport Transport) error - - // UpdatePeers defines peer configuration update logic. - UpdatePeers(peers *Peers) error - - // Apply defines log replication and log commit logic - // and should be called by Leader role only. - Apply(data []byte) (uint64, error) - - // Shutdown defines destruct logic. - Shutdown(wait bool) error -} diff --git a/kayak/types/config.go b/kayak/types/config.go new file mode 100644 index 000000000..0407a5e4a --- /dev/null +++ b/kayak/types/config.go @@ -0,0 +1,49 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/proto" +) + +// RuntimeConfig defines the runtime config of kayak. +type RuntimeConfig struct { + // underlying handler. + Handler Handler + // minimum ratio of nodes whose prepare RPC must succeed. + PrepareThreshold float64 + // minimum ratio of nodes whose commit RPC must succeed. + CommitThreshold float64 + // maximum allowed time for prepare operation. + PrepareTimeout time.Duration + // maximum allowed time for commit operation. + CommitTimeout time.Duration + // initial peers of the node. + Peers *proto.Peers + // write-ahead log (wal) for kayak. + Wal Wal + // current node id. + NodeID proto.NodeID + // current instance id. + InstanceID string + // mux service name. + ServiceName string + // mux service method. + MethodName string +} diff --git a/worker/types/doc.go b/kayak/types/doc.go similarity index 92% rename from worker/types/doc.go rename to kayak/types/doc.go index cf6b420cc..ffdd029c8 100644 --- a/worker/types/doc.go +++ b/kayak/types/doc.go @@ -14,7 +14,5 @@ * limitations under the License. */ -/* -Package types defines miner node export types. -*/ +// Package types defines required types of kayak. package types diff --git a/kayak/types/errors.go b/kayak/types/errors.go new file mode 100644 index 000000000..6ceab9c9d --- /dev/null +++ b/kayak/types/errors.go @@ -0,0 +1,38 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import "github.com/pkg/errors" + +var ( + // ErrNotLeader represents that the current node is not the peer leader. + ErrNotLeader = errors.New("not leader") + // ErrNotFollower represents that the current node is not a peer follower. + ErrNotFollower = errors.New("not follower") + // ErrPrepareTimeout represents a timeout during the prepare operation. + ErrPrepareTimeout = errors.New("prepare timeout") + // ErrPrepareFailed represents a failure of the prepare operation. + ErrPrepareFailed = errors.New("prepare failed") + // ErrInvalidLog represents an invalid log entry. + ErrInvalidLog = errors.New("invalid log") + // ErrNotInPeer represents that the current node does not exist in the peer list. + ErrNotInPeer = errors.New("node not in peer") + // ErrNeedRecovery represents that the current follower node needs recovery; the leader is required to back off. + ErrNeedRecovery = errors.New("need recovery") + // ErrInvalidConfig represents an invalid kayak runtime config.
+ ErrInvalidConfig = errors.New("invalid runtime config") +) diff --git a/kayak/types/handler.go b/kayak/types/handler.go new file mode 100644 index 000000000..c74b053e2 --- /dev/null +++ b/kayak/types/handler.go @@ -0,0 +1,25 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +// Handler defines the main underlying FSM of kayak. +type Handler interface { + EncodePayload(req interface{}) (data []byte, err error) + DecodePayload(data []byte) (req interface{}, err error) + Check(request interface{}) error + Commit(request interface{}) (result interface{}, err error) +} diff --git a/kayak/types/log.go b/kayak/types/log.go new file mode 100644 index 000000000..38238f72f --- /dev/null +++ b/kayak/types/log.go @@ -0,0 +1,74 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "github.com/CovenantSQL/CovenantSQL/proto" +) + +// LogType defines the log type. +type LogType uint16 + +const ( + // LogPrepare defines the prepare phase of a commit. + LogPrepare LogType = iota + // LogRollback defines the rollback phase of a commit. + LogRollback + // LogCommit defines the commit phase of a commit. + LogCommit + // LogCheckpoint defines the checkpoint log (created/virtually created by block production or log truncation). + LogCheckpoint + // LogBarrier defines the barrier log; all open windows should wait for this operation to complete. + LogBarrier + // LogNoop defines the noop log. + LogNoop +) + +func (t LogType) String() (s string) { + switch t { + case LogPrepare: + return "LogPrepare" + case LogRollback: + return "LogRollback" + case LogCommit: + return "LogCommit" + case LogCheckpoint: + return "LogCheckpoint" + case LogBarrier: + return "LogBarrier" + case LogNoop: + return "LogNoop" + default: + return "Unknown" + } +} + +// LogHeader defines the checksum header structure. +type LogHeader struct { + Index uint64 // log index + Version uint64 // log version + Type LogType // log type + Producer proto.NodeID // producer node + DataLength uint64 // data length +} + +// Log defines the log data structure. +type Log struct { + LogHeader + // Data can be detected and decoded properly by the log layer. + Data []byte +} diff --git a/kayak/types/log_test.go b/kayak/types/log_test.go new file mode 100644 index 000000000..c737e9b91 --- /dev/null +++ b/kayak/types/log_test.go @@ -0,0 +1,31 @@ +/* + * Copyright 2018 The CovenantSQL Authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestLogType_String(t *testing.T) { + Convey("test log string function", t, func() { + for i := LogPrepare; i <= LogNoop+1; i++ { + So(i.String(), ShouldNotBeEmpty) + } + }) +} diff --git a/kayak/types/rpc.go b/kayak/types/rpc.go new file mode 100644 index 000000000..7b96f42aa --- /dev/null +++ b/kayak/types/rpc.go @@ -0,0 +1,26 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import "github.com/CovenantSQL/CovenantSQL/proto" + +// RPCRequest defines the RPC request entity. +type RPCRequest struct { + proto.Envelope + Instance string + Log *Log +} diff --git a/kayak/types/wal.go b/kayak/types/wal.go new file mode 100644 index 000000000..e955fa42f --- /dev/null +++ b/kayak/types/wal.go @@ -0,0 +1,27 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +// Wal defines the log storage interface. +type Wal interface { + // sequential write + Write(*Log) error + // sequential read, return io.EOF if there is no more records to read + Read() (*Log, error) + // random access + Get(index uint64) (*Log, error) +} diff --git a/kayak/types_gen.go b/kayak/types_gen.go deleted file mode 100644 index 4ab3f6aff..000000000 --- a/kayak/types_gen.go +++ /dev/null @@ -1,190 +0,0 @@ -package kayak - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
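The Wal interface above implies a two-phase contract: a consumer first drains Read sequentially until io.EOF to recover previously persisted logs, then switches to Write for new entries, while Get stays available for random access. A minimal sketch of that contract, assuming only the interface as declared above (the helper itself is hypothetical):

package types

import "io"

// recoverThenAppend illustrates the Wal contract; not part of this patch.
func recoverThenAppend(w Wal, fresh []*Log) (recovered []*Log, err error) {
	// Phase 1: sequential recovery; Read signals completion with io.EOF.
	for {
		var l *Log
		if l, err = w.Read(); err == io.EOF {
			err = nil
			break
		} else if err != nil {
			return
		}
		recovered = append(recovered, l)
	}
	// Phase 2: append new logs; implementations reject duplicate indexes.
	for _, l := range fresh {
		if err = w.Write(l); err != nil {
			return
		}
	}
	return
}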
- -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *Log) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 5 - o = append(o, 0x85, 0x85) - if z.LastHash == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.LastHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x85) - o = hsp.AppendBytes(o, z.Data) - o = append(o, 0x85) - if oTemp, err := z.Hash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x85) - o = hsp.AppendUint64(o, z.Index) - o = append(o, 0x85) - o = hsp.AppendUint64(o, z.Term) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Log) Msgsize() (s int) { - s = 1 + 9 - if z.LastHash == nil { - s += hsp.NilSize - } else { - s += z.LastHash.Msgsize() - } - s += 5 + hsp.BytesPrefixSize + len(z.Data) + 5 + z.Hash.Msgsize() + 6 + hsp.Uint64Size + 5 + hsp.Uint64Size - return -} - -// MarshalHash marshals for hash -func (z *Peers) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 5 - o = append(o, 0x85, 0x85) - if z.Leader == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Leader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x85) - if z.PubKey == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.PubKey.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x85) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x85) - o = hsp.AppendArrayHeader(o, uint32(len(z.Servers))) - for za0001 := range z.Servers { - if z.Servers[za0001] == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Servers[za0001].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - } - o = append(o, 0x85) - o = hsp.AppendUint64(o, z.Term) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Peers) Msgsize() (s int) { - s = 1 + 7 - if z.Leader == nil { - s += hsp.NilSize - } else { - s += z.Leader.Msgsize() - } - s += 7 - if z.PubKey == nil { - s += hsp.NilSize - } else { - s += z.PubKey.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 8 + hsp.ArrayHeaderSize - for za0001 := range z.Servers { - if z.Servers[za0001] == nil { - s += hsp.NilSize - } else { - s += z.Servers[za0001].Msgsize() - } - } - s += 5 + hsp.Uint64Size - return -} - -// MarshalHash marshals for hash -func (z *Server) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83, 0x83) - if z.PubKey == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.PubKey.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x83) - if oTemp, err := z.ID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - if oTemp, err := z.Role.MarshalHash(); err != nil { - return nil, err - } else { 
- o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Server) Msgsize() (s int) { - s = 1 + 7 - if z.PubKey == nil { - s += hsp.NilSize - } else { - s += z.PubKey.Msgsize() - } - s += 3 + z.ID.Msgsize() + 5 + z.Role.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z ServerState) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - o = hsp.AppendInt(o, int(z)) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z ServerState) Msgsize() (s int) { - s = hsp.IntSize - return -} diff --git a/kayak/types_test.go b/kayak/types_test.go deleted file mode 100644 index 57a47a831..000000000 --- a/kayak/types_test.go +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "fmt" - "testing" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - . "github.com/smartystreets/goconvey/convey" -) - -func TestLog_ComputeHash(t *testing.T) { - log1 := &Log{ - Index: 1, - Term: 1, - Data: []byte("happy"), - } - - log2 := &Log{ - Index: 1, - Term: 1, - Data: []byte("happy"), - } - - log1.ComputeHash() - log2.ComputeHash() - - Convey("same hash result on identical field value", t, func() { - equalHash := log1.Hash.IsEqual(&log2.Hash) - So(equalHash, ShouldBeTrue) - }) -} - -func TestLog_VerifyHash(t *testing.T) { - // Test with no LastHash - log1 := &Log{ - Index: 1, - Term: 1, - Data: []byte("happy"), - } - - log1.ComputeHash() - - Convey("verify correct hash", t, func() { - So(log1.VerifyHash(), ShouldBeTrue) - }) - - // Test including LastHash - log2 := &Log{ - Index: 2, - Term: 1, - Data: []byte("happy2"), - LastHash: &log1.Hash, - } - - log2.ComputeHash() - - Convey("verify correct hash", t, func() { - So(log2.VerifyHash(), ShouldBeTrue) - }) - - log2.Hash.SetBytes(hash.HashB([]byte("test generation"))) - - Convey("verify incorrect hash", t, func() { - So(log2.VerifyHash(), ShouldBeFalse) - }) -} - -func TestServer_Serialize(t *testing.T) { - testKey := []byte{ - 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, - 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, - 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, - 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, - 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, - 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, - 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, - 0xb4, 0x12, 0xa3, - } - - pubKey, err := asymmetric.ParsePubKey(testKey) - - if err != nil { - t.Fatalf("parse pubkey failed: %v", err.Error()) - } - - s := &Server{ - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - } - data := s.Serialize() - - // try to load data from serialization - s2 := &Server{ - Role: proto.Leader, - 
ID: "happy", - PubKey: pubKey, - } - data2 := s2.Serialize() - - Convey("test serialization", t, func() { - So(data, ShouldResemble, data2) - }) - - Convey("test serialize with nil PubKey", t, func() { - s.PubKey = nil - So(s.Serialize(), ShouldNotResemble, data2) - }) -} - -func TestPeers_Clone(t *testing.T) { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - _, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - - samplePeersConf := &Peers{ - Term: 1, - Leader: &Server{ - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - }, - Servers: []*Server{ - { - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - }, - }, - PubKey: pubKey, - } - - Convey("clone peers", t, func() { - peers := samplePeersConf.Clone() - So(peers.Term, ShouldEqual, samplePeersConf.Term) - So(peers.Leader, ShouldResemble, samplePeersConf.Leader) - So(peers.Servers, ShouldResemble, samplePeersConf.Servers) - So(peers.PubKey, ShouldResemble, samplePeersConf.PubKey) - So(peers.Signature, ShouldResemble, samplePeersConf.Signature) - }) -} - -func TestPeers_Find(t *testing.T) { - samplePeersConf := &Peers{ - Servers: []*Server{ - {ID: "X1"}, - {ID: "X2"}, - {ID: "X3"}, - {ID: "X4"}, - {ID: "X5"}, - }, - } - - Convey("find server", t, func() { - index, found := samplePeersConf.Find("X1") - So(found, ShouldBeTrue) - So(index, ShouldEqual, 0) - index, found = samplePeersConf.Find("X6") - So(found, ShouldBeFalse) - samplePeersConf.Servers = nil - index, found = samplePeersConf.Find("X6") - So(found, ShouldBeFalse) - }) -} - -func TestPeers_Sign(t *testing.T) { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - peers := &Peers{ - Term: 1, - Leader: &Server{ - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - }, - Servers: []*Server{ - { - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - }, - }, - PubKey: pubKey, - } - - if err := peers.Sign(privKey); err != nil { - t.Fatalf("sign peer conf failed: %v", err.Error()) - } - Convey("verify signed peers", t, func() { - So(peers.Verify(), ShouldBeTrue) - }) - Convey("verify corrupted peers", t, func() { - peers.Term = 2 - So(peers.Verify(), ShouldBeFalse) - }) -} - -func TestToString(t *testing.T) { - Convey("ServerRole", t, func() { - So(fmt.Sprint(proto.Leader), ShouldEqual, "Leader") - So(fmt.Sprint(proto.Follower), ShouldEqual, "Follower") - So(fmt.Sprint(proto.ServerRole(100)), ShouldEqual, "Unknown") - }) - Convey("ServerState", t, func() { - So(fmt.Sprint(Idle), ShouldEqual, "Idle") - So(fmt.Sprint(Prepared), ShouldEqual, "Prepared") - So(fmt.Sprint(ServerState(100)), ShouldEqual, "Unknown") - }) - Convey("Server", t, func() { - s := &Server{ - Role: proto.Leader, - ID: "test", - } - So(fmt.Sprint(s), ShouldNotBeEmpty) - }) - Convey("Peers", t, func() { - p := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "test", - }, - }) - So(fmt.Sprint(p), ShouldNotBeEmpty) - }) -} diff --git a/chain/doc.go b/kayak/wal/doc.go similarity index 89% rename from chain/doc.go rename to kayak/wal/doc.go index 2fa161437..31e6ee6b4 100644 --- a/chain/doc.go +++ b/kayak/wal/doc.go @@ -14,5 +14,5 @@ * limitations under the License. 
 */
 
-// Package chain defines commonly types for block chain.
-package chain
+// Package wal defines toy implementations of kayak wal.
+package wal
diff --git a/kayak/errors.go b/kayak/wal/errors.go
similarity index 62%
rename from kayak/errors.go
rename to kayak/wal/errors.go
index 0df42ab4c..f54332685 100644
--- a/kayak/errors.go
+++ b/kayak/wal/errors.go
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
-package kayak
+package wal
 
-import "errors"
+import "github.com/pkg/errors"
 
 var (
-	// ErrInvalidConfig defines invalid config error
-	ErrInvalidConfig = errors.New("invalid configuration")
-	// ErrInvalidLog defines invalid log error
+	// ErrWalClosed represents the log file is closed.
+	ErrWalClosed = errors.New("wal is closed")
+	// ErrInvalidLog represents the log object is invalid.
 	ErrInvalidLog = errors.New("invalid log")
-	// ErrNotLeader defines not leader on log processing
-	ErrNotLeader = errors.New("not leader")
-	// ErrInvalidRequest indicate inconsistent state
-	ErrInvalidRequest = errors.New("invalid request")
+	// ErrAlreadyExists represents the log already exists.
+	ErrAlreadyExists = errors.New("log already exists")
+	// ErrNotExists represents the log does not exist.
+	ErrNotExists = errors.New("log not exists")
 )
diff --git a/kayak/wal/leveldb_wal.go b/kayak/wal/leveldb_wal.go
new file mode 100644
index 000000000..8382744de
--- /dev/null
+++ b/kayak/wal/leveldb_wal.go
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package wal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"sync"
+	"sync/atomic"
+
+	kt "github.com/CovenantSQL/CovenantSQL/kayak/types"
+	"github.com/CovenantSQL/CovenantSQL/utils"
+	"github.com/pkg/errors"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	// logHeaderKeyPrefix defines the leveldb header key prefix.
+	logHeaderKeyPrefix = []byte{'L', 'H'}
+	// logDataKeyPrefix defines the leveldb data key prefix.
+	logDataKeyPrefix = []byte{'L', 'D'}
+)
+
+// LevelDBWal defines a toy wal using leveldb as storage.
+type LevelDBWal struct {
+	db       *leveldb.DB
+	it       iterator.Iterator
+	closed   uint32
+	readLock sync.Mutex
+	read     uint32
+}
+
+// NewLevelDBWal returns new leveldb wal instance.
+func NewLevelDBWal(filename string) (p *LevelDBWal, err error) {
+	p = &LevelDBWal{}
+	if p.db, err = leveldb.OpenFile(filename, nil); err != nil {
+		err = errors.Wrap(err, "open database failed")
+		return
+	}
+
+	return
+}
+
+// Write implements Wal.Write.
+func (p *LevelDBWal) Write(l *kt.Log) (err error) {
+	if atomic.LoadUint32(&p.closed) == 1 {
+		err = ErrWalClosed
+		return
+	}
+
+	// any write marks the wal as fully read: sequential Read is for recovery only
+	atomic.CompareAndSwapUint32(&p.read, 0, 1)
+
+	if l == nil {
+		err = ErrInvalidLog
+		return
+	}
+
+	// build header key
+	headerKey := append(append([]byte(nil), logHeaderKeyPrefix...), p.uint64ToBytes(l.Index)...)
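A note on why the index suffix is big-endian: LevelDB iterates keys in lexicographic byte order, and big-endian encoding makes byte order coincide with numeric order, so a prefix scan over 'L','H' yields headers sorted by log index. A standalone illustration (not part of the patch):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// headerKey mirrors the 'LH' + big-endian uint64 layout used above.
func headerKey(index uint64) []byte {
	k := make([]byte, 2+8)
	copy(k, []byte{'L', 'H'})
	binary.BigEndian.PutUint64(k[2:], index)
	return k
}

func main() {
	// Big-endian keys compare in numeric order, so a prefix iterator
	// over "LH" visits log headers by ascending index.
	fmt.Println(bytes.Compare(headerKey(1), headerKey(2)))     // -1
	fmt.Println(bytes.Compare(headerKey(255), headerKey(256))) // -1; little-endian would give 1 here
}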
+
+	if _, err = p.db.Get(headerKey, nil); err != nil && err != leveldb.ErrNotFound {
+		err = errors.Wrap(err, "access leveldb failed")
+		return
+	} else if err == nil {
+		err = ErrAlreadyExists
+		return
+	}
+
+	dataKey := append(append([]byte(nil), logDataKeyPrefix...), p.uint64ToBytes(l.Index)...)
+
+	// write data first
+	var enc *bytes.Buffer
+	if enc, err = utils.EncodeMsgPack(l.Data); err != nil {
+		err = errors.Wrap(err, "encode log data failed")
+		return
+	}
+
+	if err = p.db.Put(dataKey, enc.Bytes(), nil); err != nil {
+		err = errors.Wrap(err, "write log data failed")
+		return
+	}
+
+	// write header
+	l.DataLength = uint64(enc.Len())
+
+	if enc, err = utils.EncodeMsgPack(l.LogHeader); err != nil {
+		err = errors.Wrap(err, "encode log header failed")
+		return
+	}
+
+	// save header
+	if err = p.db.Put(headerKey, enc.Bytes(), nil); err != nil {
+		err = errors.Wrap(err, "write log header failed")
+		return
+	}
+
+	return
+}
+
+// Read implements Wal.Read.
+func (p *LevelDBWal) Read() (l *kt.Log, err error) {
+	if atomic.LoadUint32(&p.closed) == 1 {
+		err = ErrWalClosed
+		return
+	}
+
+	if atomic.LoadUint32(&p.read) == 1 {
+		err = io.EOF
+		return
+	}
+
+	p.readLock.Lock()
+	defer p.readLock.Unlock()
+
+	// start with base, use iterator to read
+	if p.it == nil {
+		keyRange := util.BytesPrefix(logHeaderKeyPrefix)
+		p.it = p.db.NewIterator(keyRange, nil)
+	}
+
+	if p.it.Next() {
+		// load
+		l, err = p.load(p.it.Value())
+		return
+	}
+
+	p.it.Release()
+	if err = p.it.Error(); err == nil {
+		err = io.EOF
+	}
+	p.it = nil
+
+	// log read complete, cannot be read again
+	atomic.StoreUint32(&p.read, 1)
+
+	return
+}
+
+// Get implements Wal.Get.
+func (p *LevelDBWal) Get(i uint64) (l *kt.Log, err error) {
+	if atomic.LoadUint32(&p.closed) == 1 {
+		err = ErrWalClosed
+		return
+	}
+
+	headerKey := append(append([]byte(nil), logHeaderKeyPrefix...), p.uint64ToBytes(i)...)
+
+	var headerData []byte
+	if headerData, err = p.db.Get(headerKey, nil); err == leveldb.ErrNotFound {
+		err = ErrNotExists
+		return
+	} else if err != nil {
+		err = errors.Wrap(err, "get log header failed")
+		return
+	}
+
+	return p.load(headerData)
+}
+
+// Close implements Wal.Close.
+func (p *LevelDBWal) Close() {
+	if !atomic.CompareAndSwapUint32(&p.closed, 0, 1) {
+		return
+	}
+
+	if p.it != nil {
+		p.it.Release()
+		p.it = nil
+	}
+
+	if p.db != nil {
+		p.db.Close()
+	}
+}
+
+// GetDB returns the leveldb for storage extensions.
+func (p *LevelDBWal) GetDB() (d *leveldb.DB, err error) {
+	if atomic.LoadUint32(&p.closed) == 1 {
+		err = ErrWalClosed
+		return
+	}
+
+	d = p.db
+	return
+}
+
+func (p *LevelDBWal) load(logHeader []byte) (l *kt.Log, err error) {
+	l = new(kt.Log)
+
+	if err = utils.DecodeMsgPack(logHeader, &l.LogHeader); err != nil {
+		err = errors.Wrap(err, "decode log header failed")
+		return
+	}
+
+	dataKey := append(append([]byte(nil), logDataKeyPrefix...), p.uint64ToBytes(l.Index)...)
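A quick usage sketch of this wal; the database path and the log values are placeholders, not taken from the patch:

package main

import (
	kt "github.com/CovenantSQL/CovenantSQL/kayak/types"
	"github.com/CovenantSQL/CovenantSQL/kayak/wal"
)

func main() {
	// hypothetical database path
	w, err := wal.NewLevelDBWal("/tmp/demo-kayak.ldb")
	if err != nil {
		panic(err)
	}
	defer w.Close()

	// append a log; writing the same index twice yields ErrAlreadyExists
	err = w.Write(&kt.Log{
		LogHeader: kt.LogHeader{Index: 0, Type: kt.LogPrepare},
		Data:      []byte("payload"),
	})
	if err != nil {
		panic(err)
	}

	// random access by index; a missing index yields ErrNotExists
	if _, err = w.Get(0); err != nil {
		panic(err)
	}
}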
+ + var encData []byte + if encData, err = p.db.Get(dataKey, nil); err != nil { + err = errors.Wrap(err, "get log data failed") + return + } + + // load data + if err = utils.DecodeMsgPack(encData, &l.Data); err != nil { + err = errors.Wrap(err, "decode log data failed") + } + + return +} + +func (p *LevelDBWal) uint64ToBytes(o uint64) (res []byte) { + res = make([]byte, 8) + binary.BigEndian.PutUint64(res, o) + return +} diff --git a/kayak/wal/leveldb_wal_test.go b/kayak/wal/leveldb_wal_test.go new file mode 100644 index 000000000..25b005be5 --- /dev/null +++ b/kayak/wal/leveldb_wal_test.go @@ -0,0 +1,146 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wal + +import ( + "io" + "os" + "testing" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + . "github.com/smartystreets/goconvey/convey" +) + +func TestLevelDBWal_Write(t *testing.T) { + Convey("wal write/get/close", t, func() { + dbFile := "testWrite.ldb" + + var p *LevelDBWal + var err error + p, err = NewLevelDBWal(dbFile) + So(err, ShouldBeNil) + defer os.RemoveAll(dbFile) + + err = p.Write(nil) + So(err, ShouldNotBeNil) + + l1 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 0, + Type: kt.LogPrepare, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), + }, + Data: []byte("happy1"), + } + + err = p.Write(l1) + So(err, ShouldBeNil) + err = p.Write(l1) + So(err, ShouldNotBeNil) + + // test get + var l *kt.Log + l, err = p.Get(l1.Index) + So(err, ShouldBeNil) + So(l, ShouldResemble, l1) + + _, err = p.Get(10000) + So(err, ShouldNotBeNil) + + // test consecutive writes + l2 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Type: kt.LogPrepare, + }, + Data: []byte("happy2"), + } + err = p.Write(l2) + So(err, ShouldBeNil) + + // test not consecutive writes + l4 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 3, + Type: kt.LogPrepare, + }, + Data: []byte("happy3"), + } + err = p.Write(l4) + So(err, ShouldBeNil) + + l3 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 2, + Type: kt.LogPrepare, + }, + Data: []byte("happy4"), + } + err = p.Write(l3) + So(err, ShouldBeNil) + + _, err = p.Read() + So(err, ShouldEqual, io.EOF) + + p.Close() + + _, err = p.Read() + So(err, ShouldEqual, ErrWalClosed) + + err = p.Write(l1) + So(err, ShouldEqual, ErrWalClosed) + + _, err = p.Get(l1.Index) + So(err, ShouldEqual, ErrWalClosed) + + // load again + p, err = NewLevelDBWal(dbFile) + So(err, ShouldBeNil) + + for i := 0; i != 4; i++ { + l, err = p.Read() + So(err, ShouldBeNil) + So(l.Index, ShouldEqual, i) + } + + _, err = p.Read() + So(err, ShouldEqual, io.EOF) + + p.Close() + + // load again + p, err = NewLevelDBWal(dbFile) + So(err, ShouldBeNil) + + // not complete read + for i := 0; i != 3; i++ { + l, err = p.Read() + So(err, ShouldBeNil) + So(l.Index, ShouldEqual, i) + } + + p.Close() + + // close multiple times + So(p.Close, ShouldNotPanic) + }) + Convey("open failed test", t, func() { + 
_, err := NewLevelDBWal("") + So(err, ShouldNotBeNil) + }) +} diff --git a/kayak/wal/mem_wal.go b/kayak/wal/mem_wal.go new file mode 100644 index 000000000..a1e63d6aa --- /dev/null +++ b/kayak/wal/mem_wal.go @@ -0,0 +1,113 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wal + +import ( + "io" + "sync" + "sync/atomic" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" +) + +// MemWal defines a toy wal using memory as storage. +type MemWal struct { + sync.RWMutex + logs []*kt.Log + revIndex map[uint64]int + offset uint64 + closed uint32 +} + +// NewMemWal returns new memory wal instance. +func NewMemWal() (p *MemWal) { + p = &MemWal{ + revIndex: make(map[uint64]int, 100000), + logs: make([]*kt.Log, 0, 100000), + } + + return +} + +// Write implements Wal.Write. +func (p *MemWal) Write(l *kt.Log) (err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + + if l == nil { + err = ErrInvalidLog + return + } + + p.Lock() + defer p.Unlock() + + if _, exists := p.revIndex[l.Index]; exists { + err = ErrAlreadyExists + return + } + + offset := atomic.AddUint64(&p.offset, 1) - 1 + p.logs = append(p.logs, nil) + copy(p.logs[offset+1:], p.logs[offset:]) + p.logs[offset] = l + p.revIndex[l.Index] = int(offset) + + return +} + +// Read implements Wal.Read. +func (p *MemWal) Read() (l *kt.Log, err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + + err = io.EOF + return +} + +// Get implements Wal.Get. +func (p *MemWal) Get(index uint64) (l *kt.Log, err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + + p.RLock() + defer p.RUnlock() + + var i int + var exists bool + if i, exists = p.revIndex[index]; !exists { + err = ErrNotExists + return + } + + l = p.logs[i] + + return +} + +// Close implements Wal.Close. +func (p *MemWal) Close() { + if !atomic.CompareAndSwapUint32(&p.closed, 0, 1) { + return + } +} diff --git a/kayak/wal/mem_wal_test.go b/kayak/wal/mem_wal_test.go new file mode 100644 index 000000000..c79db6959 --- /dev/null +++ b/kayak/wal/mem_wal_test.go @@ -0,0 +1,189 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wal + +import ( + "io" + "sync" + "testing" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestMemWal_Write(t *testing.T) { + Convey("test mem wal write", t, func() { + var p *MemWal + p = NewMemWal() + + l1 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 0, + Type: kt.LogPrepare, + }, + Data: []byte("happy1"), + } + + var err error + err = p.Write(l1) + So(err, ShouldBeNil) + So(p.logs, ShouldResemble, []*kt.Log{l1}) + err = p.Write(l1) + So(err, ShouldNotBeNil) + So(p.revIndex, ShouldHaveLength, 1) + So(p.revIndex[l1.Index], ShouldEqual, 0) + So(p.offset, ShouldEqual, 1) + + // test get + var l *kt.Log + l, err = p.Get(l1.Index) + So(err, ShouldBeNil) + So(l, ShouldResemble, l1) + + _, err = p.Get(10000) + So(err, ShouldNotBeNil) + + // test consecutive writes + l2 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Type: kt.LogPrepare, + }, + Data: []byte("happy2"), + } + err = p.Write(l2) + So(err, ShouldBeNil) + So(p.revIndex, ShouldHaveLength, 2) + So(p.revIndex[l2.Index], ShouldEqual, 1) + So(p.offset, ShouldEqual, 2) + + // test not consecutive writes + l4 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 3, + Type: kt.LogPrepare, + }, + Data: []byte("happy3"), + } + err = p.Write(l4) + So(err, ShouldBeNil) + So(p.revIndex, ShouldHaveLength, 3) + So(p.revIndex[l4.Index], ShouldEqual, 2) + So(p.offset, ShouldEqual, 3) + + l3 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 2, + Type: kt.LogPrepare, + }, + Data: []byte("happy4"), + } + err = p.Write(l3) + So(err, ShouldBeNil) + So(p.revIndex, ShouldHaveLength, 4) + So(p.revIndex[l3.Index], ShouldEqual, 3) + So(p.offset, ShouldEqual, 4) + + _, err = p.Read() + So(err, ShouldEqual, io.EOF) + + p.Close() + _, err = p.Read() + So(err, ShouldEqual, ErrWalClosed) + + _, err = p.Get(1) + So(err, ShouldEqual, ErrWalClosed) + + err = p.Write(l1) + So(err, ShouldEqual, ErrWalClosed) + }) +} + +func TestMemWal_Write2(t *testing.T) { + Convey("test mem wal write", t, func() { + l1 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 0, + Type: kt.LogPrepare, + }, + Data: []byte("happy1"), + } + l2 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Type: kt.LogPrepare, + }, + Data: []byte("happy2"), + } + l3 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 2, + Type: kt.LogPrepare, + }, + Data: []byte("happy4"), + } + l4 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 3, + Type: kt.LogPrepare, + }, + Data: []byte("happy3"), + } + l5 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 4, + Type: kt.LogPrepare, + }, + Data: []byte("happy5"), + } + + var wg sync.WaitGroup + var p *MemWal + p = NewMemWal() + + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l1) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l2) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l3) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l4) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l5) + }() + + wg.Wait() + + So(p.revIndex, ShouldHaveLength, 5) + So(p.offset, ShouldEqual, 5) + }) +} diff --git a/metric/rpc.go b/metric/rpc.go index 232854826..57ee7eb2e 100644 --- a/metric/rpc.go +++ b/metric/rpc.go @@ -147,6 +147,6 @@ func (cc *CollectClient) UploadMetrics(BPNodeID proto.NodeID) (err error) { if err != nil { log.Errorf("calling RPC %s failed: %s", reqType, err) } - log.Infof("resp %s: %v", reqType, resp) + log.Debugf("resp %s: %v", reqType, resp) return } diff --git a/proto/nodeinfo.go b/proto/nodeinfo.go index 019e32237..a4822549d 100644 --- a/proto/nodeinfo.go +++ b/proto/nodeinfo.go @@ -141,9 +141,14 @@ func (id *NodeID) MarshalBinary() (keyBytes []byte, err 
error) {
 
 // UnmarshalBinary does the deserialization
 func (id *NodeID) UnmarshalBinary(keyBytes []byte) (err error) {
+	// for backward compatibility
+	if len(keyBytes) == 64 {
+		*id = NodeID(keyBytes)
+		return
+	}
 	h, err := hash.NewHash(keyBytes)
 	if err != nil {
-		log.Error("nodeID bytes len should be 32")
+		log.Error("load 32 bytes nodeID failed")
 		return
 	}
 	*id = NodeID(h.String())
@@ -231,7 +236,6 @@ func (s *ServerRole) UnmarshalYAML(unmarshal func(interface{}) error) error {
 }
 
 func parseServerRole(roleStr string) (role ServerRole, err error) {
-
 	switch strings.ToLower(roleStr) {
 	case "leader":
 		role = Leader
diff --git a/proto/nodeinfo_test.go b/proto/nodeinfo_test.go
index f033d7865..b0ac900ab 100644
--- a/proto/nodeinfo_test.go
+++ b/proto/nodeinfo_test.go
@@ -131,7 +131,7 @@ func TestNodeID_IsEmpty(t *testing.T) {
 
 func TestNodeID_MarshalBinary(t *testing.T) {
 	Convey("NodeID MarshalBinary", t, func() {
-		var nodeID, nodeID2 NodeID
+		var nodeID, nodeID2, nodeID3 NodeID
 
 		nb, err := nodeID.MarshalBinary()
 		So(err, ShouldBeNil)
@@ -147,5 +147,9 @@ func TestNodeID_MarshalBinary(t *testing.T) {
 		err = nodeID2.UnmarshalBinary(nb)
 		So(err, ShouldBeNil)
 		So(nodeID2, ShouldResemble, nodeID)
+
+		err = nodeID3.UnmarshalBinary([]byte("0000000000000000000000000000000000000000000000000000000000000000"))
+		So(err, ShouldBeNil)
+		So(nodeID3, ShouldResemble, nodeID)
 	})
 }
diff --git a/proto/servers.go b/proto/servers.go
new file mode 100644
index 000000000..fc61c2cd4
--- /dev/null
+++ b/proto/servers.go
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package proto
+
+import (
+	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
+	"github.com/CovenantSQL/CovenantSQL/crypto/verifier"
+)
+
+//go:generate hsp
+
+// PeersHeader defines the header for miner peers.
+type PeersHeader struct {
+	Version uint64
+	Term    uint64
+	Leader  NodeID
+	Servers []NodeID
+}
+
+// Peers defines the peers configuration.
+type Peers struct {
+	PeersHeader
+	verifier.DefaultHashSignVerifierImpl
+}
+
+// Clone makes a deep copy of Peers.
+func (p *Peers) Clone() (copy Peers) {
+	copy.Version = p.Version
+	copy.Term = p.Term
+	copy.Leader = p.Leader
+	copy.Servers = append(copy.Servers, p.Servers...)
+	copy.DefaultHashSignVerifierImpl = p.DefaultHashSignVerifierImpl
+	return
+}
+
+// Sign generates the signature.
+func (p *Peers) Sign(signer *asymmetric.PrivateKey) (err error) {
+	return p.DefaultHashSignVerifierImpl.Sign(&p.PeersHeader, signer)
+}
+
+// Verify verifies the signature.
+func (p *Peers) Verify() (err error) {
+	return p.DefaultHashSignVerifierImpl.Verify(&p.PeersHeader)
+}
+
+// Find finds the index of the server with the specified key in the server list.
+func (p *Peers) Find(key NodeID) (index int32, found bool) { + if p.Servers != nil { + for i, s := range p.Servers { + if key.IsEqual(&s) { + index = int32(i) + found = true + break + } + } + } + + return +} diff --git a/proto/servers_gen.go b/proto/servers_gen.go new file mode 100644 index 000000000..8041564e7 --- /dev/null +++ b/proto/servers_gen.go @@ -0,0 +1,70 @@ +package proto + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Peers) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.PeersHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Peers) Msgsize() (s int) { + s = 1 + 12 + z.PeersHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *PeersHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if oTemp, err := z.Leader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.Servers))) + for za0001 := range z.Servers { + if oTemp, err := z.Servers[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.Version) + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.Term) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *PeersHeader) Msgsize() (s int) { + s = 1 + 7 + z.Leader.Msgsize() + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Servers { + s += z.Servers[za0001].Msgsize() + } + s += 8 + hsp.Uint64Size + 5 + hsp.Uint64Size + return +} diff --git a/proto/servers_gen_test.go b/proto/servers_gen_test.go new file mode 100644 index 000000000..3111a438c --- /dev/null +++ b/proto/servers_gen_test.go @@ -0,0 +1,84 @@ +package proto + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
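The UnmarshalBinary change in proto/nodeinfo.go above now accepts two on-wire forms. A small sketch of both paths; the test function and its byte values are illustrative, not part of this patch:

package proto

import "testing"

// TestNodeIDCompat: 64 bytes are treated as the legacy textual node id and
// adopted verbatim, while 32 bytes are parsed as a raw hash and re-encoded
// to its string representation.
func TestNodeIDCompat(t *testing.T) {
	var legacy, hashed NodeID

	// legacy 64-byte textual form (backward-compatible path)
	if err := legacy.UnmarshalBinary([]byte(
		"0000000000000000000000000000000000000000000000000000000000000000")); err != nil {
		t.Fatal(err)
	}

	// 32-byte raw hash form
	if err := hashed.UnmarshalBinary(make([]byte, 32)); err != nil {
		t.Fatal(err)
	}
}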
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashPeers(t *testing.T) { + v := Peers{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashPeers(b *testing.B) { + v := Peers{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgPeers(b *testing.B) { + v := Peers{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashPeersHeader(t *testing.T) { + v := PeersHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashPeersHeader(b *testing.B) { + v := PeersHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgPeersHeader(b *testing.B) { + v := PeersHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/proto/servers_test.go b/proto/servers_test.go new file mode 100644 index 000000000..1260b6227 --- /dev/null +++ b/proto/servers_test.go @@ -0,0 +1,77 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proto + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestPeers(t *testing.T) { + Convey("test peers", t, func() { + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + p := &Peers{ + PeersHeader: PeersHeader{ + Term: 1, + Leader: NodeID("00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9"), + Servers: []NodeID{ + NodeID("00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9"), + NodeID("00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35"), + }, + }, + } + err = p.Sign(privKey) + So(err, ShouldBeNil) + err = p.Verify() + So(err, ShouldBeNil) + + // after encode/decode + buf, err := utils.EncodeMsgPack(p) + var peers *Peers + err = utils.DecodeMsgPack(buf.Bytes(), &peers) + So(err, ShouldBeNil) + err = peers.Verify() + So(err, ShouldBeNil) + + peers2 := peers.Clone() + err = peers2.Verify() + So(err, ShouldBeNil) + + i, found := peers.Find(NodeID("00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35")) + So(i, ShouldEqual, 1) + So(found, ShouldBeTrue) + + i, found = peers.Find(NodeID("0000000000000000000000000000000000000000000000000000000000000001")) + So(found, ShouldBeFalse) + + // verify hash failed + peers.Term = 2 + err = peers.Verify() + So(err, ShouldNotBeNil) + err = peers.Sign(privKey) + So(err, ShouldBeNil) + + // verify failed + p.Signature = peers.Signature + err = p.Verify() + So(err, ShouldNotBeNil) + }) +} diff --git a/route/acl.go b/route/acl.go index 25fdf7494..9664f7e3c 100644 --- a/route/acl.go +++ b/route/acl.go @@ -77,8 +77,6 @@ const ( DBSAck // DBSDeploy is used by BP to create/drop/update database DBSDeploy - // DBSGetRequest is used by observer to view original request - DBSGetRequest // DBCCall is used by Miner for data consistency DBCCall // BPDBCreateDatabase is used by client to create database @@ -93,14 +91,10 @@ const ( SQLCAdviseNewBlock // SQLCAdviseBinLog is usd by sqlchain to advise binlog between adjacent node SQLCAdviseBinLog - // SQLCAdviseResponsedQuery is used by sqlchain to advice response query between adjacent node - SQLCAdviseResponsedQuery - // SQLCAdviseAckedQuery is used by sqlchain to advise response ack between adjacent node + // SQLCAdviseAckedQuery is used by sqlchain to advice response query between adjacent node SQLCAdviseAckedQuery // SQLCFetchBlock is used by sqlchain to fetch block from adjacent nodes SQLCFetchBlock - // SQLCFetchAckedQuery is used by sqlchain to fetch response ack from adjacent nodes - SQLCFetchAckedQuery // SQLCSignBilling is used by sqlchain to response billing signature for periodic billing request SQLCSignBilling // SQLCLaunchBilling is used by blockproducer to trigger the billing process in sqlchain @@ -109,8 +103,6 @@ const ( SQLCSubscribeTransactions // SQLCCancelSubscription is used by sqlchain to handle observer subscription cancellation request SQLCCancelSubscription - // OBSAdviseAckedQuery is used by sqlchain to push acked query to observers - OBSAdviseAckedQuery // OBSAdviseNewBlock is used by sqlchain to push new block to observers OBSAdviseNewBlock // MCCAdviseNewBlock is used by block producer to push block to adjacent nodes @@ -167,8 +159,6 @@ func (s RemoteFunc) String() string { return "DBS.Ack" case DBSDeploy: return "DBS.Deploy" - case DBSGetRequest: - return "DBS.GetRequest" case DBCCall: return "DBC.Call" case BPDBCreateDatabase: @@ -183,14 +173,10 @@ func (s RemoteFunc) String() string { return "SQLC.AdviseNewBlock" case SQLCAdviseBinLog: return "SQLC.AdviseBinLog" - case SQLCAdviseResponsedQuery: - return 
"SQLC.AdviseResponsedQuery" case SQLCAdviseAckedQuery: return "SQLC.AdviseAckedQuery" case SQLCFetchBlock: return "SQLC.FetchBlock" - case SQLCFetchAckedQuery: - return "SQLC.FetchAckedQuery" case SQLCSignBilling: return "SQLC.SignBilling" case SQLCLaunchBilling: @@ -199,8 +185,6 @@ func (s RemoteFunc) String() string { return "SQLC.SubscribeTransactions" case SQLCCancelSubscription: return "SQLC.CancelSubscription" - case OBSAdviseAckedQuery: - return "OBS.AdviseAckedQuery" case OBSAdviseNewBlock: return "OBS.AdviseNewBlock" case MCCAdviseNewBlock: diff --git a/route/service.go b/route/service.go index ad7d34da7..b02725380 100644 --- a/route/service.go +++ b/route/service.go @@ -50,7 +50,7 @@ func NewDHTService(DHTStorePath string, persistImpl consistent.Persistence, init } // Nil RPC does nothing just for probe -func (DHT *DHTService) Nil(req *proto.PingReq, resp *proto.PingResp) (err error) { +func (DHT *DHTService) Nil(req *interface{}, resp *interface{}) (err error) { return } diff --git a/rpc/leak_test.go b/rpc/leak_test.go index d90f4fe83..91b96159c 100644 --- a/rpc/leak_test.go +++ b/rpc/leak_test.go @@ -43,7 +43,8 @@ func TestSessionPool_SessionBroken(t *testing.T) { os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db")) os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db-shm")) os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db-wal")) - os.Remove(FJ(testWorkingDir, "./leak/leader/kayak.db")) + os.Remove(FJ(testWorkingDir, "./leak/kayak.db")) + os.RemoveAll(FJ(testWorkingDir, "./leak/kayak.ldb")) leader, err := utils.RunCommandNB( FJ(baseDir, "./bin/cqld"), diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index cf971c554..d39b12c86 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -61,12 +61,12 @@ func NewPersistentCaller(target proto.NodeID) *PersistentCaller { } } -func (c *PersistentCaller) initClient(method string) (err error) { +func (c *PersistentCaller) initClient(isAnonymous bool) (err error) { c.Lock() defer c.Unlock() if c.client == nil { var conn net.Conn - conn, err = DialToNode(c.TargetID, c.pool, method == route.DHTPing.String()) + conn, err = DialToNode(c.TargetID, c.pool, isAnonymous) if err != nil { log.WithField("target", c.TargetID).WithError(err).Error("dial to node failed") return @@ -83,28 +83,21 @@ func (c *PersistentCaller) initClient(method string) (err error) { // Call invokes the named function, waits for it to complete, and returns its error status. 
func (c *PersistentCaller) Call(method string, args interface{}, reply interface{}) (err error) { - err = c.initClient(method) + err = c.initClient(method == route.DHTPing.String()) if err != nil { log.WithError(err).Error("init PersistentCaller client failed") return } err = c.client.Call(method, args, reply) if err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { + if err == io.EOF || + err == io.ErrUnexpectedEOF || + err == io.ErrClosedPipe || + err == rpc.ErrShutdown { // if got EOF, retry once - c.Lock() - c.Close() - c.client = nil - c.Unlock() - err = c.initClient(method) + err = c.Reconnect(method) if err != nil { - log.WithField("rpc", method).WithError(err).Error("second init client for RPC failed") - return - } - err = c.client.Call(method, args, reply) - if err != nil { - log.WithField("rpc", method).WithError(err).Error("second time call RPC failed") - return + log.WithField("rpc", method).WithError(err).Error("reconnect failed") } } log.WithField("rpc", method).WithError(err).Error("call RPC failed") @@ -112,7 +105,21 @@ func (c *PersistentCaller) Call(method string, args interface{}, reply interface return } -// Close closes the stream and RPC client +// Reconnect tries to rebuild RPC client +func (c *PersistentCaller) Reconnect(method string) (err error) { + c.Lock() + c.Close() + c.client = nil + c.Unlock() + err = c.initClient(method == route.DHTPing.String()) + if err != nil { + log.WithField("rpc", method).WithError(err).Error("second init client for RPC failed") + return + } + return +} + +// CloseStream closes the stream and RPC client func (c *PersistentCaller) CloseStream() { if c.client != nil { if c.client.Conn != nil { diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index c3442336c..13c786a28 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -342,20 +342,28 @@ func BenchmarkPersistentCaller_Call(b *testing.B) { client = NewPersistentCaller(conf.GConf.BP.NodeID) b.Run("benchmark Persistent Call Nil", func(b *testing.B) { - var ( - req proto.PingReq - resp proto.PingResp - ) - b.ResetTimer() for i := 0; i < b.N; i++ { - err = client.Call("DHT.Nil", &req, &resp) + err = client.Call("DHT.Nil", nil, nil) if err != nil { b.Error(err) } } }) + b.Run("benchmark Persistent Call parallel Nil", func(b *testing.B) { + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.Call("DHT.Nil", nil, nil) + if err != nil { + b.Error(err) + } + } + }) + }) + req := &proto.FindNeighborReq{ ID: "1234567812345678123456781234567812345678123456781234567812345678", Count: 10, diff --git a/rpc/sharedsecret.go b/rpc/sharedsecret.go index e9166a1c3..86d150464 100644 --- a/rpc/sharedsecret.go +++ b/rpc/sharedsecret.go @@ -17,6 +17,7 @@ package rpc import ( + "fmt" "sync" "github.com/CovenantSQL/CovenantSQL/conf" @@ -70,8 +71,8 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe symmetricKeyCache.Store(nodeID, symmetricKey) log.WithFields(log.Fields{ "node": nodeID.String(), - "remotePub": remotePublicKey.Serialize(), - "sessionKey": symmetricKey, + "remotePub": fmt.Sprintf("%#x", remotePublicKey.Serialize()), + "sessionKey": fmt.Sprintf("%#x", symmetricKey), }).Debug("generated shared secret") } //log.Debugf("ECDH for %s Public Key: %x, Private Key: %x Session Key: %x", diff --git a/sqlchain/ackindex.go b/sqlchain/ackindex.go new file mode 100644 index 000000000..8e84d8bf5 --- /dev/null +++ b/sqlchain/ackindex.go @@ -0,0 +1,239 @@ +/* + * Copyright 2018 The CovenantSQL Authors. 
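To see the reconnect refactor from the rpc/rpcutil.go hunk above in action: Call now detects a dead stream (io.EOF, io.ErrUnexpectedEOF, io.ErrClosedPipe, or net/rpc's ErrShutdown) and retries exactly once through the new Reconnect method. A usage sketch; the target node id is a placeholder and would normally come from configuration:

package main

import (
	"github.com/CovenantSQL/CovenantSQL/proto"
	"github.com/CovenantSQL/CovenantSQL/rpc"
	"github.com/CovenantSQL/CovenantSQL/utils/log"
)

func main() {
	// placeholder node id
	target := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000")
	caller := rpc.NewPersistentCaller(target)

	// the connection is dialed lazily on first Call; a broken stream is
	// rebuilt once via Reconnect before the error is surfaced
	if err := caller.Call("DHT.Nil", nil, nil); err != nil {
		log.WithError(err).Error("call failed even after one reconnect")
	}
}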
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sqlchain + +import ( + "sync" + "sync/atomic" + + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" +) + +var ( + // Global atomic counters for stats + multiIndexCount int32 + responseCount int32 + ackTrackerCount int32 +) + +type ackTracker struct { + resp *types.SignedResponseHeader + ack *types.SignedAckHeader +} + +type multiAckIndex struct { + sync.RWMutex + ri map[types.QueryKey]*types.SignedResponseHeader // ri is the index of queries without acks + qi map[types.QueryKey]*ackTracker // qi is the index of query trackers +} + +func (i *multiAckIndex) addResponse(resp *types.SignedResponseHeader) (err error) { + var key = resp.ResponseHeader.Request.GetQueryKey() + log.Debugf("Adding key %s <-- resp %s", &key, resp.Hash()) + i.Lock() + defer i.Unlock() + if oresp, ok := i.ri[key]; ok { + if oresp.Hash() != resp.Hash() { + err = errors.Wrapf(ErrResponseSeqNotMatch, "add key %s <-- resp %s", &key, resp.Hash()) + return + } + return + } + i.ri[key] = resp + atomic.AddInt32(&responseCount, 1) + return +} + +func (i *multiAckIndex) register(ack *types.SignedAckHeader) (err error) { + var ( + resp *types.SignedResponseHeader + ok bool + key = ack.SignedRequestHeader().GetQueryKey() + ) + log.Debugf("Registering key %s <-- ack %s", &key, ack.Hash()) + + i.Lock() + defer i.Unlock() + if resp, ok = i.ri[key]; !ok { + err = errors.Wrapf(ErrQueryNotFound, "register key %s <-- ack %s", &key, ack.Hash()) + return + } + delete(i.ri, key) + i.qi[key] = &ackTracker{ + resp: resp, + ack: ack, + } + atomic.AddInt32(&responseCount, -1) + atomic.AddInt32(&ackTrackerCount, 1) + return +} + +func (i *multiAckIndex) remove(ack *types.SignedAckHeader) (err error) { + var key = ack.SignedRequestHeader().GetQueryKey() + log.Debugf("Removing key %s -x- ack %s", &key, ack.Hash()) + i.Lock() + defer i.Unlock() + if _, ok := i.ri[key]; ok { + delete(i.ri, key) + atomic.AddInt32(&responseCount, -1) + return + } + if oack, ok := i.qi[key]; ok { + if oack.ack.Hash() != ack.Hash() { + err = errors.Wrapf( + ErrMultipleAckOfSeqNo, "remove key %s -x- ack %s", &key, ack.Hash()) + return + } + delete(i.qi, key) + atomic.AddInt32(&ackTrackerCount, -1) + return + } + err = errors.Wrapf(ErrQueryNotFound, "remove key %s -x- ack %s", &key, ack.Hash()) + return +} + +func (i *multiAckIndex) acks() (ret []*types.SignedAckHeader) { + i.RLock() + defer i.RUnlock() + for _, v := range i.qi { + ret = append(ret, v.ack) + } + return +} + +func (i *multiAckIndex) expire() { + i.RLock() + defer i.RUnlock() + // TODO(leventeliu): need further processing. 
+ for _, v := range i.ri { + log.WithFields(log.Fields{ + "request_hash": v.Request.Hash(), + "request_time": v.Request.Timestamp, + "request_type": v.Request.QueryType, + "request_node": v.Request.NodeID, + "response_hash": v.Hash(), + "response_node": v.NodeID, + "response_time": v.Timestamp, + }).Warn("Query expires without acknowledgement") + } + for _, v := range i.qi { + log.WithFields(log.Fields{ + "request_hash": v.resp.Request.Hash(), + "request_time": v.resp.Request.Timestamp, + "request_type": v.resp.Request.QueryType, + "request_node": v.resp.Request.NodeID, + "response_hash": v.ack.Response.Hash(), + "response_node": v.ack.Response.NodeID, + "response_time": v.ack.Response.Timestamp, + "ack_hash": v.ack.Hash(), + "ack_node": v.ack.NodeID, + "ack_time": v.ack.Timestamp, + }).Warn("Query expires without block producing") + } +} + +type ackIndex struct { + hi map[int32]*multiAckIndex + + sync.RWMutex + barrier int32 +} + +func newAckIndex() *ackIndex { + return &ackIndex{ + hi: make(map[int32]*multiAckIndex), + } +} + +func (i *ackIndex) load(h int32) (mi *multiAckIndex, err error) { + var ok bool + i.Lock() + defer i.Unlock() + if h < i.barrier { + err = errors.Wrapf(ErrQueryExpired, "loading index at height %d barrier %d", h, i.barrier) + return + } + if mi, ok = i.hi[h]; !ok { + mi = &multiAckIndex{ + ri: make(map[types.QueryKey]*types.SignedResponseHeader), + qi: make(map[types.QueryKey]*ackTracker), + } + i.hi[h] = mi + atomic.AddInt32(&multiIndexCount, 1) + } + return +} + +func (i *ackIndex) advance(h int32) { + var dl []*multiAckIndex + i.Lock() + for x := i.barrier; x < h; x++ { + if mi, ok := i.hi[x]; ok { + dl = append(dl, mi) + } + delete(i.hi, x) + } + i.barrier = h + i.Unlock() + // Record expired and not acknowledged queries + for _, v := range dl { + v.expire() + atomic.AddInt32(&responseCount, int32(-len(v.ri))) + atomic.AddInt32(&ackTrackerCount, int32(-len(v.qi))) + } + atomic.AddInt32(&multiIndexCount, int32(-len(dl))) +} + +func (i *ackIndex) addResponse(h int32, resp *types.SignedResponseHeader) (err error) { + var mi *multiAckIndex + if mi, err = i.load(h); err != nil { + return + } + return mi.addResponse(resp) +} + +func (i *ackIndex) register(h int32, ack *types.SignedAckHeader) (err error) { + var mi *multiAckIndex + if mi, err = i.load(h); err != nil { + return + } + return mi.register(ack) +} + +func (i *ackIndex) remove(h int32, ack *types.SignedAckHeader) (err error) { + var mi *multiAckIndex + if mi, err = i.load(h); err != nil { + return + } + return mi.remove(ack) +} + +func (i *ackIndex) acks(h int32) (ret []*types.SignedAckHeader) { + var b = func() int32 { + i.RLock() + defer i.RUnlock() + return i.barrier + }() + for x := b; x <= h; x++ { + if mi, err := i.load(x); err == nil { + ret = append(ret, mi.acks()...) + } + } + return +} diff --git a/sqlchain/ackindex_test.go b/sqlchain/ackindex_test.go new file mode 100644 index 000000000..77ccd4d18 --- /dev/null +++ b/sqlchain/ackindex_test.go @@ -0,0 +1,60 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
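Putting the ackIndex pieces together: responses enter at a block height via addResponse, a matching ack upgrades them to a tracker via register, acks(h) collects trackers up to a height for block production, and advance(h) raises the barrier, expiring (and logging) anything below it. A sketch of the lifecycle inside the sqlchain package; the function is hypothetical, and resp/ack stand for headers produced by real query processing:

// hypothetical illustration within package sqlchain, not part of this patch
func ackLifecycle(resp *types.SignedResponseHeader, ack *types.SignedAckHeader) {
	ai := newAckIndex()

	_ = ai.addResponse(10, resp) // response observed at height 10, awaiting ack
	_ = ai.register(10, ack)     // matching ack arrives: moved into a tracker

	pending := ai.acks(12) // collect acks in [barrier, 12] for block production
	_ = pending

	ai.advance(11) // heights below 11 expire; unacked queries are logged
}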
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sqlchain + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + . "github.com/smartystreets/goconvey/convey" +) + +func TestAckIndex(t *testing.T) { + Convey("Given a ackIndex instance", t, func() { + var ( + err error + + ai = newAckIndex() + resp = &types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + NodeID: proto.NodeID( + "0000000000000000000000000000000000000000000000000000000000000000"), + ConnectionID: 0, + SeqNo: 0, + }, + }, + }, + } + ack = &types.SignedAckHeader{ + AckHeader: types.AckHeader{ + Response: *resp, + }, + } + ) + Convey("Add response and register ack should return no error", func() { + err = ai.addResponse(0, resp) + So(err, ShouldBeNil) + err = ai.register(0, ack) + So(err, ShouldBeNil) + err = ai.remove(0, ack) + So(err, ShouldBeNil) + }) + }) +} diff --git a/sqlchain/blockindex.go b/sqlchain/blockindex.go index c2333a1e1..04bf7d97f 100644 --- a/sqlchain/blockindex.go +++ b/sqlchain/blockindex.go @@ -21,18 +21,18 @@ import ( "sync" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" ) type blockNode struct { parent *blockNode - block *ct.Block // TODO(leventeliu): cleanup history blocks to release memory. + block *types.Block hash hash.Hash height int32 // height is the chain height of the head count int32 // count counts the blocks (except genesis) at this head } -func newBlockNode(height int32, block *ct.Block, parent *blockNode) *blockNode { +func newBlockNode(height int32, block *types.Block, parent *blockNode) *blockNode { return &blockNode{ hash: *block.BlockHash(), parent: parent, @@ -48,7 +48,7 @@ func newBlockNode(height int32, block *ct.Block, parent *blockNode) *blockNode { } } -func (n *blockNode) initBlockNode(height int32, block *ct.Block, parent *blockNode) { +func (n *blockNode) initBlockNode(height int32, block *types.Block, parent *blockNode) { n.block = block n.hash = *block.BlockHash() n.parent = nil @@ -85,19 +85,14 @@ func (n *blockNode) indexKey() (key []byte) { } type blockIndex struct { - cfg *Config - mu sync.RWMutex index map[hash.Hash]*blockNode } -func newBlockIndex(cfg *Config) (index *blockIndex) { - index = &blockIndex{ - cfg: cfg, +func newBlockIndex() (index *blockIndex) { + return &blockIndex{ index: make(map[hash.Hash]*blockNode), } - - return index } func (i *blockIndex) addBlock(newBlock *blockNode) { diff --git a/sqlchain/blockindex_test.go b/sqlchain/blockindex_test.go index df47a4cdd..e372980c8 100644 --- a/sqlchain/blockindex_test.go +++ b/sqlchain/blockindex_test.go @@ -20,16 +20,16 @@ import ( "testing" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" ) var ( - testBlocks []*ct.Block + testBlocks []*types.Block testBlockNumber = 50 ) func generateTestBlocks() (err error) { - testBlocks = make([]*ct.Block, 0, testBlockNumber) + testBlocks = make([]*types.Block, 0, testBlockNumber) for i, prev := 0, genesisHash; i < testBlockNumber; i++ { b, err := createRandomBlock(prev, false) @@ -108,8 +108,7 @@ func TestInitBlockNode(t *testing.T) { } func TestAncestor(t *testing.T) { - cfg := &Config{} - index := newBlockIndex(cfg) + index := newBlockIndex() 
parent := (*blockNode)(nil)

	for h, b := range testBlocks {
@@ -142,8 +141,7 @@ func TestAncestor(t *testing.T) {
}

func TestIndex(t *testing.T) {
-	cfg := &Config{}
-	index := newBlockIndex(cfg)
+	index := newBlockIndex()
	parent := (*blockNode)(nil)

	for h, b := range testBlocks {
diff --git a/sqlchain/chain.go b/sqlchain/chain.go
index 8984fd8aa..a95f81c45 100644
--- a/sqlchain/chain.go
+++ b/sqlchain/chain.go
@@ -21,28 +21,34 @@ import (
	"encoding/binary"
	"fmt"
	"os"
+	rt "runtime"
	"sync"
+	"sync/atomic"
	"time"

	pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types"
	"github.com/CovenantSQL/CovenantSQL/crypto"
	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
-	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
	"github.com/CovenantSQL/CovenantSQL/crypto/kms"
-	"github.com/CovenantSQL/CovenantSQL/kayak"
	"github.com/CovenantSQL/CovenantSQL/proto"
	"github.com/CovenantSQL/CovenantSQL/route"
	"github.com/CovenantSQL/CovenantSQL/rpc"
-	ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types"
+	"github.com/CovenantSQL/CovenantSQL/types"
	"github.com/CovenantSQL/CovenantSQL/utils"
	"github.com/CovenantSQL/CovenantSQL/utils/log"
-	wt "github.com/CovenantSQL/CovenantSQL/worker/types"
+	x "github.com/CovenantSQL/CovenantSQL/xenomint"
+	xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces"
+	xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite"
	"github.com/pkg/errors"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

+const (
+	minBlockCacheTTL = int32(30)
+)
+
var (
	metaState         = [4]byte{'S', 'T', 'A', 'T'}
	metaBlockIndex    = [4]byte{'B', 'L', 'C', 'K'}
@@ -50,13 +56,21 @@ var (
	metaResponseIndex = [4]byte{'R', 'E', 'S', 'P'}
	metaAckIndex      = [4]byte{'Q', 'A', 'C', 'K'}
	leveldbConf       = opt.Options{}
+
+	// Atomic counters for stats
+	cachedBlockCount int32
)

func init() {
	leveldbConf.BlockSize = 4 * 1024 * 1024
	leveldbConf.Compression = opt.SnappyCompression
-	leveldbConf.WriteBuffer = 64 * 1024 * 1024
-	leveldbConf.BlockCacheCapacity = 2 * leveldbConf.WriteBuffer
+}
+
+func statBlock(b *types.Block) {
+	atomic.AddInt32(&cachedBlockCount, 1)
+	rt.SetFinalizer(b, func(_ *types.Block) {
+		atomic.AddInt32(&cachedBlockCount, -1)
+	})
}

// heightToKey converts a height in int32 to a key in bytes.
@@ -94,15 +108,21 @@ type Chain struct {
	// tdb stores ack/request/response
	tdb *leveldb.DB
	bi  *blockIndex
-	qi  *queryIndex
+	ai  *ackIndex
+	st  *x.State
	cl  *rpc.Caller
	rt  *runtime

	stopCh    chan struct{}
-	blocks    chan *ct.Block
+	blocks    chan *types.Block
	heights   chan int32
-	responses chan *wt.ResponseHeader
-	acks      chan *wt.AckHeader
+	responses chan *types.ResponseHeader
+	acks      chan *types.AckHeader
+
+	// DBAccount info
+	tokenType    pt.TokenType
+	gasPrice     uint64
+	updatePeriod uint64

	// observerLock defines the lock of observer update operations.
	observerLock sync.Mutex
@@ -114,6 +134,11 @@ type Chain struct {
	replCh chan struct{}
	// replWg defines the waitGroups for running replications.
	replWg sync.WaitGroup
+
+	// Cached fields; some of these fields may need to be renewed later.
+	//
+	// pk is the private key of the local miner.
+	pk *asymmetric.PrivateKey
}

// NewChain creates a new sql-chain struct.
@@ -121,7 +146,7 @@ func NewChain(c *Config) (chain *Chain, err error) {
	// TODO(leventeliu): this is a rough solution, you may also want to clean database file and
	// force rebuilding.
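`statBlock` above pairs an atomic increment with a GC finalizer so `cachedBlockCount` tracks blocks still reachable in memory. A standalone sketch of the same pattern, with a `blob` type standing in for `types.Block`; finalizers run at the garbage collector's discretion, so the count is eventually consistent, not exact:

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

// blob stands in for types.Block.
type blob struct{ payload [1 << 20]byte }

var liveCount int32

// track mirrors statBlock: count up now, count down when the GC finalizes.
func track(b *blob) *blob {
	atomic.AddInt32(&liveCount, 1)
	runtime.SetFinalizer(b, func(_ *blob) {
		atomic.AddInt32(&liveCount, -1)
	})
	return b
}

func main() {
	for i := 0; i < 4; i++ {
		track(&blob{}) // dropped immediately: unreachable after this call
	}
	kept := track(&blob{}) // stays reachable until the final print
	runtime.GC()
	time.Sleep(100 * time.Millisecond) // let queued finalizers run
	fmt.Println(atomic.LoadInt32(&liveCount), kept != nil)
}
```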
var fi os.FileInfo - if fi, err = os.Stat(c.DataFile + "-block-state.ldb"); err == nil && fi.Mode().IsDir() { + if fi, err = os.Stat(c.ChainFilePrefix + "-block-state.ldb"); err == nil && fi.Mode().IsDir() { return LoadChain(c) } @@ -131,39 +156,68 @@ func NewChain(c *Config) (chain *Chain, err error) { } // Open LevelDB for block and state - bdbFile := c.DataFile + "-block-state.ldb" + bdbFile := c.ChainFilePrefix + "-block-state.ldb" bdb, err := leveldb.OpenFile(bdbFile, &leveldbConf) if err != nil { err = errors.Wrapf(err, "open leveldb %s", bdbFile) return } + log.Debugf("Create new chain bdb %s", bdbFile) + // Open LevelDB for ack/request/response - tdbFile := c.DataFile + "-ack-req-resp.ldb" + tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" tdb, err := leveldb.OpenFile(tdbFile, &leveldbConf) if err != nil { err = errors.Wrapf(err, "open leveldb %s", tdbFile) return } + log.Debugf("Create new chain tdb %s", tdbFile) + + // Open x.State + var ( + strg xi.Storage + state *x.State + ) + if strg, err = xs.NewSqlite(c.DataFile); err != nil { + return + } + if state, err = x.NewState(c.Server, strg); err != nil { + return + } + + // Cache local private key + var pk *asymmetric.PrivateKey + if pk, err = kms.GetLocalPrivateKey(); err != nil { + err = errors.Wrap(err, "failed to cache private key") + return + } + // Create chain state chain = &Chain{ - bdb: bdb, - tdb: tdb, - bi: newBlockIndex(c), - qi: newQueryIndex(), - cl: rpc.NewCaller(), - rt: newRunTime(c), - stopCh: make(chan struct{}), - blocks: make(chan *ct.Block), - heights: make(chan int32, 1), - responses: make(chan *wt.ResponseHeader), - acks: make(chan *wt.AckHeader), + bdb: bdb, + tdb: tdb, + bi: newBlockIndex(), + ai: newAckIndex(), + st: state, + cl: rpc.NewCaller(), + rt: newRunTime(c), + stopCh: make(chan struct{}), + blocks: make(chan *types.Block), + heights: make(chan int32, 1), + responses: make(chan *types.ResponseHeader), + acks: make(chan *types.AckHeader), + tokenType: c.TokenType, + gasPrice: c.GasPrice, + updatePeriod: c.UpdatePeriod, // Observer related observers: make(map[proto.NodeID]int32), observerReplicators: make(map[proto.NodeID]*observerReplicator), replCh: make(chan struct{}), + + pk: pk, } if err = chain.pushBlock(c.Genesis); err != nil { @@ -175,9 +229,8 @@ func NewChain(c *Config) (chain *Chain, err error) { // LoadChain loads the chain state from the specified database and rebuilds a memory index. 
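The `os.Stat` probe above is what routes `NewChain` into `LoadChain`: a chain is treated as already initialized when the `<ChainFilePrefix>-block-state.ldb` LevelDB directory exists. A minimal sketch of that decision (the prefix value is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// chainExists mirrors the probe in NewChain: the chain is considered
// already initialized when the block-state LevelDB directory exists.
func chainExists(chainFilePrefix string) bool {
	fi, err := os.Stat(chainFilePrefix + "-block-state.ldb")
	return err == nil && fi.Mode().IsDir()
}

func main() {
	// "./data/chain" is an illustrative prefix, not a real config value.
	if chainExists("./data/chain") {
		fmt.Println("existing state found: load the chain")
	} else {
		fmt.Println("no state yet: bootstrap from the genesis block")
	}
}
```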
func LoadChain(c *Config) (chain *Chain, err error) { - // Open LevelDB for block and state - bdbFile := c.DataFile + "-block-state.ldb" + bdbFile := c.ChainFilePrefix + "-block-state.ldb" bdb, err := leveldb.OpenFile(bdbFile, &leveldbConf) if err != nil { err = errors.Wrapf(err, "open leveldb %s", bdbFile) @@ -185,31 +238,56 @@ func LoadChain(c *Config) (chain *Chain, err error) { } // Open LevelDB for ack/request/response - tdbFile := c.DataFile + "-ack-req-resp.ldb" + tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" tdb, err := leveldb.OpenFile(tdbFile, &leveldbConf) if err != nil { err = errors.Wrapf(err, "open leveldb %s", tdbFile) return } + // Open x.State + var ( + strg xi.Storage + xstate *x.State + ) + if strg, err = xs.NewSqlite(c.DataFile); err != nil { + return + } + if xstate, err = x.NewState(c.Server, strg); err != nil { + return + } + + // Cache local private key + var pk *asymmetric.PrivateKey + if pk, err = kms.GetLocalPrivateKey(); err != nil { + err = errors.Wrap(err, "failed to cache private key") + return + } + // Create chain state chain = &Chain{ - bdb: bdb, - tdb: tdb, - bi: newBlockIndex(c), - qi: newQueryIndex(), - cl: rpc.NewCaller(), - rt: newRunTime(c), - stopCh: make(chan struct{}), - blocks: make(chan *ct.Block), - heights: make(chan int32, 1), - responses: make(chan *wt.ResponseHeader), - acks: make(chan *wt.AckHeader), + bdb: bdb, + tdb: tdb, + bi: newBlockIndex(), + ai: newAckIndex(), + st: xstate, + cl: rpc.NewCaller(), + rt: newRunTime(c), + stopCh: make(chan struct{}), + blocks: make(chan *types.Block), + heights: make(chan int32, 1), + responses: make(chan *types.ResponseHeader), + acks: make(chan *types.AckHeader), + tokenType: c.TokenType, + gasPrice: c.GasPrice, + updatePeriod: c.UpdatePeriod, // Observer related observers: make(map[proto.NodeID]int32), observerReplicators: make(map[proto.NodeID]*observerReplicator), replCh: make(chan struct{}), + + pk: pk, } // Read state struct @@ -228,55 +306,61 @@ func LoadChain(c *Config) (chain *Chain, err error) { }).Debug("Loading state from database") // Read blocks and rebuild memory index - var last *blockNode - var index int32 - // TODO(lambda): select a better init length - nodes := make([]blockNode, 100) - blockIter := chain.bdb.NewIterator(util.BytesPrefix(metaBlockIndex[:]), nil) + var ( + id uint64 + index int32 + last *blockNode + blockIter = chain.bdb.NewIterator(util.BytesPrefix(metaBlockIndex[:]), nil) + ) defer blockIter.Release() - for blockIter.Next() { - k := blockIter.Key() - v := blockIter.Value() + for index = 0; blockIter.Next(); index++ { + var ( + k = blockIter.Key() + v = blockIter.Value() + block = &types.Block{} - block := &ct.Block{} + current, parent *blockNode + ) if err = utils.DecodeMsgPack(v, block); err != nil { - err = errors.Wrapf(err, "block height %d, key index %s", keyWithSymbolToHeight(k), string(k)) + err = errors.Wrapf(err, "decoding failed at height %d with key %s", + keyWithSymbolToHeight(k), string(k)) return } - log.WithFields(log.Fields{ "peer": chain.rt.getPeerInfoString(), "block": block.BlockHash().String(), }).Debug("Loading block from database") - parent := (*blockNode)(nil) if last == nil { if err = block.VerifyAsGenesis(); err != nil { + err = errors.Wrap(err, "genesis verification failed") return } - // Set constant fields from genesis block chain.rt.setGenesis(block) } else if block.ParentHash().IsEqual(&last.hash) { if err = block.Verify(); err != nil { + err = errors.Wrapf(err, "block verification failed at height %d with key %s", + 
keyWithSymbolToHeight(k), string(k)) return } - parent = last } else { - parent = chain.bi.lookupNode(block.ParentHash()) - - if parent == nil { + if parent = chain.bi.lookupNode(block.ParentHash()); parent == nil { return nil, ErrParentNotFound } } - height := chain.rt.getHeightFromTime(block.Timestamp()) - nodes[index].initBlockNode(height, block, parent) - chain.bi.addBlock(&nodes[index]) - last = &nodes[index] - index++ + // Update id + if nid, ok := block.CalcNextID(); ok && nid > id { + id = nid + } + + current = &blockNode{} + current.initBlockNode(chain.rt.getHeightFromTime(block.Timestamp()), block, parent) + chain.bi.addBlock(current) + last = current } if err = blockIter.Error(); err != nil { err = errors.Wrap(err, "load block") @@ -286,6 +370,8 @@ func LoadChain(c *Config) (chain *Chain, err error) { // Set chain state st.node = last chain.rt.setHead(st) + chain.st.InitTx(id) + chain.pruneBlockCache() // Read queries and rebuild memory index respIter := chain.tdb.NewIterator(util.BytesPrefix(metaResponseIndex[:]), nil) @@ -294,20 +380,15 @@ func LoadChain(c *Config) (chain *Chain, err error) { k := respIter.Key() v := respIter.Value() h := keyWithSymbolToHeight(k) - var resp = &wt.SignedResponseHeader{} + var resp = &types.SignedResponseHeader{} if err = utils.DecodeMsgPack(v, resp); err != nil { err = errors.Wrapf(err, "load resp, height %d, index %s", h, string(k)) return } log.WithFields(log.Fields{ "height": h, - "header": resp.HeaderHash.String(), + "header": resp.Hash().String(), }).Debug("Loaded new resp header") - err = chain.qi.addResponse(h, resp) - if err != nil { - err = errors.Wrapf(err, "load resp, height %d, hash %s", h, resp.HeaderHash.String()) - return - } } if err = respIter.Error(); err != nil { err = errors.Wrap(err, "load resp") @@ -320,20 +401,15 @@ func LoadChain(c *Config) (chain *Chain, err error) { k := ackIter.Key() v := ackIter.Value() h := keyWithSymbolToHeight(k) - var ack = &wt.SignedAckHeader{} + var ack = &types.SignedAckHeader{} if err = utils.DecodeMsgPack(v, ack); err != nil { err = errors.Wrapf(err, "load ack, height %d, index %s", h, string(k)) return } log.WithFields(log.Fields{ "height": h, - "header": ack.HeaderHash.String(), + "header": ack.Hash().String(), }).Debug("Loaded new ack header") - err = chain.qi.addAck(h, ack) - if err != nil { - err = errors.Wrapf(err, "load ack, height %d, hash %s", h, ack.HeaderHash.String()) - return - } } if err = respIter.Error(); err != nil { err = errors.Wrap(err, "load ack") @@ -344,7 +420,7 @@ func LoadChain(c *Config) (chain *Chain, err error) { } // pushBlock pushes the signed block header to extend the current main chain. 
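The loading loop above rebuilds the in-memory index in key order: the first record must verify as genesis, later blocks either extend the last node or attach to a known fork parent, and the maximum `CalcNextID` seen is handed to `InitTx`. The same control flow, reduced to stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

// blk is a stand-in for a decoded types.Block.
type blk struct {
	hash, parent string
	nextID       uint64 // what block.CalcNextID would report
}

// rebuild mirrors the LoadChain loop: genesis first, then extend-or-fork,
// while tracking the maximum next transaction ID for InitTx.
func rebuild(blocks []blk) (maxID uint64, err error) {
	known := map[string]bool{}
	last := ""
	for _, b := range blocks {
		switch {
		case last == "":
			// first record: VerifyAsGenesis would run here
		case b.parent == last:
			// extends the current head: Verify would run here
		case known[b.parent]:
			// attaches to an indexed fork parent
		default:
			return 0, errors.New("parent not found")
		}
		known[b.hash] = true
		last = b.hash
		if b.nextID > maxID {
			maxID = b.nextID
		}
	}
	return
}

func main() {
	maxID, err := rebuild([]blk{{"a", "", 1}, {"b", "a", 7}, {"c", "b", 5}})
	fmt.Println(maxID, err) // 7 <nil>
}
```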
-func (c *Chain) pushBlock(b *ct.Block) (err error) { +func (c *Chain) pushBlock(b *types.Block) (err error) { // Prepare and encode h := c.rt.getHeightFromTime(b.Timestamp()) node := newBlockNode(h, b, c.rt.getHead().node) @@ -383,58 +459,55 @@ func (c *Chain) pushBlock(b *ct.Block) (err error) { } c.rt.setHead(st) c.bi.addBlock(node) - c.qi.setSignedBlock(h, b) + + // Keep track of the queries from the new block + var ierr error + for i, v := range b.QueryTxs { + if ierr = c.addResponse(v.Response); ierr != nil { + log.WithFields(log.Fields{ + "index": i, + "producer": b.Producer(), + "block_hash": b.BlockHash(), + }).WithError(ierr).Warn("Failed to add response to ackIndex") + } + } + for i, v := range b.Acks { + if ierr = c.remove(v); ierr != nil { + log.WithFields(log.Fields{ + "index": i, + "producer": b.Producer(), + "block_hash": b.BlockHash(), + }).WithError(ierr).Warn("Failed to remove Ack from ackIndex") + } + } if err == nil { log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString()[:14], - "time": c.rt.getChainTimeString(), - "block": b.BlockHash().String()[:8], - "producer": b.Producer()[:8], - "querycount": len(b.Queries), - "blocktime": b.Timestamp().Format(time.RFC3339Nano), - "blockheight": c.rt.getHeightFromTime(b.Timestamp()), - "headblock": fmt.Sprintf("%s <- %s", + "peer": c.rt.getPeerInfoString()[:14], + "time": c.rt.getChainTimeString(), + "block": b.BlockHash().String()[:8], + "producer": b.Producer()[:8], + "queryCount": len(b.QueryTxs), + "ackCount": len(b.Acks), + "blockTime": b.Timestamp().Format(time.RFC3339Nano), + "height": c.rt.getHeightFromTime(b.Timestamp()), + "head": fmt.Sprintf("%s <- %s", func() string { if st.node.parent != nil { return st.node.parent.hash.String()[:8] } return "|" }(), st.Head.String()[:8]), - "headheight": c.rt.getHead().Height, + "headHeight": c.rt.getHead().Height, }).Info("Pushed new block") } return } -// pushResponedQuery pushes a responsed, signed and verified query into the chain. -func (c *Chain) pushResponedQuery(resp *wt.SignedResponseHeader) (err error) { - h := c.rt.getHeightFromTime(resp.Request.Timestamp) - k := heightToKey(h) - var enc *bytes.Buffer - - if enc, err = utils.EncodeMsgPack(resp); err != nil { - return - } - - tdbKey := utils.ConcatAll(metaResponseIndex[:], k, resp.HeaderHash[:]) - if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put response %d %s", h, resp.HeaderHash.String()) - return - } - - if err = c.qi.addResponse(h, resp); err != nil { - err = errors.Wrapf(err, "add resp h %d hash %s", h, resp.HeaderHash) - return err - } - - return -} - // pushAckedQuery pushes a acknowledged, signed and verified query into the chain. 
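The ack storage in `pushAckedQuery` below keys records as `<4-byte table prefix><height><header hash>`, so one prefix iteration walks a table grouped by height. A sketch of that layout, assuming the big-endian height encoding implied by ordered iteration (the real code builds keys with `utils.ConcatAll` and `heightToKey`):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// tdbKey sketches the <table prefix><height><hash> layout: big-endian
// heights keep each height's records contiguous under prefix iteration.
func tdbKey(prefix [4]byte, height int32, headerHash []byte) []byte {
	key := make([]byte, 8+len(headerHash))
	copy(key[:4], prefix[:])
	binary.BigEndian.PutUint32(key[4:8], uint32(height))
	copy(key[8:], headerHash)
	return key
}

func main() {
	ackPrefix := [4]byte{'Q', 'A', 'C', 'K'} // metaAckIndex from chain.go
	key := tdbKey(ackPrefix, 42, []byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Printf("% x\n", key)
	fmt.Println(binary.BigEndian.Uint32(key[4:8])) // 42
}
```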
-func (c *Chain) pushAckedQuery(ack *wt.SignedAckHeader) (err error) { - log.Debugf("push ack %s", ack.HeaderHash.String()) +func (c *Chain) pushAckedQuery(ack *types.SignedAckHeader) (err error) { + log.Debugf("push ack %s", ack.Hash().String()) h := c.rt.getHeightFromTime(ack.SignedResponseHeader().Timestamp) k := heightToKey(h) var enc *bytes.Buffer @@ -443,50 +516,61 @@ func (c *Chain) pushAckedQuery(ack *wt.SignedAckHeader) (err error) { return } - tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.HeaderHash[:]) + tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.Hash().AsBytes()) if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put ack %d %s", h, ack.HeaderHash.String()) + err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash().String()) return } - if err = c.qi.addAck(h, ack); err != nil { - err = errors.Wrapf(err, "add ack h %d hash %s", h, ack.HeaderHash) - return err + if err = c.register(ack); err != nil { + err = errors.Wrapf(err, "register ack %v at height %d", ack.Hash(), h) + return } return } -// produceBlock prepares, signs and advises the pending block to the orther peers. -func (c *Chain) produceBlock(now time.Time) (err error) { - // Retrieve local key pair - priv, err := kms.GetLocalPrivateKey() - - if err != nil { +// produceBlockV2 prepares, signs and advises the pending block to the other peers. +func (c *Chain) produceBlockV2(now time.Time) (err error) { + var ( + frs []*types.Request + qts []*x.QueryTracker + ) + if frs, qts, err = c.st.CommitEx(); err != nil { return } - - // Pack and sign block - block := &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + var block = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, - Producer: c.rt.getServer().ID, + Producer: c.rt.getServer(), GenesisHash: c.rt.genesisHash, ParentHash: c.rt.getHead().Head, // MerkleRoot: will be set by Block.PackAndSignBlock(PrivateKey) Timestamp: now, }, - // BlockHash/Signee/Signature: will be set by Block.PackAndSignBlock(PrivateKey) }, - Queries: c.qi.markAndCollectUnsignedAcks(c.rt.getNextTurn()), + FailedReqs: frs, + QueryTxs: make([]*types.QueryAsTx, len(qts)), + Acks: c.ai.acks(c.rt.getHeightFromTime(now)), + } + statBlock(block) + for i, v := range qts { + // TODO(leventeliu): maybe block waiting at a ready channel instead? + for !v.Ready() { + time.Sleep(1 * time.Millisecond) + } + block.QueryTxs[i] = &types.QueryAsTx{ + // TODO(leventeliu): add acks for billing. + Request: v.Req, + Response: &v.Resp.Header, + } } - - if err = block.PackAndSignBlock(priv); err != nil { + // Sign block + if err = block.PackAndSignBlock(c.pk); err != nil { return } - // Send to pending list c.blocks <- block log.WithFields(log.Fields{ @@ -496,31 +580,31 @@ func (c *Chain) produceBlock(now time.Time) (err error) { "using_timestamp": now.Format(time.RFC3339Nano), "block_hash": block.BlockHash().String(), }).Debug("Produced new block") - // Advise new block to the other peers - req := &MuxAdviseNewBlockReq{ - Envelope: proto.Envelope{ - // TODO(leventeliu): Add fields. 
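The `TODO(leventeliu)` above suggests blocking on a ready channel instead of polling `v.Ready()` with a 1 ms sleep. One way that alternative could look, with illustrative stand-ins for `x.QueryTracker`:

```go
package main

import (
	"fmt"
	"sync"
)

// tracker is an illustrative stand-in for x.QueryTracker.
type tracker struct {
	once  sync.Once
	ready chan struct{}
}

func newTracker() *tracker { return &tracker{ready: make(chan struct{})} }

// markReady would be called once the response is finalized.
func (t *tracker) markReady() { t.once.Do(func() { close(t.ready) }) }

// wait blocks until ready, replacing the sleep-poll loop on Ready().
func (t *tracker) wait() { <-t.ready }

func main() {
	t := newTracker()
	go t.markReady()
	t.wait()
	fmt.Println("tracker ready, no polling needed")
}
```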
- }, - DatabaseID: c.rt.databaseID, - AdviseNewBlockReq: AdviseNewBlockReq{ - Block: block, - Count: func() int32 { - if nd := c.bi.lookupNode(block.BlockHash()); nd != nil { - return nd.count - } - if pn := c.bi.lookupNode(block.ParentHash()); pn != nil { - return pn.count + 1 - } - return -1 - }(), - }, - } - peers := c.rt.getPeers() - wg := &sync.WaitGroup{} - + var ( + req = &MuxAdviseNewBlockReq{ + Envelope: proto.Envelope{ + // TODO(leventeliu): Add fields. + }, + DatabaseID: c.rt.databaseID, + AdviseNewBlockReq: AdviseNewBlockReq{ + Block: block, + Count: func() int32 { + if nd := c.bi.lookupNode(block.BlockHash()); nd != nil { + return nd.count + } + if pn := c.bi.lookupNode(block.ParentHash()); pn != nil { + return pn.count + 1 + } + return -1 + }(), + }, + } + peers = c.rt.getPeers() + wg = &sync.WaitGroup{} + ) for _, s := range peers.Servers { - if s.ID != c.rt.getServer().ID { + if s != c.rt.getServer() { wg.Add(1) go func(id proto.NodeID) { defer wg.Done() @@ -536,15 +620,12 @@ func (c *Chain) produceBlock(now time.Time) (err error) { }).WithError(err).Error( "Failed to advise new block") } - }(s.ID) + }(s) } } - wg.Wait() - // fire replication to observers c.startStopReplication() - return } @@ -566,25 +647,26 @@ func (c *Chain) syncHead() { succ := false for i, s := range peers.Servers { - if s.ID != c.rt.getServer().ID { + if s != c.rt.getServer() { if err = c.cl.CallNode( - s.ID, route.SQLCFetchBlock.String(), req, resp, + s, route.SQLCFetchBlock.String(), req, resp, ); err != nil || resp.Block == nil { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s.ID), + "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), }).WithError(err).Debug( "Failed to fetch block from peer") } else { + statBlock(resp.Block) c.blocks <- resp.Block log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s.ID), + "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), @@ -612,8 +694,10 @@ func (c *Chain) syncHead() { // runCurrentTurn does the check and runs block producing if its my turn. func (c *Chain) runCurrentTurn(now time.Time) { defer func() { + c.stat() + c.pruneBlockCache() c.rt.setNextTurn() - c.qi.advanceBarrier(c.rt.getMinValidHeight()) + c.ai.advance(c.rt.getMinValidHeight()) // Info the block processing goroutine that the chain height has grown, so please return // any stashed blocks for further check. 
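Block advising above follows a common fan-out shape: skip the local server, call every other peer on its own goroutine, and join the whole batch before returning. Reduced to its skeleton, with `callNode` standing in for `rpc.Caller.CallNode`:

```go
package main

import (
	"fmt"
	"sync"
)

// advise mirrors the fan-out in produceBlockV2: one goroutine per remote
// peer, self excluded, all joined before returning.
func advise(self string, peers []string, callNode func(id string) error) {
	var wg sync.WaitGroup
	for _, s := range peers {
		if s == self {
			continue // never advise ourselves
		}
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			if err := callNode(id); err != nil {
				fmt.Println("failed to advise new block to", id, err)
			}
		}(s)
	}
	wg.Wait()
}

func main() {
	advise("n0", []string{"n0", "n1", "n2"}, func(id string) error {
		fmt.Println("advised", id)
		return nil
	})
}
```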
c.heights <- c.rt.getHead().Height @@ -643,7 +727,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { return } - if err := c.produceBlock(now); err != nil { + if err := c.produceBlockV2(now); err != nil { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), @@ -714,7 +798,7 @@ func (c *Chain) sync() (err error) { func (c *Chain) processBlocks() { rsCh := make(chan struct{}) rsWG := &sync.WaitGroup{} - returnStash := func(stash []*ct.Block) { + returnStash := func(stash []*types.Block) { defer rsWG.Done() for _, block := range stash { select { @@ -731,7 +815,7 @@ func (c *Chain) processBlocks() { c.rt.wg.Done() }() - var stash []*ct.Block + var stash []*types.Block for { select { case h := <-c.heights: @@ -841,21 +925,34 @@ func (c *Chain) Stop() (err error) { "time": c.rt.getChainTimeString(), }).Debug("Chain service stopped") // Close LevelDB file - err = c.bdb.Close() + var ierr error + if ierr = c.bdb.Close(); ierr != nil && err == nil { + err = ierr + } log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), - }).Debug("Chain database closed") - err = c.tdb.Close() + }).WithError(ierr).Debug("Chain database closed") + if ierr = c.tdb.Close(); ierr != nil && err == nil { + err = ierr + } log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), - }).Debug("Chain database closed") + }).WithError(ierr).Debug("Chain database closed") + // Close state + if ierr = c.st.Close(false); ierr != nil && err == nil { + err = ierr + } + log.WithFields(log.Fields{ + "peer": c.rt.getPeerInfoString(), + "time": c.rt.getChainTimeString(), + }).WithError(ierr).Debug("Chain state storage closed") return } // FetchBlock fetches the block at specified height from local cache. -func (c *Chain) FetchBlock(height int32) (b *ct.Block, err error) { +func (c *Chain) FetchBlock(height int32) (b *types.Block, err error) { if n := c.rt.getHead().node.ancestor(height); n != nil { k := utils.ConcatAll(metaBlockIndex[:], n.indexKey()) var v []byte @@ -865,7 +962,8 @@ func (c *Chain) FetchBlock(height int32) (b *ct.Block, err error) { return } - b = &ct.Block{} + b = &types.Block{} + statBlock(b) err = utils.DecodeMsgPack(v, b) if err != nil { err = errors.Wrapf(err, "fetch block %s", string(k)) @@ -876,86 +974,8 @@ func (c *Chain) FetchBlock(height int32) (b *ct.Block, err error) { return } -// FetchAckedQuery fetches the acknowledged query from local cache. -func (c *Chain) FetchAckedQuery(height int32, header *hash.Hash) ( - ack *wt.SignedAckHeader, err error, -) { - if ack, err = c.qi.getAck(height, header); err != nil || ack == nil { - for h := height - c.rt.queryTTL - 1; h <= height; h++ { - k := heightToKey(h) - ackKey := utils.ConcatAll(metaAckIndex[:], k, header[:]) - var v []byte - if v, err = c.tdb.Get(ackKey, nil); err != nil { - // if err == leveldb.ErrNotFound, just loop for next h - if err != leveldb.ErrNotFound { - err = errors.Wrapf(err, "fetch ack in height %d hash %s", h, header.String()) - return - } - } else { - var dec = &wt.SignedAckHeader{} - if err = utils.DecodeMsgPack(v, dec); err != nil { - err = errors.Wrapf(err, "fetch ack in height %d hash %s", h, header.String()) - return - } - ack = dec - break - } - } - } - if ack == nil { - err = errors.Wrapf(ErrAckQueryNotFound, "fetch ack not found") - } - return -} - -// syncAckedQuery uses RPC call to synchronize an acknowledged query from a remote node. 
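The reworked `Stop` above closes `bdb`, `tdb`, and the xenomint state in turn, remembering only the first error while still attempting every close. The same policy, condensed:

```go
package main

import (
	"errors"
	"fmt"
)

// closeAll runs every closer but reports only the first failure, the same
// policy the reworked Stop applies to bdb, tdb and the state storage.
func closeAll(closers ...func() error) (err error) {
	for _, c := range closers {
		if ierr := c(); ierr != nil && err == nil {
			err = ierr // remember the first failure, keep closing
		}
	}
	return
}

func main() {
	err := closeAll(
		func() error { return nil },
		func() error { return errors.New("tdb: close failed") },
		func() error { return errors.New("state: close failed") },
	)
	fmt.Println(err) // tdb: close failed
}
```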
-func (c *Chain) syncAckedQuery(height int32, header *hash.Hash, id proto.NodeID) ( - ack *wt.SignedAckHeader, err error, -) { - req := &MuxFetchAckedQueryReq{ - Envelope: proto.Envelope{ - // TODO(leventeliu): Add fields. - }, - DatabaseID: c.rt.databaseID, - FetchAckedQueryReq: FetchAckedQueryReq{ - Height: height, - SignedAckedHeaderHash: header, - }, - } - resp := &MuxFetchAckedQueryResp{} - - if err = c.cl.CallNode(id, route.SQLCFetchAckedQuery.String(), req, resp); err != nil { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - }).WithError(err).Error( - "Failed to fetch acked query") - return - } - - if err = c.VerifyAndPushAckedQuery(resp.Ack); err != nil { - return - } - - ack = resp.Ack - return -} - -// queryOrSyncAckedQuery tries to query an acknowledged query from local index, and also tries to -// synchronize it from a remote node if not found locally. -func (c *Chain) queryOrSyncAckedQuery(height int32, header *hash.Hash, id proto.NodeID) ( - ack *wt.SignedAckHeader, err error, -) { - if ack, err = c.FetchAckedQuery( - height, header, - ); (err == nil && ack != nil) || id == c.rt.getServer().ID { - return - } - return c.syncAckedQuery(height, header, id) -} - // CheckAndPushNewBlock implements ChainRPCServer.CheckAndPushNewBlock. -func (c *Chain) CheckAndPushNewBlock(block *ct.Block) (err error) { +func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { height := c.rt.getHeightFromTime(block.Timestamp()) head := c.rt.getHead() peers := c.rt.getPeers() @@ -992,7 +1012,7 @@ func (c *Chain) CheckAndPushNewBlock(block *ct.Block) (err error) { } // Short circuit the checking process if it's a self-produced block - if block.Producer() == c.rt.server.ID { + if block.Producer() == c.rt.server { return c.pushBlock(block) } @@ -1019,46 +1039,16 @@ func (c *Chain) CheckAndPushNewBlock(block *ct.Block) (err error) { // ... // } - // Check queries - for _, q := range block.Queries { - var ok bool - - if ok, err = c.qi.checkAckFromBlock(height, block.BlockHash(), q); err != nil { - return - } - - if !ok { - if _, err = c.syncAckedQuery(height, q, block.Producer()); err != nil { - return - } - - if _, err = c.qi.checkAckFromBlock(height, block.BlockHash(), q); err != nil { - return - } - } - } - - return c.pushBlock(block) -} - -// VerifyAndPushResponsedQuery verifies a responsed and signed query, and pushed it if valid. -func (c *Chain) VerifyAndPushResponsedQuery(resp *wt.SignedResponseHeader) (err error) { - // TODO(leventeliu): check resp. - if c.rt.queryTimeIsExpired(resp.Timestamp) { - err = errors.Wrapf(ErrQueryExpired, "Verify response query, min valid height %d, response height %d", c.rt.getMinValidHeight(), c.rt.getHeightFromTime(resp.Timestamp)) + // Replicate local state from the new block + if err = c.st.ReplayBlock(block); err != nil { return } - if err = resp.Verify(); err != nil { - err = errors.Wrapf(err, "") - return - } - - return c.pushResponedQuery(resp) + return c.pushBlock(block) } // VerifyAndPushAckedQuery verifies a acknowledged and signed query, and pushed it if valid. -func (c *Chain) VerifyAndPushAckedQuery(ack *wt.SignedAckHeader) (err error) { +func (c *Chain) VerifyAndPushAckedQuery(ack *types.SignedAckHeader) (err error) { // TODO(leventeliu): check ack. 
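With the `ReplayBlock` call above, block acceptance becomes: short-circuit self-produced blocks, verify everything else, replay the block into local state, then push. A stand-in sketch of that pipeline:

```go
package main

import (
	"errors"
	"fmt"
)

// block is a stand-in carrying only what the sketch needs.
type block struct {
	producer string
	valid    bool
}

// checkAndPush mirrors the acceptance order: self-produced blocks skip
// straight to push, remote blocks are verified, replayed, then pushed.
func checkAndPush(self string, b block, replay, push func(block) error) error {
	if b.producer == self {
		return push(b)
	}
	if !b.valid {
		return errors.New("block verification failed")
	}
	if err := replay(b); err != nil {
		return err // local state must mirror the block before indexing it
	}
	return push(b)
}

func main() {
	replay := func(block) error { fmt.Println("replayed into state"); return nil }
	push := func(block) error { fmt.Println("pushed to index"); return nil }
	fmt.Println(checkAndPush("n0", block{producer: "n1", valid: true}, replay, push))
}
```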
if c.rt.queryTimeIsExpired(ack.SignedResponseHeader().Timestamp) { err = errors.Wrapf(ErrQueryExpired, "Verify ack query, min valid height %d, ack height %d", c.rt.getMinValidHeight(), c.rt.getHeightFromTime(ack.Timestamp)) @@ -1073,7 +1063,7 @@ func (c *Chain) VerifyAndPushAckedQuery(ack *wt.SignedAckHeader) (err error) { } // UpdatePeers updates peer list of the sql-chain. -func (c *Chain) UpdatePeers(peers *kayak.Peers) error { +func (c *Chain) UpdatePeers(peers *proto.Peers) error { return c.rt.updatePeers(peers) } @@ -1088,8 +1078,7 @@ func (c *Chain) getBilling(low, high int32) (req *pt.BillingRequest, err error) var ( n *blockNode addr proto.AccountAddress - ack *wt.SignedAckHeader - lowBlock, highBlock *ct.Block + lowBlock, highBlock *types.Block billings = make(map[proto.AccountAddress]*proto.AddrAndGas) ) @@ -1101,6 +1090,11 @@ func (c *Chain) getBilling(low, high int32) (req *pt.BillingRequest, err error) } for ; n != nil && n.height >= low; n = n.parent { + // TODO(leventeliu): block maybe released, use persistence version in this case. + if n.block == nil { + continue + } + if lowBlock == nil { lowBlock = n.block } @@ -1122,22 +1116,18 @@ func (c *Chain) getBilling(low, high int32) (req *pt.BillingRequest, err error) } } - for _, v := range n.block.Queries { - if ack, err = c.queryOrSyncAckedQuery(n.height, v, n.block.Producer()); err != nil { - return - } - - if addr, err = crypto.PubKeyHash(ack.SignedResponseHeader().Signee); err != nil { + for _, v := range n.block.Acks { + if addr, err = crypto.PubKeyHash(v.SignedResponseHeader().Signee); err != nil { return } if billing, ok := billings[addr]; ok { - billing.GasAmount += c.rt.price[ack.SignedRequestHeader().QueryType] * - ack.SignedRequestHeader().BatchCount + billing.GasAmount += c.rt.price[v.SignedRequestHeader().QueryType] * + v.SignedRequestHeader().BatchCount } else { billings[addr] = &proto.AddrAndGas{ AccountAddress: addr, - RawNodeID: *ack.SignedResponseHeader().NodeID.ToRawNodeID(), + RawNodeID: *v.SignedResponseHeader().NodeID.ToRawNodeID(), GasAmount: c.rt.producingReward, } } @@ -1189,7 +1179,7 @@ func (c *Chain) collectBillingSignatures(billings *pt.BillingRequest) { go func() { defer proWG.Done() - bpReq := &ct.AdviseBillingReq{ + bpReq := &types.AdviseBillingReq{ Req: billings, } @@ -1236,7 +1226,7 @@ func (c *Chain) collectBillingSignatures(billings *pt.BillingRequest) { }() for _, s := range peers.Servers { - if s.ID != c.rt.getServer().ID { + if s != c.rt.getServer() { rpcWG.Add(1) go func(id proto.NodeID) { defer rpcWG.Done() @@ -1251,7 +1241,7 @@ func (c *Chain) collectBillingSignatures(billings *pt.BillingRequest) { } respC <- &resp.SignBillingResp - }(s.ID) + }(s) } } } @@ -1301,24 +1291,13 @@ func (c *Chain) SignBilling(req *pt.BillingRequest) ( if err = req.VerifySignatures(); err != nil { return } - if loc, err = c.getBilling(req.Header.LowHeight, req.Header.HighHeight); err != nil { return } - if err = req.Compare(loc); err != nil { return } - - // Sign block with private key - priv, err := kms.GetLocalPrivateKey() - - if err != nil { - return - } - - pub, sig, err = req.SignRequestHeader(priv, false) - + pub, sig, err = req.SignRequestHeader(c.pk, false) return } @@ -1401,3 +1380,84 @@ func (c *Chain) replicationCycle() { } } } + +// Query queries req from local chain state and returns the query results in resp. 
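`getBilling` above walks acks per height and accumulates gas per responder address. Note the asymmetry as written: an address's first sighting is credited `producingReward`, and only later acks add `price * BatchCount`. A condensed model of that aggregation, with string addresses standing in for `proto.AccountAddress`:

```go
package main

import "fmt"

// ack carries just the fields the aggregation reads.
type ack struct {
	addr       string
	queryType  string
	batchCount uint64
}

// aggregate reproduces the branch structure above: first sighting of an
// address is credited the producing reward, repeats add price * batch.
func aggregate(acks []ack, price map[string]uint64, reward uint64) map[string]uint64 {
	gas := make(map[string]uint64)
	for _, a := range acks {
		if _, ok := gas[a.addr]; ok {
			gas[a.addr] += price[a.queryType] * a.batchCount
		} else {
			gas[a.addr] = reward
		}
	}
	return gas
}

func main() {
	price := map[string]uint64{"write": 2}
	acks := []ack{
		{"addr1", "write", 10},
		{"addr1", "write", 10},
		{"addr2", "write", 5},
	}
	fmt.Println(aggregate(acks, price, 100)) // map[addr1:120 addr2:100]
}
```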
+func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) {
+	var ref *x.QueryTracker
+	if ref, resp, err = c.st.Query(req); err != nil {
+		return
+	}
+	if err = resp.Sign(c.pk); err != nil {
+		return
+	}
+	if err = c.addResponse(&resp.Header); err != nil {
+		return
+	}
+	ref.UpdateResp(resp)
+	return
+}
+
+// Replay replays a write log from another peer to replicate storage state.
+func (c *Chain) Replay(req *types.Request, resp *types.Response) (err error) {
+	switch req.Header.QueryType {
+	case types.ReadQuery:
+		return
+	case types.WriteQuery:
+		return c.st.Replay(req, resp)
+	default:
+		err = ErrInvalidRequest
+	}
+	return
+}
+
+func (c *Chain) addResponse(resp *types.SignedResponseHeader) (err error) {
+	return c.ai.addResponse(c.rt.getHeightFromTime(resp.Request.Timestamp), resp)
+}
+
+func (c *Chain) register(ack *types.SignedAckHeader) (err error) {
+	return c.ai.register(c.rt.getHeightFromTime(ack.SignedRequestHeader().Timestamp), ack)
+}
+
+func (c *Chain) remove(ack *types.SignedAckHeader) (err error) {
+	return c.ai.remove(c.rt.getHeightFromTime(ack.SignedRequestHeader().Timestamp), ack)
+}
+
+func (c *Chain) pruneBlockCache() {
+	var (
+		head    = c.rt.getHead().node
+		lastCnt int32
+	)
+	if head == nil {
+		return
+	}
+	lastCnt = head.count - c.rt.blockCacheTTL
+	// Move to last count position
+	for ; head != nil && head.count > lastCnt; head = head.parent {
+	}
+	// Prune block references
+	for ; head != nil && head.block != nil; head = head.parent {
+		head.block = nil
+	}
+}
+
+func (c *Chain) stat() {
+	var (
+		ic = atomic.LoadInt32(&multiIndexCount)
+		rc = atomic.LoadInt32(&responseCount)
+		tc = atomic.LoadInt32(&ackTrackerCount)
+		bc = atomic.LoadInt32(&cachedBlockCount)
+	)
+	// Print chain stats
+	log.WithFields(log.Fields{
+		"database_id":           c.rt.databaseID,
+		"multiIndex_count":      ic,
+		"response_header_count": rc,
+		"query_tracker_count":   tc,
+		"cached_block_count":    bc,
+	}).Info("Chain mem stats")
+	// Print xeno stats
+	c.st.Stat(c.rt.databaseID)
+}
diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go
index 5afd1f227..798854417 100644
--- a/sqlchain/chain_test.go
+++ b/sqlchain/chain_test.go
@@ -109,20 +109,20 @@ func TestMultiChain(t *testing.T) {
	}

	for i, p := range peers.Servers {
-		t.Logf("Peer #%d: %s", i, p.ID)
+		t.Logf("Peer #%d: %s", i, p)
	}

	// Create config info from created nodes
	bpinfo := &conf.BPInfo{
		PublicKey: testPubKey,
-		NodeID:    peers.Servers[testPeersNumber].ID,
+		NodeID:    peers.Servers[testPeersNumber],
		Nonce:     nis[testPeersNumber].Nonce,
	}
	knownnodes := make([]proto.Node, 0, testPeersNumber+1)
	for i, v := range peers.Servers {
		knownnodes = append(knownnodes, proto.Node{
-			ID: v.ID,
+			ID: v,
			Role: func() proto.ServerRole {
				if i < testPeersNumber {
					return proto.Miner
@@ -156,19 +156,24 @@ func TestMultiChain(t *testing.T) {
	defer server.Stop()

	// Create multiplexing service from RPC server
-	mux := NewMuxService(route.SQLChainRPCName, server)
+	mux, err := NewMuxService(route.SQLChainRPCName, server)
+
+	if err != nil {
+		t.Fatalf("Error occurred: %v", err)
+	}

	// Create chain instance
	config := &Config{
-		DatabaseID: testDatabaseID,
-		DataFile:   dbfile,
-		Genesis:    genesis,
-		Period:     testPeriod,
-		Tick:       testTick,
-		MuxService: mux,
-		Server:     peers.Servers[i],
-		Peers:      peers,
-		QueryTTL:   testQueryTTL,
+		DatabaseID:      testDatabaseID,
+		ChainFilePrefix: dbfile,
+		DataFile:        dbfile,
+		Genesis:         genesis,
+		Period:          testPeriod,
+		Tick:            testTick,
+		MuxService:      mux,
+		Server:
peers.Servers[i], + Peers: peers, + QueryTTL: testQueryTTL, } chain, err := NewChain(config) @@ -185,7 +190,6 @@ func TestMultiChain(t *testing.T) { chain: chain, } - } // Create a master BP for RPC test @@ -304,18 +308,9 @@ func TestMultiChain(t *testing.T) { i, c.rt.getPeerInfoString()) continue } - t.Logf("Checking block %v at height %d in peer %s", - node.block.BlockHash(), i, c.rt.getPeerInfoString()) - for _, v := range node.block.Queries { - if ack, err := c.queryOrSyncAckedQuery( - i, v, node.block.Producer(), - ); err != nil && ack == nil { - t.Errorf("Failed to fetch ack %v at height %d in peer %s: %v", - v, i, c.rt.getPeerInfoString(), err) - } else { - t.Logf("Successed to fetch ack %v at height %d in peer %s", - v, i, c.rt.getPeerInfoString()) - } + if node.block != nil { + t.Logf("Checking block %v at height %d in peer %s", + node.block.BlockHash(), i, c.rt.getPeerInfoString()) } } }(v.chain) @@ -326,7 +321,7 @@ func TestMultiChain(t *testing.T) { sC := make(chan struct{}) wg := &sync.WaitGroup{} wk := &nodeProfile{ - NodeID: peers.Servers[i].ID, + NodeID: peers.Servers[i], PrivateKey: testPrivKey, PublicKey: testPubKey, } @@ -352,7 +347,7 @@ func TestMultiChain(t *testing.T) { if err != nil { t.Errorf("Error occurred: %v", err) - } else if err = c.VerifyAndPushResponsedQuery(resp); err != nil { + } else if err = c.addResponse(resp); err != nil { t.Errorf("Error occurred: %v", err) } diff --git a/sqlchain/config.go b/sqlchain/config.go index 627f0380a..113e99718 100644 --- a/sqlchain/config.go +++ b/sqlchain/config.go @@ -19,30 +19,37 @@ package sqlchain import ( "time" - "github.com/CovenantSQL/CovenantSQL/kayak" + pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" ) // Config represents a sql-chain config. type Config struct { - DatabaseID proto.DatabaseID - DataFile string + DatabaseID proto.DatabaseID + ChainFilePrefix string + DataFile string - Genesis *ct.Block + Genesis *types.Block Period time.Duration Tick time.Duration MuxService *MuxService - Peers *kayak.Peers - Server *kayak.Server + Peers *proto.Peers + Server proto.NodeID // Price sets query price in gases. - Price map[wt.QueryType]uint64 + Price map[types.QueryType]uint64 ProducingReward uint64 BillingPeriods int32 // QueryTTL sets the unacknowledged query TTL in block periods. QueryTTL int32 + + BlockCacheTTL int32 + + // DBAccount info + TokenType pt.TokenType + GasPrice uint64 + UpdatePeriod uint64 } diff --git a/sqlchain/errors.go b/sqlchain/errors.go index 0cbf1c682..10fa9cde8 100644 --- a/sqlchain/errors.go +++ b/sqlchain/errors.go @@ -68,7 +68,7 @@ var ( // ErrCorruptedIndex indicates that a corrupted index item is detected. ErrCorruptedIndex = errors.New("corrupted index item") - // ErrUnknownMuxRequest indicates that the a multiplexing request endpoint is not found. + // ErrUnknownMuxRequest indicates that the multiplexing request endpoint is not found. ErrUnknownMuxRequest = errors.New("unknown multiplexing request") // ErrUnknownProducer indicates that the block has an unknown producer. @@ -77,8 +77,8 @@ var ( // ErrInvalidProducer indicates that the block has an invalid producer. ErrInvalidProducer = errors.New("invalid block producer") - // ErrUnavailableBillingRang indicates that the billing range is not abailable now. 
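`pruneBlockCache`, added in `chain.go` above, walks back from the head, keeps the `blockCacheTTL` most recent counts, and nils out older `block` references so `FetchBlock` falls back to LevelDB. The same walk over stand-in nodes:

```go
package main

import "fmt"

// node is a stand-in for blockNode; block is nil once pruned.
type node struct {
	parent *node
	count  int32
	block  *string
}

// prune keeps the ttl most recent counts and releases everything older,
// stopping at the first node that was already pruned.
func prune(head *node, ttl int32) {
	if head == nil {
		return
	}
	last := head.count - ttl
	n := head
	for ; n != nil && n.count > last; n = n.parent {
		// inside the TTL window: keep the cached block
	}
	for ; n != nil && n.block != nil; n = n.parent {
		n.block = nil // released; FetchBlock would fall back to LevelDB
	}
}

func main() {
	blocks := make([]string, 5)
	var head *node
	for i := range blocks {
		blocks[i] = fmt.Sprintf("b%d", i)
		head = &node{parent: head, count: int32(i), block: &blocks[i]}
	}
	prune(head, 2)
	for n := head; n != nil; n = n.parent {
		fmt.Println(n.count, n.block != nil) // 4,3 cached; 2,1,0 released
	}
}
```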
- ErrUnavailableBillingRang = errors.New("unabailable billing range") + // ErrUnavailableBillingRang indicates that the billing range is not available now. + ErrUnavailableBillingRang = errors.New("unavailable billing range") // ErrHashNotMatch indicates that a message hash value doesn't match the original hash value // given in its hash field. @@ -89,4 +89,14 @@ var ( // ErrAckQueryNotFound indicates that an acknowledged query record is not found. ErrAckQueryNotFound = errors.New("acknowledged query not found") + + // ErrQueryNotFound indicates that a query is not found in the index. + ErrQueryNotFound = errors.New("query not found") + + // ErrInvalidRequest indicates the query is invalid. + ErrInvalidRequest = errors.New("invalid request") + + // ErrResponseSeqNotMatch indicates that a response sequence id doesn't match the original one + // in the index. + ErrResponseSeqNotMatch = errors.New("response sequence id doesn't match") ) diff --git a/sqlchain/mux.go b/sqlchain/mux.go index 9d0c8f060..7dea6dfea 100644 --- a/sqlchain/mux.go +++ b/sqlchain/mux.go @@ -30,13 +30,13 @@ type MuxService struct { } // NewMuxService creates a new multiplexing service and registers it to rpc server. -func NewMuxService(serviceName string, server *rpc.Server) (service *MuxService) { +func NewMuxService(serviceName string, server *rpc.Server) (service *MuxService, err error) { service = &MuxService{ ServiceName: serviceName, } - server.RegisterService(serviceName, service) - return service + err = server.RegisterService(serviceName, service) + return } func (s *MuxService) register(id proto.DatabaseID, service *ChainRPCService) { @@ -75,20 +75,6 @@ type MuxAdviseBinLogResp struct { AdviseBinLogResp } -// MuxAdviseResponsedQueryReq defines a request of the AdviseAckedQuery RPC method. -type MuxAdviseResponsedQueryReq struct { - proto.Envelope - proto.DatabaseID - AdviseResponsedQueryReq -} - -// MuxAdviseResponsedQueryResp defines a response of the AdviseAckedQuery RPC method. -type MuxAdviseResponsedQueryResp struct { - proto.Envelope - proto.DatabaseID - AdviseResponsedQueryResp -} - // MuxAdviseAckedQueryReq defines a request of the AdviseAckedQuery RPC method. type MuxAdviseAckedQueryReq struct { proto.Envelope @@ -117,20 +103,6 @@ type MuxFetchBlockResp struct { FetchBlockResp } -// MuxFetchAckedQueryReq defines a request of the FetchAckedQuery RPC method. -type MuxFetchAckedQueryReq struct { - proto.Envelope - proto.DatabaseID - FetchAckedQueryReq -} - -// MuxFetchAckedQueryResp defines a request of the FetchAckedQuery RPC method. -type MuxFetchAckedQueryResp struct { - proto.Envelope - proto.DatabaseID - FetchAckedQueryResp -} - // MuxSignBillingReq defines a request of the SignBilling RPC method. type MuxSignBillingReq struct { proto.Envelope @@ -209,19 +181,6 @@ func (s *MuxService) AdviseBinLog(req *MuxAdviseBinLogReq, resp *MuxAdviseBinLog return ErrUnknownMuxRequest } -// AdviseResponsedQuery is the RPC method to advise a new responsed query to the target server. -func (s *MuxService) AdviseResponsedQuery( - req *MuxAdviseResponsedQueryReq, resp *MuxAdviseResponsedQueryResp) error { - if v, ok := s.serviceMap.Load(req.DatabaseID); ok { - resp.Envelope = req.Envelope - resp.DatabaseID = req.DatabaseID - return v.(*ChainRPCService).AdviseResponsedQuery( - &req.AdviseResponsedQueryReq, &resp.AdviseResponsedQueryResp) - } - - return ErrUnknownMuxRequest -} - // AdviseAckedQuery is the RPC method to advise a new acknowledged query to the target server. 
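`MuxService` dispatches each RPC to a per-database `ChainRPCService` via a `Load` on its service map, returning `ErrUnknownMuxRequest` for unregistered IDs. A minimal model of that dispatch, assuming the map is a `sync.Map` as the `Load`/type-assert pattern suggests:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errUnknownMux = errors.New("unknown multiplexing request")

// chainSvc stands in for ChainRPCService.
type chainSvc struct{ id string }

func (c *chainSvc) adviseNewBlock() error {
	fmt.Println("block delivered to", c.id)
	return nil
}

// mux stands in for MuxService with its database-keyed service map.
type mux struct{ services sync.Map }

func (m *mux) adviseNewBlock(dbID string) error {
	if v, ok := m.services.Load(dbID); ok {
		return v.(*chainSvc).adviseNewBlock()
	}
	return errUnknownMux
}

func main() {
	m := &mux{}
	m.services.Store("db-1", &chainSvc{id: "db-1"})
	fmt.Println(m.adviseNewBlock("db-1")) // dispatched
	fmt.Println(m.adviseNewBlock("db-2")) // unknown multiplexing request
}
```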
func (s *MuxService) AdviseAckedQuery( req *MuxAdviseAckedQueryReq, resp *MuxAdviseAckedQueryResp) error { @@ -246,19 +205,6 @@ func (s *MuxService) FetchBlock(req *MuxFetchBlockReq, resp *MuxFetchBlockResp) return ErrUnknownMuxRequest } -// FetchAckedQuery is the RPC method to fetch a known block from the target server. -func (s *MuxService) FetchAckedQuery( - req *MuxFetchAckedQueryReq, resp *MuxFetchAckedQueryResp) (err error) { - if v, ok := s.serviceMap.Load(req.DatabaseID); ok { - resp.Envelope = req.Envelope - resp.DatabaseID = req.DatabaseID - return v.(*ChainRPCService).FetchAckedQuery( - &req.FetchAckedQueryReq, &resp.FetchAckedQueryResp) - } - - return ErrUnknownMuxRequest -} - // SignBilling is the RPC method to get signature for a billing request from the target server. func (s *MuxService) SignBilling(req *MuxSignBillingReq, resp *MuxSignBillingResp) (err error) { if v, ok := s.serviceMap.Load(req.DatabaseID); ok { diff --git a/sqlchain/observer.go b/sqlchain/observer.go index 495c723d3..e1fa19960 100644 --- a/sqlchain/observer.go +++ b/sqlchain/observer.go @@ -21,23 +21,17 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) /* -Observer implements interface like a sqlchain including AdviseNewBlock/AdviseAckedQuery. +Observer implements method AdviseNewBlock to receive blocks from sqlchain node. Request/Response entity from sqlchain api is re-used for simplicity. type Observer interface { AdviseNewBlock(*MuxAdviseNewBlockReq, *MuxAdviseNewBlockResp) error - AdviseAckedQuery(*MuxAdviseAckedQueryReq, *MuxAdviseAckedQueryResp) error } - -The observer could call DBS.GetRequest to fetch original request entity from the DBMS service. -The whole observation of block producing and write query execution would be as follows. -AdviseAckedQuery -> AdviseNewBlock -> GetRequest. */ // observerReplicator defines observer replication state. 
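The `observerReplicator` rework just below wraps the channel close in `sync.Once`, which is what makes `stop` idempotent: with a bare `close(stopCh)`, two racing `stop` calls could both reach the close and panic, and under `Once` the inner select becomes belt-and-braces. A distilled version:

```go
package main

import (
	"fmt"
	"sync"
)

type replicator struct {
	stopOnce sync.Once
	stopCh   chan struct{}
}

// stop is safe to call from any number of goroutines: the close runs once.
func (r *replicator) stop() {
	r.stopOnce.Do(func() { close(r.stopCh) })
}

func main() {
	r := &replicator{stopCh: make(chan struct{})}
	r.stop()
	r.stop() // a second call would panic without the sync.Once guard
	<-r.stopCh
	fmt.Println("stopped exactly once")
}
```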
@@ -45,6 +39,7 @@ type observerReplicator struct { nodeID proto.NodeID height int32 triggerCh chan struct{} + stopOnce sync.Once stopCh chan struct{} replLock sync.Mutex c *Chain @@ -69,11 +64,13 @@ func (r *observerReplicator) setNewHeight(newHeight int32) { } func (r *observerReplicator) stop() { - select { - case <-r.stopCh: - default: - close(r.stopCh) - } + r.stopOnce.Do(func() { + select { + case <-r.stopCh: + default: + close(r.stopCh) + } + }) } func (r *observerReplicator) replicate() { @@ -90,7 +87,7 @@ func (r *observerReplicator) replicate() { curHeight := r.c.rt.getHead().Height - if r.height == ct.ReplicateFromNewest { + if r.height == types.ReplicateFromNewest { log.WithFields(log.Fields{ "node": r.nodeID, "height": curHeight, @@ -118,7 +115,7 @@ func (r *observerReplicator) replicate() { }).Debug("try replicating block for observer") // replicate one record - var block *ct.Block + var block *types.Block if block, err = r.c.FetchBlock(r.height); err != nil { // fetch block failed log.WithField("height", r.height).WithError(err).Warning("fetch block with height failed") @@ -133,7 +130,7 @@ func (r *observerReplicator) replicate() { // find last available block log.Debug("start block height hole detection") - var lastBlock, nextBlock *ct.Block + var lastBlock, nextBlock *types.Block var lastHeight, nextHeight int32 for h := r.height - 1; h >= 0; h-- { @@ -202,36 +199,6 @@ func (r *observerReplicator) replicate() { }).Debug("finish block height hole detection, skipping") } - // fetch acks in block - for _, h := range block.Queries { - var ack *wt.SignedAckHeader - if ack, err = r.c.queryOrSyncAckedQuery(r.height, h, block.Producer()); err != nil || ack == nil { - log.WithFields(log.Fields{ - "ack": h.String(), - "height": r.height, - }).WithError(err).Warning("fetch ack of block height") - continue - } - - // send advise to this block - req := &MuxAdviseAckedQueryReq{ - Envelope: proto.Envelope{}, - DatabaseID: r.c.rt.databaseID, - AdviseAckedQueryReq: AdviseAckedQueryReq{ - Query: ack, - }, - } - resp := &MuxAdviseAckedQueryResp{} - err = r.c.cl.CallNode(r.nodeID, route.OBSAdviseAckedQuery.String(), req, resp) - if err != nil { - log.WithFields(log.Fields{ - "node": r.nodeID, - "height": r.height, - }).WithError(err).Warning("send ack advise to observer") - return - } - } - // send block req := &MuxAdviseNewBlockReq{ Envelope: proto.Envelope{}, diff --git a/sqlchain/otypes/billing_req.go b/sqlchain/otypes/billing_req.go new file mode 100644 index 000000000..59521a6d4 --- /dev/null +++ b/sqlchain/otypes/billing_req.go @@ -0,0 +1,34 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package otypes + +import ( + pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +// AdviseBillingReq defines a request of the AdviseBillingRequest RPC method. 
+type AdviseBillingReq struct {
+	proto.Envelope
+	Req *pt.BillingRequest
+}
+
+// AdviseBillingResp defines a response of the AdviseBillingRequest RPC method.
+type AdviseBillingResp struct {
+	proto.Envelope
+	Resp *pt.BillingRequest
+}
diff --git a/sqlchain/types/block.go b/sqlchain/otypes/block.go
similarity index 81%
rename from sqlchain/types/block.go
rename to sqlchain/otypes/block.go
index 22b10167d..9cf4b8ff0 100644
--- a/sqlchain/types/block.go
+++ b/sqlchain/otypes/block.go
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package types
+package otypes

import (
	"reflect"
@@ -40,24 +40,6 @@ type Header struct {
	Timestamp time.Time
}

-//// MarshalHash marshals for hash
-//func (h *Header) MarshalHash() ([]byte, error) {
-//	buffer := bytes.NewBuffer(nil)
-//
-//	if err := utils.WriteElements(buffer, binary.BigEndian,
-//		h.Version,
-//		h.Producer,
-//		&h.GenesisHash,
-//		&h.ParentHash,
-//		&h.MerkleRoot,
-//		h.Timestamp,
-//	); err != nil {
-//		return nil, err
-//	}
-//
-//	return buffer.Bytes(), nil
-//}
-
// SignedHeader is block header along with its producer signature.
type SignedHeader struct {
	Header
@@ -66,27 +48,6 @@ type SignedHeader struct {
	Signature *asymmetric.Signature
}

-//// MarshalHash marshals for hash.
-//func (s *SignedHeader) MarshalHash() ([]byte, error) {
-//	buffer := bytes.NewBuffer(nil)
-//
-//	if err := utils.WriteElements(buffer, binary.BigEndian,
-//		s.Version,
-//		s.Producer,
-//		&s.GenesisHash,
-//		&s.ParentHash,
-//		&s.MerkleRoot,
-//		s.Timestamp,
-//		&s.BlockHash,
-//		s.Signee,
-//		s.Signature,
-//	); err != nil {
-//		return nil, err
-//	}
-//
-//	return buffer.Bytes(), nil
-//}
-
// Verify verifies the signature of the signed header.
func (s *SignedHeader) Verify() error {
	if !s.Signature.Verify(s.BlockHash[:], s.Signee) {
@@ -142,20 +103,6 @@ func (b *Block) PackAndSignBlock(signer *asymmetric.PrivateKey) (err error) {
	return
}

-//// MarshalHash marshals for hash
-//func (b *Block) MarshalHash() ([]byte, error) {
-//	buffer := bytes.NewBuffer(nil)
-//
-//	if err := utils.WriteElements(buffer, binary.BigEndian,
-//		&b.SignedHeader,
-//		b.Queries,
-//	); err != nil {
-//		return nil, err
-//	}
-//
-//	return buffer.Bytes(), nil
-//}
-
// PushAckedQuery pushes a acknowledged and verified query into the block.
func (b *Block) PushAckedQuery(h *hash.Hash) {
	if b.Queries == nil {
diff --git a/sqlchain/types/block_gen.go b/sqlchain/otypes/block_gen.go
similarity index 99%
rename from sqlchain/types/block_gen.go
rename to sqlchain/otypes/block_gen.go
index 1d7b3630c..ebd75b2fe 100644
--- a/sqlchain/types/block_gen.go
+++ b/sqlchain/otypes/block_gen.go
@@ -1,4 +1,4 @@
-package types
+package otypes

// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT.

diff --git a/sqlchain/types/block_gen_test.go b/sqlchain/otypes/block_gen_test.go
similarity index 99%
rename from sqlchain/types/block_gen_test.go
rename to sqlchain/otypes/block_gen_test.go
index 8266f1438..7743f3729 100644
--- a/sqlchain/types/block_gen_test.go
+++ b/sqlchain/otypes/block_gen_test.go
@@ -1,4 +1,4 @@
-package types
+package otypes

// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT.

diff --git a/sqlchain/types/block_test.go b/sqlchain/otypes/block_test.go
similarity index 99%
rename from sqlchain/types/block_test.go
rename to sqlchain/otypes/block_test.go
index 2a0f74dee..a3dfdb1fe 100644
--- a/sqlchain/types/block_test.go
+++ b/sqlchain/otypes/block_test.go
@@ -14,7 +14,7 @@
 * limitations under the License.
*/ -package types +package otypes import ( "bytes" diff --git a/common/doc.go b/sqlchain/otypes/doc.go similarity index 86% rename from common/doc.go rename to sqlchain/otypes/doc.go index c42b8932e..00ef1d7d4 100644 --- a/common/doc.go +++ b/sqlchain/otypes/doc.go @@ -14,5 +14,5 @@ * limitations under the License. */ -// Package common defines some common types which are used by multiple modules. -package common +// Package otypes defines commonly used types for sql-chain. +package otypes diff --git a/sqlchain/types/errors.go b/sqlchain/otypes/errors.go similarity index 98% rename from sqlchain/types/errors.go rename to sqlchain/otypes/errors.go index 7d856cb81..9a065843d 100644 --- a/sqlchain/types/errors.go +++ b/sqlchain/otypes/errors.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "errors" diff --git a/common/types.go b/sqlchain/otypes/observer.go similarity index 69% rename from common/types.go rename to sqlchain/otypes/observer.go index eebb21745..3eb20dce2 100644 --- a/common/types.go +++ b/sqlchain/otypes/observer.go @@ -14,14 +14,11 @@ * limitations under the License. */ -package common +package otypes const ( - // AddressLength is the fixed length of a CovenantSQL node address. - AddressLength = 64 - // UUIDLength is the fixed length of a UUID. - UUIDLength = 16 + // ReplicateFromBeginning is the replication offset observes from genesis block. + ReplicateFromBeginning = int32(0) + // ReplicateFromNewest is the replication offset observes from block head of current node. + ReplicateFromNewest = int32(-1) ) - -// UUID is a unique identity which may be used as a Raft transaction ID. -type UUID [UUIDLength]byte diff --git a/sqlchain/types/xxx_test.go b/sqlchain/otypes/xxx_test.go similarity index 99% rename from sqlchain/types/xxx_test.go rename to sqlchain/otypes/xxx_test.go index d6545d79d..428be91c5 100644 --- a/sqlchain/types/xxx_test.go +++ b/sqlchain/otypes/xxx_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "io/ioutil" diff --git a/sqlchain/queryindex.go b/sqlchain/queryindex.go deleted file mode 100644 index 27fa77ca9..000000000 --- a/sqlchain/queryindex.go +++ /dev/null @@ -1,574 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sqlchain - -// TODO(leventeliu): use pooled objects to speed up this index. - -import ( - "github.com/pkg/errors" - "sync" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" -) - -var ( - placeHolder = &hash.Hash{} -) - -// requestTracker defines a tracker of a particular database query request. -// We use it to track and update queries in this index system. -type requestTracker struct { - // TODO(leventeliu): maybe we don't need them to be "signed" here. 
Given that the response or - // Ack is already verified, simply use Header. - response *wt.SignedResponseHeader - ack *wt.SignedAckHeader - // signedBlock is the hash of the block in the currently best chain which contains this query. - signedBlock *hash.Hash -} - -// queryTracker defines a tracker of a particular database query. It may contain multiple queries -// to differe workers. -type queryTracker struct { - firstAck *requestTracker - queries []*requestTracker -} - -// newQueryTracker returns a new queryTracker reference. -func newQueryTracker() *queryTracker { - return &queryTracker{ - // TODO(leventeliu): set appropriate capacity. - firstAck: nil, - queries: make([]*requestTracker, 0, 10), - } -} - -// updateAck updates the query tracker with a verified SignedAckHeader. -func (s *requestTracker) updateAck(ack *wt.SignedAckHeader) (isNew bool, err error) { - if s.ack == nil { - // A later Ack can overwrite the original Response setting - *s = requestTracker{ - response: ack.SignedResponseHeader(), - ack: ack, - } - - isNew = true - } else if !s.ack.HeaderHash.IsEqual(&ack.HeaderHash) { - // This may happen when a client sends multiple acknowledgements for a same query (same - // response header hash) - err = ErrMultipleAckOfResponse - } // else it's same as s.Ack, let's try not to overwrite it - - return -} - -// hashIndex defines a requestTracker index using hash as key. -type hashIndex map[hash.Hash]*requestTracker - -// seqIndex defines a queryTracker index using sequence number as key. -type seqIndex map[wt.QueryKey]*queryTracker - -// ensure returns the *queryTracker associated with the given key. It creates a new item if the -// key doesn't exist. -func (i seqIndex) ensure(k wt.QueryKey) (v *queryTracker) { - var ok bool - - if v, ok = i[k]; !ok { - v = newQueryTracker() - i[k] = v - } - - return -} - -// multiIndex defines a combination of multiple indexes. -// -// Index layout is as following: -// -// respIndex +----------------+ -// +---------------------------+->| requestTracker | +---------------------------+ -// | ... | | | | +-response |------>| signedresponseheader | -// +--------+ | | | +-ack (nil) | | +-ResponseHeader | -// | hash#1 |-----+ | | +-... | | | +-SignedRequestHeader | -// +--------+ | +----------------+ | | | +-RequestHeader | -// | ... | | | | | | +-... | -// +--------+ +------------------+ | | | | | +-SeqNo: seq#0 | -// | hash#3 |-----+ +->| queryTracker | | | | | | +-... | -// +--------+ | | | +-firstAck (nil) | | | | | +-HeaderHash = hash#0 | -// | ... | | | | +-queries | | | | | +-Signee ====> pubk#0 | -// +--------+ | | | +-[0] |--+ | | | +-Signature => sign#0 | -// | hash#6 |--+ | | | +-... | | | +-... | -// +--------+ | | | +------------------+ | +-HeaderHash = hash#1 | -// | ... | | | | | +-Signee ====> pubk#1 | -// | | | | +-Signature => sign#1 | -// | | | +---------------------------+ -// | | | +----------------+ -// | +-------------+---------+-+--->| requestTracker | -// | | | | | | +-response |----+ +-------------------------------+ -// ackindex | | | | | | +-ack |----|->| SignedAckHeader | -// | | | | | | +-... | | | +-AckHeader | -// | ... | | | | | | +----------------+ +->| | +-SignedResponseHeader | -// +--------+ | | | | | | | | +-ResponseHeader | -// | hash#4 |--|----------------+ | | | | | | +-SignedRequestHeader | -// +--------+ | | | | | | | | | +-RequestHeader | -// | ... | | | | | | | | | | | +-... | -// | | | | | | | | | | +-SeqNo: seq#1 | -// | | | | | | | | | | +-... 
| -// | | | | | | | | | +-HeaderHash = hash#2 | -// | | | | | | | | | +-Signee ====> pubk#2 | -// | | | | | | | | | +-Signature => sign#2 | -// seqIndex | | | | +----------------+ | | | | +-... | -// +------------------------------+->| requestTracker | | | | +-HeaderHash = hash#3 | -// | ... | | | | | | +-response |---+ | | | +-signee ====> pubk#3 | -// +--------+ | | | | | +-ack (nil) | | | | | +-Signature => sign#3 | -// | seq#0 |--------+ | | | | +-... | | | | +-... | -// +--------+ | | | +----------------+ | | +-HeaderHash = hash#4 | -// | ... | | | | | | +-Signee ====> pubk#2 | -// +--------+ +--------------+ | | | | | +-Signature => sign#4 | -// | seq#1 |---------->| queryTracker | | | | | +-------------------------------+ -// +--------+ | +-firstAck |--+ | | | -// | ... | | +-queries | | | | -// | +-[0] |----+ | | -// | +-[1] |------+ | +---------------------------+ -// | +-... | +-->| SignedResponseHeader | -// +--------------+ | +-ResponseHeader | -// | | +-SignedRequestHeader | -// | | | +-RequestHeader | -// | | | | +-... | -// | | | | +-SeqNo: seq#1 | -// | | | | +-... | -// | | | +-HeaderHash = hash#5 | -// | | | +-Signee ====> pubk#5 | -// | | | +-Signature => sign#5 | -// | | +-... | -// | +-HeaderHash = hash#6 | -// | +-Signee ====> pubk#6 | -// | +-Signature => sign#6 | -// +---------------------------+ -// -type multiIndex struct { - sync.Mutex - respIndex, ackIndex hashIndex - seqIndex -} - -// newMultiIndex returns a new multiIndex reference. -func newMultiIndex() *multiIndex { - return &multiIndex{ - respIndex: make(map[hash.Hash]*requestTracker), - ackIndex: make(map[hash.Hash]*requestTracker), - seqIndex: make(map[wt.QueryKey]*queryTracker), - } -} - -// addResponse adds the responsed query to the index. -func (i *multiIndex) addResponse(resp *wt.SignedResponseHeader) (err error) { - i.Lock() - defer i.Unlock() - - if v, ok := i.respIndex[resp.HeaderHash]; ok { - if v == nil || v.response == nil { - // TODO(leventeliu): consider to panic. - err = ErrCorruptedIndex - return - } - - // Given that `resp` is already verified by user, its header should be deeply equal to - // v.response.ResponseHeader. - // Considering that we may allow a node to update its key pair on-the-fly, just overwrite - // this response. - v.response = resp - return - } - - // Create new item - s := &requestTracker{ - response: resp, - } - - i.respIndex[resp.HeaderHash] = s - q := i.seqIndex.ensure(resp.Request.GetQueryKey()) - q.queries = append(q.queries, s) - - return nil -} - -// addAck adds the acknowledged query to the index. -func (i *multiIndex) addAck(ack *wt.SignedAckHeader) (err error) { - i.Lock() - defer i.Unlock() - var v *requestTracker - var ok bool - q := i.seqIndex.ensure(ack.SignedRequestHeader().GetQueryKey()) - - if v, ok = i.respIndex[ack.ResponseHeaderHash()]; ok { - if v == nil || v.response == nil { - // TODO(leventeliu): consider to panic. 
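			// Note: a nil tracker, or a tracker with a nil response, can only
			// appear here if the in-memory index invariant was already broken;
			// returning ErrCorruptedIndex is preferred to panicking for now
			// (see the TODO above).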
- err = ErrCorruptedIndex - return - } - - // Add hash -> ack index anyway, so that we can find the request tracker later, even if - // there is a earlier acknowledgement for the same request - i.ackIndex[ack.HeaderHash] = v - - // This also updates the item indexed by ackIndex and seqIndex - var isNew bool - - if isNew, err = v.updateAck(ack); err != nil { - return - } - - if isNew { - q.queries = append(q.queries, v) - } - } else { - // Build new queryTracker and update both indexes - v = &requestTracker{ - response: ack.SignedResponseHeader(), - ack: ack, - } - - i.respIndex[ack.ResponseHeaderHash()] = v - i.ackIndex[ack.HeaderHash] = v - q.queries = append(q.queries, v) - } - - // TODO(leventeliu): - // This query has multiple signed acknowledgements. It may be caused by a network problem. - // We will keep the first ack counted anyway. But, should we report it to someone? - if q.firstAck == nil { - q.firstAck = v - } else if !q.firstAck.ack.HeaderHash.IsEqual(&ack.HeaderHash) { - err = ErrMultipleAckOfSeqNo - } - - return -} - -func (i *multiIndex) getAck(header *hash.Hash) (ack *wt.SignedAckHeader, ok bool) { - i.Lock() - defer i.Unlock() - - var t *requestTracker - if t, ok = i.ackIndex[*header]; ok { - ack = t.ack - } - - return -} - -// setSignedBlock sets the signed block of the acknowledged query. -func (i *multiIndex) setSignedBlock(blockHash *hash.Hash, ackHeaderHash *hash.Hash) { - i.Lock() - defer i.Unlock() - - if v, ok := i.ackIndex[*ackHeaderHash]; ok { - v.signedBlock = blockHash - } -} - -// resetSignedBlock resets the signed block of the acknowledged query. -func (i *multiIndex) resetSignedBlock(blockHash *hash.Hash, ackHeaderHash *hash.Hash) { - i.Lock() - defer i.Unlock() - - if v, ok := i.ackIndex[*ackHeaderHash]; ok { - // TODO(leventeliu): check if v.signedBlock equals blockHash. - v.signedBlock = nil - } -} - -// checkBeforeExpire checks the index and does some necessary work before it expires. -func (i *multiIndex) checkBeforeExpire() { - i.Lock() - defer i.Unlock() - - for _, q := range i.seqIndex { - if ack := q.firstAck; ack == nil { - // TODO(leventeliu): - // This query is not acknowledged and expires now. - } else if ack.signedBlock == nil || ack.signedBlock == placeHolder { - // TODO(leventeliu): - // This query was acknowledged normally but collectors didn't pack it in any block. - // There is definitely something wrong with them. - } - - for _, s := range q.queries { - if s != q.firstAck { - // TODO(leventeliu): so these guys lost the competition in this query. Should we - // do something about it? - } - } - } -} - -// checkAckFromBlock checks a acknowledged query from a block in this index. 
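// A minimal lifecycle sketch of the index removed in this file, kept here as a
// reference while reviewing the deletion. The resp, ack and blockHash values
// are hypothetical placeholders for a verified wt.SignedResponseHeader, a
// verified wt.SignedAckHeader, and the hash of the block packing the ack:
//
//	idx := newMultiIndex()
//	_ = idx.addResponse(resp)                          // a worker responded
//	_ = idx.addAck(ack)                                // the client acknowledged
//	idx.setSignedBlock(blockHash, &ack.HeaderHash)     // a block packed the ack
//	isKnown, err := idx.checkAckFromBlock(blockHash, &ack.HeaderHash)
//	idx.checkBeforeExpire()                            // audit before eviction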
-func (i *multiIndex) checkAckFromBlock(b *hash.Hash, ack *hash.Hash) (isKnown bool, err error) { - i.Lock() - defer i.Unlock() - - // Check acknowledgement - q, isKnown := i.ackIndex[*ack] - - if !isKnown { - return - } - - if q.signedBlock != nil && !q.signedBlock.IsEqual(b) { - err = ErrQuerySignedByAnotherBlock - log.WithFields(log.Fields{ - "query": ack.String(), - "block": b.String(), - "signed_block": q.signedBlock.String(), - }).WithError(err).Error( - "Failed to check acknowledgement from block") - return - } - - qs := i.seqIndex[q.ack.SignedRequestHeader().GetQueryKey()] - - // Check it as a first acknowledgement - if i.respIndex[q.response.HeaderHash] != q || qs == nil || qs.firstAck == nil { - err = ErrCorruptedIndex - return - } - - // If `q` is not considered first acknowledgement of this query locally - if qs.firstAck != q { - if qs.firstAck.signedBlock != nil { - err = ErrQuerySignedByAnotherBlock - log.WithFields(log.Fields{ - "query": ack.String(), - "block": b.String(), - "signed_block": func() string { - if q.signedBlock != nil { - return q.signedBlock.String() - } - return "nil" - }(), - }).WithError(err).Error( - "Failed to check acknowledgement from block") - return - } - - // But if the acknowledgement is not signed yet, it is also acceptable to promote another - // acknowledgement - qs.firstAck = q - } - - return -} - -// markAndCollectUnsignedAcks marks and collects all the unsigned acknowledgements in the index. -func (i *multiIndex) markAndCollectUnsignedAcks(qs *[]*hash.Hash) { - i.Lock() - defer i.Unlock() - - for _, q := range i.seqIndex { - if ack := q.firstAck; ack != nil && ack.signedBlock == nil { - ack.signedBlock = placeHolder - *qs = append(*qs, &ack.ack.HeaderHash) - } - } -} - -// heightIndex defines a MultiIndex index using height as key. -type heightIndex struct { - sync.Mutex - index map[int32]*multiIndex -} - -// ensureHeight returns the *MultiIndex associated with the given height. It creates a new item if -// the key doesn't exist. -func (i *heightIndex) ensureHeight(h int32) (v *multiIndex) { - i.Lock() - defer i.Unlock() - v, ok := i.index[h] - - if !ok { - v = newMultiIndex() - i.index[h] = v - } - - return -} - -// ensureRange creates new *multiIndex items associated within the given height range [l, h) for -// those don't exist. -func (i *heightIndex) ensureRange(l, h int32) { - i.Lock() - defer i.Unlock() - - for x := l; x < h; x++ { - if _, ok := i.index[x]; !ok { - i.index[x] = newMultiIndex() - } - } -} - -func (i *heightIndex) get(k int32) (v *multiIndex, ok bool) { - i.Lock() - defer i.Unlock() - v, ok = i.index[k] - return -} - -func (i *heightIndex) del(k int32) { - i.Lock() - defer i.Unlock() - delete(i.index, k) -} - -// queryIndex defines a query index maintainer. -type queryIndex struct { - heightIndex *heightIndex - - sync.Mutex - barrier int32 -} - -func (i *queryIndex) getBarrier() int32 { - i.Lock() - defer i.Unlock() - return i.barrier -} - -func (i *queryIndex) setBarrier(b int32) { - i.Lock() - defer i.Unlock() - i.barrier = b -} - -// newQueryIndex returns a new queryIndex reference. -func newQueryIndex() *queryIndex { - return &queryIndex{ - heightIndex: &heightIndex{ - index: make(map[int32]*multiIndex), - }, - } -} - -// addResponse adds the responsed query to the index. -func (i *queryIndex) addResponse(h int32, resp *wt.SignedResponseHeader) error { - // TODO(leventeliu): we should ensure that the Request uses coordinated timestamp, instead of - // any client local time. 
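	// Note: responses are bucketed by block height here; once advanceBarrier
	// (below) passes a height, the whole bucket is audited by
	// checkBeforeExpire and dropped, so entries under the barrier count as
	// expired.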
- return i.heightIndex.ensureHeight(h).addResponse(resp) -} - -// addAck adds the acknowledged query to the index. -func (i *queryIndex) addAck(h int32, ack *wt.SignedAckHeader) error { - return i.heightIndex.ensureHeight(h).addAck(ack) -} - -// checkAckFromBlock checks a acknowledged query from a block at the given height. -func (i *queryIndex) checkAckFromBlock(h int32, b *hash.Hash, ack *hash.Hash) ( - isKnown bool, err error) { - l := i.getBarrier() - - if h < l { - err = errors.Wrapf(ErrQueryExpired, "check Ack, height %d, barrier %d", h, l) - return - } - - for x := l; x <= h; x++ { - if hi, ok := i.heightIndex.get(x); ok { - if isKnown, err = hi.checkAckFromBlock(b, ack); err != nil || isKnown { - return - } - } - } - - return -} - -// setSignedBlock updates the signed block in index for the acknowledged queries in the block. -func (i *queryIndex) setSignedBlock(h int32, block *ct.Block) { - b := i.getBarrier() - - for _, v := range block.Queries { - for x := b; x <= h; x++ { - if hi, ok := i.heightIndex.get(x); ok { - hi.setSignedBlock(block.BlockHash(), v) - } - } - } -} - -func (i *queryIndex) resetSignedBlock(h int32, block *ct.Block) { - b := i.getBarrier() - - for _, v := range block.Queries { - for x := b; x <= h; x++ { - if hi, ok := i.heightIndex.get(x); ok { - hi.resetSignedBlock(block.BlockHash(), v) - } - } - } -} - -// getAck gets the acknowledged queries from the index. -func (i *queryIndex) getAck(h int32, header *hash.Hash) (ack *wt.SignedAckHeader, err error) { - b := i.getBarrier() - - if h < b { - err = errors.Wrapf(ErrQueryExpired, "get Ack, height %d, barrier %d", h, b) - return - } - - for x := b; x <= h; x++ { - if hi, ok := i.heightIndex.get(x); ok { - if ack, ok = hi.getAck(header); ok { - return - } - } - } - - err = ErrQueryNotCached - return -} - -// advanceBarrier moves barrier to given height. All buckets lower than this height will be set as -// expired, and all the queries which are not packed in these buckets will be reported. -func (i *queryIndex) advanceBarrier(height int32) { - b := i.getBarrier() - i.setBarrier(height) - - for x := b; x < height; x++ { - if hi, ok := i.heightIndex.get(x); ok { - hi.checkBeforeExpire() - i.heightIndex.del(x) - } - } -} - -// markAndCollectUnsignedAcks marks and collects all the unsigned acknowledgements which can be -// signed by a block at the given height. -func (i *queryIndex) markAndCollectUnsignedAcks(height int32) (qs []*hash.Hash) { - b := i.getBarrier() - qs = make([]*hash.Hash, 0, 1024) - - for x := b; x < height; x++ { - if hi, ok := i.heightIndex.get(x); ok { - hi.markAndCollectUnsignedAcks(&qs) - } - } - - return -} diff --git a/sqlchain/queryindex_test.go b/sqlchain/queryindex_test.go deleted file mode 100644 index 609b55b35..000000000 --- a/sqlchain/queryindex_test.go +++ /dev/null @@ -1,394 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sqlchain - -import ( - "github.com/pkg/errors" - "math/rand" - "reflect" - "testing" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -const ( - testBucketNumber = 10 - testQueryNumberPerHeight = 10 - testClientNumber = 10 - testWorkerNumber = 10 - testQueryWorkerNumber = 3 -) - -func (i *heightIndex) mustGet(k int32) *multiIndex { - i.Lock() - defer i.Unlock() - return i.index[k] -} - -func TestCorruptedIndex(t *testing.T) { - ack, err := createRandomNodesAndAck() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - resp := ack.SignedResponseHeader() - - // Create index - qi := newQueryIndex() - - if err = qi.addResponse(0, resp); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = qi.addAck(0, ack); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test repeatedly add - if err = qi.addResponse(0, resp); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = qi.addAck(0, ack); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test corrupted index - qi.heightIndex.mustGet(0).respIndex[resp.HeaderHash].response = nil - - if err = qi.addResponse(0, resp); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } - - if err = qi.addAck(0, ack); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } - - qi.heightIndex.mustGet(0).respIndex[resp.HeaderHash] = nil - - if err = qi.addResponse(0, resp); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } - - if err = qi.addAck(0, ack); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestSingleAck(t *testing.T) { - ack, err := createRandomNodesAndAck() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - qi := newQueryIndex() - - if err = qi.addAck(0, ack); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Check not signed ack - qi.heightIndex.mustGet(0).checkBeforeExpire() -} - -func TestEnsureRange(t *testing.T) { - qi := newQueryIndex() - qi.heightIndex.ensureRange(0, 10) - - for i := 0; i < 10; i++ { - if _, ok := qi.heightIndex.get(int32(i)); !ok { - t.Fatalf("Failed to ensure height %d", i) - } - } -} - -func TestCheckAckFromBlock(t *testing.T) { - var height int32 = 10 - qi := newQueryIndex() - qi.advanceBarrier(height) - b1, err := createRandomBlock(genesisHash, false) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if _, err := qi.checkAckFromBlock( - 0, b1.BlockHash(), b1.Queries[0], - ); errors.Cause(err) != ErrQueryExpired { - t.Fatalf("Unexpected error: %v", err) - } - - if isKnown, err := qi.checkAckFromBlock( - height, b1.BlockHash(), b1.Queries[0], - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if isKnown { - t.Fatal("Unexpected result: index should not know this query") - } - - // Create a group of query for test - cli, err := newRandomNode() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - worker1, err := newRandomNode() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - worker2, err := newRandomNode() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - req, err := createRandomQueryRequest(cli) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - resp1, err := createRandomQueryResponseWithRequest(req, worker1) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - ack1, err := createRandomQueryAckWithResponse(resp1, cli) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - 
resp2, err := createRandomQueryResponseWithRequest(req, worker2) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - ack2, err := createRandomQueryAckWithResponse(resp2, cli) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test a query signed by another block - if err = qi.addAck(height, ack1); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = qi.addAck(height, ack2); err != ErrMultipleAckOfSeqNo { - t.Fatalf("Unexpected error: %v", err) - } - - b2, err := createRandomBlock(genesisHash, false) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - b1.Queries[0] = &ack1.HeaderHash - b2.Queries[0] = &ack1.HeaderHash - qi.setSignedBlock(height, b1) - - if _, err := qi.checkAckFromBlock( - height, b2.BlockHash(), b2.Queries[0], - ); err != ErrQuerySignedByAnotherBlock { - t.Fatalf("Unexpected error: %v", err) - } - - // Test checking same ack signed by another block - b2.Queries[0] = &ack2.HeaderHash - - if _, err = qi.checkAckFromBlock( - height, b2.BlockHash(), b2.Queries[0], - ); err != ErrQuerySignedByAnotherBlock { - t.Fatalf("Unexpected error: %v", err) - } - - // Revert index state for the first block, and test checking again - qi.heightIndex.mustGet(height).seqIndex[req.GetQueryKey()].firstAck.signedBlock = nil - - if _, err = qi.checkAckFromBlock( - height, b2.BlockHash(), b2.Queries[0], - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test corrupted index - qi.heightIndex.mustGet(height).seqIndex[req.GetQueryKey()] = nil - - if _, err = qi.checkAckFromBlock( - height, b2.BlockHash(), b2.Queries[0], - ); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestGetAck(t *testing.T) { - qi := newQueryIndex() - qh := &hash.Hash{} - - if _, err := qi.getAck(-1, qh); errors.Cause(err) != ErrQueryExpired { - t.Fatalf("Unexpected error: %v", err) - } - - if _, err := qi.getAck(0, qh); errors.Cause(err) != ErrQueryNotCached { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestQueryIndex(t *testing.T) { - log.SetLevel(log.InfoLevel) - // Initialize clients and workers - clients, err := newRandomNodes(testClientNumber) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - workers, err := newRandomNodes(testWorkerNumber) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Initialize index - qi := newQueryIndex() - - // Create some responses and acknowledgements and insert to index - for i := 0; i < testBucketNumber; i++ { - qi.advanceBarrier(int32(i)) - block, err := createRandomBlock(genesisHash, false) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - block.Queries = block.Queries[:0] - - for j := 0; j < testQueryNumberPerHeight; j++ { - cli := clients[rand.Intn(testClientNumber)] - req, err := createRandomQueryRequest(cli) - hasFirstAck := false - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - ackNumber := rand.Intn(testQueryWorkerNumber + 1) - - for k := 0; k < testQueryWorkerNumber; k++ { - worker := workers[(rand.Intn(testWorkerNumber)+k)%testWorkerNumber] - resp, err := createRandomQueryResponseWithRequest(req, worker) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - log.Debugf("i = %d, j = %d, k = %d\n\tseqno = %+v, req = %v, resp = %v", i, j, k, - resp.Request.GetQueryKey(), &req.HeaderHash, &resp.HeaderHash) - - if err = qi.addResponse(int32(i), resp); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if k < ackNumber { - dupAckNumber := 1 + rand.Intn(2) - - for l := 0; l < 
dupAckNumber; l++ { - ack, err := createRandomQueryAckWithResponse(resp, cli) - - log.Debugf("i = %d, j = %d, k = %d, l = %d\n\tseqno = %+v, "+ - "req = %v, resp = %v, ack = %v", - i, j, k, l, - ack.SignedRequestHeader().GetQueryKey(), - &ack.SignedRequestHeader().HeaderHash, - &ack.SignedResponseHeader().HeaderHash, - &ack.HeaderHash, - ) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - err = qi.addAck(int32(i), ack) - - if !hasFirstAck { - if l == 0 && err != nil || - l > 0 && err != nil && err != ErrMultipleAckOfResponse { - t.Fatalf("Error occurred: %v", err) - } - } else { - if l == 0 && err == nil { - t.Fatalf("Unexpected error: %v", err) - } - } - - if err == nil { - hasFirstAck = true - block.PushAckedQuery(&ack.HeaderHash) - } else { - continue - } - - if rAck, err := qi.getAck(int32(i), &ack.HeaderHash); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !reflect.DeepEqual(ack, rAck) { - t.Fatalf("Unexpected result:\n\torigin = %+v\n\toutput = %+v", - ack, rAck) - } else if !reflect.DeepEqual( - ack.SignedResponseHeader(), rAck.SignedResponseHeader()) { - t.Fatalf("Unexpected result:\n\torigin = %+v\n\toutput = %+v", - ack.SignedResponseHeader(), rAck.SignedResponseHeader()) - } - } - } - } - - qi.setSignedBlock(int32(i), block) - - for j := range block.Queries { - if isKnown, err := qi.checkAckFromBlock( - int32(i), block.BlockHash(), block.Queries[j], - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !isKnown { - t.Logf("Failed to check known ack: %s", block.Queries[j]) - } - } - - qi.resetSignedBlock(int32(i), block) - - for j := range block.Queries { - if isKnown, err := qi.checkAckFromBlock( - int32(i), block.BlockHash(), block.Queries[j], - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !isKnown { - t.Fatal("Unexpected result: block is known") - } - } - } - } -} diff --git a/sqlchain/rpc.go b/sqlchain/rpc.go index b662d67ca..5ed518267 100644 --- a/sqlchain/rpc.go +++ b/sqlchain/rpc.go @@ -21,8 +21,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" ) // ChainRPCService defines a sql-chain RPC server. @@ -32,7 +31,7 @@ type ChainRPCService struct { // AdviseNewBlockReq defines a request of the AdviseNewBlock RPC method. type AdviseNewBlockReq struct { - Block *ct.Block + Block *types.Block Count int32 } @@ -48,18 +47,9 @@ type AdviseBinLogReq struct { type AdviseBinLogResp struct { } -// AdviseResponsedQueryReq defines a request of the AdviseAckedQuery RPC method. -type AdviseResponsedQueryReq struct { - Query *wt.SignedResponseHeader -} - -// AdviseResponsedQueryResp defines a response of the AdviseAckedQuery RPC method. -type AdviseResponsedQueryResp struct { -} - // AdviseAckedQueryReq defines a request of the AdviseAckedQuery RPC method. type AdviseAckedQueryReq struct { - Query *wt.SignedAckHeader + Query *types.SignedAckHeader } // AdviseAckedQueryResp defines a response of the AdviseAckedQuery RPC method. @@ -74,18 +64,7 @@ type FetchBlockReq struct { // FetchBlockResp defines a response of the FetchBlock RPC method. type FetchBlockResp struct { Height int32 - Block *ct.Block -} - -// FetchAckedQueryReq defines a request of the FetchAckedQuery RPC method. 
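// With AdviseResponsedQuery removed above and FetchAckedQuery removed below,
// AdviseAckedQuery remains the only advise path on this service. A hedged
// client-side sketch, assuming a Call(method, req, resp) style caller from the
// project's RPC layer and that the service is registered under its type name
// (neither detail is shown in this diff):
//
//	req := &AdviseAckedQueryReq{Query: ack} // ack is a *types.SignedAckHeader
//	resp := &AdviseAckedQueryResp{}
//	err := caller.Call("ChainRPCService.AdviseAckedQuery", req, resp)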
-type FetchAckedQueryReq struct { - Height int32 - SignedAckedHeaderHash *hash.Hash -} - -// FetchAckedQueryResp defines a request of the FetchAckedQuery RPC method. -type FetchAckedQueryResp struct { - Ack *wt.SignedAckHeader + Block *types.Block } // SignBillingReq defines a request of the SignBilling RPC method. @@ -140,12 +119,6 @@ func (s *ChainRPCService) AdviseBinLog(req *AdviseBinLogReq, resp *AdviseBinLogR return nil } -// AdviseResponsedQuery is the RPC method to advise a new responsed query to the target server. -func (s *ChainRPCService) AdviseResponsedQuery( - req *AdviseResponsedQueryReq, resp *AdviseResponsedQueryResp) error { - return s.chain.VerifyAndPushResponsedQuery(req.Query) -} - // AdviseAckedQuery is the RPC method to advise a new acknowledged query to the target server. func (s *ChainRPCService) AdviseAckedQuery( req *AdviseAckedQueryReq, resp *AdviseAckedQueryResp) error { @@ -159,13 +132,6 @@ func (s *ChainRPCService) FetchBlock(req *FetchBlockReq, resp *FetchBlockResp) ( return } -// FetchAckedQuery is the RPC method to fetch a known block from the target server. -func (s *ChainRPCService) FetchAckedQuery(req *FetchAckedQueryReq, resp *FetchAckedQueryResp, -) (err error) { - resp.Ack, err = s.chain.FetchAckedQuery(req.Height, req.SignedAckedHeaderHash) - return -} - // SignBilling is the RPC method to get signature for a billing request from the target server. func (s *ChainRPCService) SignBilling(req *SignBillingReq, resp *SignBillingResp) (err error) { resp.HeaderHash = req.BillingRequest.RequestHash diff --git a/sqlchain/runtime.go b/sqlchain/runtime.go index c846bf2b9..b1935558d 100644 --- a/sqlchain/runtime.go +++ b/sqlchain/runtime.go @@ -22,10 +22,9 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) // runtime represents a chain runtime state. @@ -48,19 +47,21 @@ type runtime struct { tick time.Duration // queryTTL sets the unacknowledged query TTL in block periods. queryTTL int32 + // blockCacheTTL sets the cached block numbers. + blockCacheTTL int32 // muxServer is the multiplexing service of sql-chain PRC. muxService *MuxService // price sets query price in gases. - price map[wt.QueryType]uint64 + price map[types.QueryType]uint64 producingReward uint64 billingPeriods int32 // peersMutex protects following peers-relative fields. peersMutex sync.Mutex // peers is the peer list of the sql-chain. - peers *kayak.Peers + peers *proto.Peers // server is the local peer service instance. - server *kayak.Server + server proto.NodeID // index is the index of the current server in the peer list. index int32 // total is the total peer number of the sql-chain. @@ -86,11 +87,17 @@ type runtime struct { // newRunTime returns a new sql-chain runtime instance with the specified config. 
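//
// In the constructor below, blockCacheTTL (the number of block periods a block
// stays cached) is clamped with an immediately-invoked function literal. An
// equivalent standalone form, with minBlockCacheTTL being the package-level
// lower bound it references:
//
//	func clampBlockCacheTTL(ttl int32) int32 {
//		if ttl < minBlockCacheTTL {
//			return minBlockCacheTTL
//		}
//		return ttl
//	}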
 func newRunTime(c *Config) (r *runtime) {
 	r = &runtime{
-		stopCh:          make(chan struct{}),
-		databaseID:      c.DatabaseID,
-		period:          c.Period,
-		tick:            c.Tick,
-		queryTTL:        c.QueryTTL,
+		stopCh:     make(chan struct{}),
+		databaseID: c.DatabaseID,
+		period:     c.Period,
+		tick:       c.Tick,
+		queryTTL:   c.QueryTTL,
+		blockCacheTTL: func() int32 {
+			if c.BlockCacheTTL < minBlockCacheTTL {
+				return minBlockCacheTTL
+			}
+			return c.BlockCacheTTL
+		}(),
 		muxService:      c.MuxService,
 		price:           c.Price,
 		producingReward: c.ProducingReward,
@@ -98,10 +105,15 @@ func newRunTime(c *Config) (r *runtime) {
 		peers:           c.Peers,
 		server:          c.Server,
 		index: func() int32 {
-			if index, found := c.Peers.Find(c.Server.ID); found {
+			if index, found := c.Peers.Find(c.Server); found {
 				return index
 			}

+			log.WithFields(log.Fields{
+				"node":  c.Server,
+				"peers": c.Peers,
+			}).Warning("could not find server in peers")
+
 			return -1
 		}(),
 		total: int32(len(c.Peers.Servers)),
@@ -117,7 +129,7 @@ func newRunTime(c *Config) (r *runtime) {
 	return
 }

-func (r *runtime) setGenesis(b *ct.Block) {
+func (r *runtime) setGenesis(b *types.Block) {
 	r.chainInitTime = b.Timestamp()
 	r.genesisHash = *b.BlockHash()
 	r.head = &state{
@@ -185,7 +197,7 @@ func (r *runtime) setNextTurn() {
 }

 // getQueryGas gets the consumption of gas for a specified query type.
-func (r *runtime) getQueryGas(t wt.QueryType) uint64 {
+func (r *runtime) getQueryGas(t types.QueryType) uint64 {
 	return r.price[t]
 }

@@ -221,10 +233,10 @@ func (r *runtime) nextTick() (t time.Time, d time.Duration) {
 	return
 }

-func (r *runtime) updatePeers(peers *kayak.Peers) (err error) {
+func (r *runtime) updatePeers(peers *proto.Peers) (err error) {
 	r.peersMutex.Lock()
 	defer r.peersMutex.Unlock()
-	index, found := peers.Find(r.server.ID)
+	index, found := peers.Find(r.server)

 	if found {
 		r.index = index
@@ -259,7 +271,7 @@ func (r *runtime) getIndexTotal() (int32, int32) {
 	return r.index, r.total
 }

-func (r *runtime) getIndexTotalServer() (int32, int32, *kayak.Server) {
+func (r *runtime) getIndexTotalServer() (int32, int32, proto.NodeID) {
 	r.peersMutex.Lock()
 	defer r.peersMutex.Unlock()
 	return r.index, r.total, r.server
@@ -267,10 +279,10 @@ func (r *runtime) getIndexTotalServer() (int32, int32, *kayak.Server) {

 func (r *runtime) getPeerInfoString() string {
 	index, total, server := r.getIndexTotalServer()
-	return fmt.Sprintf("[%d/%d] %s", index, total, server.ID)
+	return fmt.Sprintf("[%d/%d] %s", index, total, server)
 }

-func (r *runtime) getServer() *kayak.Server {
+func (r *runtime) getServer() proto.NodeID {
 	r.peersMutex.Lock()
 	defer r.peersMutex.Unlock()
 	return r.server
@@ -298,7 +310,7 @@ func (r *runtime) isMyTurn() (ret bool) {
 	return
 }

-func (r *runtime) getPeers() *kayak.Peers {
+func (r *runtime) getPeers() *proto.Peers {
 	r.peersMutex.Lock()
 	defer r.peersMutex.Unlock()
 	peers := r.peers.Clone()
diff --git a/sqlchain/storage/storage.go b/sqlchain/storage/storage.go
deleted file mode 100644
index d902b3e61..000000000
--- a/sqlchain/storage/storage.go
+++ /dev/null
@@ -1,417 +0,0 @@
-/*
- * Copyright 2018 The CovenantSQL Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package storage - -import ( - "context" - "database/sql" - "errors" - "fmt" - "io" - "sync" - - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - // Register CovenantSQL/go-sqlite3-encrypt engine. - _ "github.com/CovenantSQL/go-sqlite3-encrypt" -) - -var ( - index = struct { - sync.Mutex - db map[string]*sql.DB - }{ - db: make(map[string]*sql.DB), - } -) - -// Query represents the single query of sqlite. -type Query struct { - Pattern string - Args []sql.NamedArg -} - -// ExecLog represents the execution log of sqlite. -type ExecLog struct { - ConnectionID uint64 - SeqNo uint64 - Timestamp int64 - Queries []Query -} - -func openDB(dsn string) (db *sql.DB, err error) { - // Rebuild DSN. - d, err := NewDSN(dsn) - - if err != nil { - return nil, err - } - - d.AddParam("_journal_mode", "WAL") - d.AddParam("_synchronous", "NORMAL") - fdsn := d.Format() - - fn := d.GetFileName() - mode, _ := d.GetParam("mode") - cache, _ := d.GetParam("cache") - - if (fn == ":memory:" || mode == "memory") && cache != "shared" { - // Return a new DB instance if it's in memory and private. - db, err = sql.Open("sqlite3", fdsn) - return - } - - index.Lock() - db, ok := index.db[d.filename] - index.Unlock() - - if !ok { - db, err = sql.Open("sqlite3", fdsn) - - if err != nil { - return nil, err - } - - index.Lock() - index.db[d.filename] = db - index.Unlock() - } - - return -} - -// TxID represents a transaction ID. -type TxID struct { - ConnectionID uint64 - SeqNo uint64 - Timestamp int64 -} - -func equalTxID(x, y *TxID) bool { - return x.ConnectionID == y.ConnectionID && x.SeqNo == y.SeqNo && x.Timestamp == y.Timestamp -} - -// Storage represents a underlying storage implementation based on sqlite3. -type Storage struct { - sync.Mutex - dsn string - db *sql.DB - tx *sql.Tx // Current tx - id TxID - queries []Query -} - -// New returns a new storage connected by dsn. -func New(dsn string) (st *Storage, err error) { - db, err := openDB(dsn) - - if err != nil { - return - } - - return &Storage{ - dsn: dsn, - db: db, - }, nil -} - -// Prepare implements prepare method of two-phase commit worker. -func (s *Storage) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) { - el, ok := wb.(*ExecLog) - - if !ok { - return errors.New("unexpected WriteBatch type") - } - - s.Lock() - defer s.Unlock() - - if s.tx != nil { - if equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { - s.queries = el.Queries - return nil - } - - return fmt.Errorf("twopc: inconsistent state, currently in tx: "+ - "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) - } - - s.tx, err = s.db.BeginTx(ctx, nil) - - if err != nil { - return - } - - s.id = TxID{el.ConnectionID, el.SeqNo, el.Timestamp} - s.queries = el.Queries - - return nil -} - -// Commit implements commit method of two-phase commit worker. -func (s *Storage) Commit(ctx context.Context, wb twopc.WriteBatch) (err error) { - el, ok := wb.(*ExecLog) - - if !ok { - return errors.New("unexpected WriteBatch type") - } - - s.Lock() - defer s.Unlock() - - if s.tx != nil { - if equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { - for _, q := range s.queries { - // convert arguments types - args := make([]interface{}, len(q.Args)) - - for i, v := range q.Args { - args[i] = v - } - - _, err = s.tx.ExecContext(ctx, q.Pattern, args...) 
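			// ExecContext applies one staged query inside the transaction
			// opened by Prepare; the error check below rolls the whole batch
			// back, making Commit all-or-nothing for its ExecLog.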
- - if err != nil { - log.WithError(err).Debug("commit query failed") - s.tx.Rollback() - s.tx = nil - s.queries = nil - return - } - } - - s.tx.Commit() - s.tx = nil - s.queries = nil - return nil - } - - return fmt.Errorf("twopc: inconsistent state, currently in tx: "+ - "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) - } - - return errors.New("twopc: tx not prepared") -} - -// Rollback implements rollback method of two-phase commit worker. -func (s *Storage) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) { - el, ok := wb.(*ExecLog) - - if !ok { - return errors.New("unexpected WriteBatch type") - } - - s.Lock() - defer s.Unlock() - - if !equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { - return fmt.Errorf("twopc: inconsistent state, currently in tx: "+ - "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) - } - - if s.tx != nil { - s.tx.Rollback() - s.tx = nil - s.queries = nil - } - - return nil -} - -// Query implements read-only query feature. -func (s *Storage) Query(ctx context.Context, queries []Query) (columns []string, types []string, - data [][]interface{}, err error) { - data = make([][]interface{}, 0) - - if len(queries) == 0 { - return - } - - var tx *sql.Tx - var txOptions = &sql.TxOptions{ - ReadOnly: true, - } - - if tx, err = s.db.BeginTx(ctx, txOptions); err != nil { - return - } - - // always rollback on complete - defer tx.Rollback() - - q := queries[len(queries)-1] - - // convert arguments types - args := make([]interface{}, len(q.Args)) - - for i, v := range q.Args { - args[i] = v - } - - var rows *sql.Rows - if rows, err = tx.Query(q.Pattern, args...); err != nil { - return - } - - // free result set - defer rows.Close() - - // get rows meta - if columns, err = rows.Columns(); err != nil { - return - } - - // if there is empty columns, treat result as empty - if len(columns) == 0 { - return - } - - // get types meta - if types, err = s.transformColumnTypes(rows.ColumnTypes()); err != nil { - return - } - - rs := newRowScanner(len(columns)) - - for rows.Next() { - err = rows.Scan(rs.ScanArgs()...) - if err != nil { - return - } - - data = append(data, rs.GetRow()) - } - - err = rows.Err() - return -} - -// Exec implements write query feature. -func (s *Storage) Exec(ctx context.Context, queries []Query) (rowsAffected int64, err error) { - if len(queries) == 0 { - return - } - - var tx *sql.Tx - var txOptions = &sql.TxOptions{ - ReadOnly: false, - } - - if tx, err = s.db.BeginTx(ctx, txOptions); err != nil { - return - } - - defer tx.Rollback() - - for _, q := range queries { - // convert arguments types - args := make([]interface{}, len(q.Args)) - - for i, v := range q.Args { - args[i] = v - } - - var result sql.Result - if result, err = tx.Exec(q.Pattern, args...); err != nil { - log.WithError(err).Debug("execute query failed") - return - } - - var affected int64 - affected, err = result.RowsAffected() - - rowsAffected += affected - } - - tx.Commit() - - return -} - -// Close implements database safe close feature. 
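// A short usage sketch of the Query/Exec API defined in this (removed) file;
// the DSN is hypothetical and error handling is elided:
//
//	st, _ := New("file:test.db")
//	_, _ = st.Exec(context.Background(), []Query{
//		{Pattern: "CREATE TABLE IF NOT EXISTS `kv` (`key` TEXT PRIMARY KEY, `value` BLOB)"},
//	})
//	cols, declTypes, rows, _ := st.Query(context.Background(),
//		[]Query{{Pattern: "SELECT * FROM `kv`"}})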
-func (s *Storage) Close() (err error) { - d, err := NewDSN(s.dsn) - if err != nil { - return - } - - index.Lock() - defer index.Unlock() - delete(index.db, d.filename) - return s.db.Close() -} - -func (s *Storage) transformColumnTypes(columnTypes []*sql.ColumnType, e error) (types []string, err error) { - if e != nil { - err = e - return - } - - types = make([]string, len(columnTypes)) - - for i, c := range columnTypes { - types[i] = c.DatabaseTypeName() - } - - return -} - -// golang does trick convert, use rowScanner to return the original result type in sqlite3 driver -type rowScanner struct { - fieldCnt int - column int // current column - fields []interface{} // temp fields - scanArgs []interface{} -} - -func newRowScanner(fieldCnt int) (s *rowScanner) { - s = &rowScanner{ - fieldCnt: fieldCnt, - column: 0, - fields: make([]interface{}, fieldCnt), - scanArgs: make([]interface{}, fieldCnt), - } - - for i := 0; i != fieldCnt; i++ { - s.scanArgs[i] = s - } - - return -} - -func (s *rowScanner) Scan(src interface{}) error { - if s.fieldCnt <= s.column { - // read complete - return io.EOF - } - - s.fields[s.column] = src - s.column++ - - return nil -} - -func (s *rowScanner) GetRow() []interface{} { - return s.fields -} - -func (s *rowScanner) ScanArgs() []interface{} { - // reset - s.column = 0 - s.fields = make([]interface{}, s.fieldCnt) - return s.scanArgs -} diff --git a/sqlchain/storage/storage_test.go b/sqlchain/storage/storage_test.go deleted file mode 100644 index e72d57b26..000000000 --- a/sqlchain/storage/storage_test.go +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package storage - -import ( - "context" - "database/sql" - "fmt" - "io/ioutil" - "reflect" - "testing" - "time" -) - -func newQuery(query string, args ...interface{}) (q Query) { - q.Pattern = query - - // convert args - q.Args = make([]sql.NamedArg, len(args)) - for i, v := range args { - q.Args[i] = sql.Named("", v) - } - - return -} - -func newNamedQuery(query string, args map[string]interface{}) (q Query) { - q.Pattern = query - q.Args = make([]sql.NamedArg, len(args)) - i := 0 - - // convert args - for n, v := range args { - q.Args[i] = sql.Named(n, v) - i++ - } - - return -} - -func TestBadType(t *testing.T) { - fl, err := ioutil.TempFile("", "sqlite3-") - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - st, err := New(fmt.Sprintf("file:%s", fl.Name())) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = st.Prepare(context.Background(), struct{}{}); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if err = st.Commit(context.Background(), struct{}{}); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if err = st.Rollback(context.Background(), struct{}{}); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } -} - -func TestStorage(t *testing.T) { - fl, err := ioutil.TempFile("", "sqlite3-") - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - st, err := New(fmt.Sprintf("file:%s", fl.Name())) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - el1 := &ExecLog{ - ConnectionID: 1, - SeqNo: 1, - Timestamp: time.Now().UnixNano(), - Queries: []Query{ - newQuery("CREATE TABLE IF NOT EXISTS `kv` (`key` TEXT PRIMARY KEY, `value` BLOB)"), - newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k0', NULL)"), - newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k1', 'v1')"), - newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k2', 'v2')"), - newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k3', 'v3')"), - newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k3', 'v3-2')"), - newQuery("DELETE FROM `kv` WHERE `key`='k2'"), - }, - } - - el2 := &ExecLog{ - ConnectionID: 1, - SeqNo: 2, - Timestamp: time.Now().UnixNano(), - Queries: []Query{ - newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k1', 'v1-2')"), - }, - } - - if err = st.Prepare(context.Background(), el1); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = st.Prepare(context.Background(), el1); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = st.Prepare(context.Background(), el2); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if err = st.Commit(context.Background(), el2); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if err = st.Rollback(context.Background(), el2); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if err = st.Commit(context.Background(), el1); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // test query - columns, types, data, err := st.Query(context.Background(), - []Query{newQuery("SELECT * FROM `kv` ORDER BY `key` ASC")}) - - if err != nil { 
- t.Fatalf("Query failed: %v", err.Error()) - } - if !reflect.DeepEqual(columns, []string{"key", "value"}) { - t.Fatalf("Error column result: %v", columns) - } - if !reflect.DeepEqual(types, []string{"TEXT", "BLOB"}) { - t.Fatalf("Error types result: %v", types) - } - if len(data) != 3 { - t.Fatalf("Error result count: %v, should be 3", len(data)) - } else { - // compare rows - should1 := []interface{}{[]byte("k0"), nil} - should2 := []interface{}{[]byte("k1"), []byte("v1")} - should3 := []interface{}{[]byte("k3"), []byte("v3-2")} - t.Logf("Rows: %v", data) - if !reflect.DeepEqual(data[0], should1) { - t.Fatalf("Error result row: %v, should: %v", data[0], should1) - } - if !reflect.DeepEqual(data[1], should2) { - t.Fatalf("Error result row: %v, should: %v", data[1], should2) - } - if !reflect.DeepEqual(data[2], should3) { - t.Fatalf("Error result row: %v, should: %v", data[2], should2) - } - } - - // test query with projection - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("SELECT `key` FROM `kv` ORDER BY `key` ASC")}) - - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } - if !reflect.DeepEqual(columns, []string{"key"}) { - t.Fatalf("Error column result: %v", columns) - } - if !reflect.DeepEqual(types, []string{"TEXT"}) { - t.Fatalf("Error types result: %v", types) - } - if len(data) != 3 { - t.Fatalf("Error result count: %v, should be 3", len(data)) - } else { - // compare rows - should1 := []interface{}{[]byte("k0")} - should2 := []interface{}{[]byte("k1")} - should3 := []interface{}{[]byte("k3")} - t.Logf("Rows: %v", data) - if !reflect.DeepEqual(data[0], should1) { - t.Fatalf("Error result row: %v, should: %v", data[0], should1) - } - if !reflect.DeepEqual(data[1], should2) { - t.Fatalf("Error result row: %v, should: %v", data[1], should2) - } - if !reflect.DeepEqual(data[2], should3) { - t.Fatalf("Error result row: %v, should: %v", data[2], should2) - } - } - - // test query with condition - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("SELECT `key` FROM `kv` WHERE `value` IS NOT NULL ORDER BY `key` ASC")}) - - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } - if !reflect.DeepEqual(columns, []string{"key"}) { - t.Fatalf("Error column result: %v", columns) - } - if !reflect.DeepEqual(types, []string{"TEXT"}) { - t.Fatalf("Error types result: %v", types) - } - if len(data) != 2 { - t.Fatalf("Error result count: %v, should be 3", len(data)) - } else { - // compare rows - should1 := []interface{}{[]byte("k1")} - should2 := []interface{}{[]byte("k3")} - t.Logf("Rows: %v", data) - if !reflect.DeepEqual(data[0], should1) { - t.Fatalf("Error result row: %v, should: %v", data[0], should1) - } - if !reflect.DeepEqual(data[1], should2) { - t.Fatalf("Error result row: %v, should: %v", data[1], should2) - } - } - - // test failed query - columns, types, data, err = st.Query(context.Background(), []Query{newQuery("SQL???? 
WHAT!!!!")}) - - if err == nil { - t.Fatal("Query should failed") - } else { - t.Logf("Query failed as expected with: %v", err.Error()) - } - - // test non-read query - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("DELETE FROM `kv` WHERE `value` IS NULL")}) - - affected, err := st.Exec(context.Background(), - []Query{newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k4', 'v4')")}) - if err != nil || affected != 1 { - t.Fatalf("Exec INSERT failed: %v", err) - } - // test with arguments - affected, err = st.Exec(context.Background(), []Query{newQuery("DELETE FROM `kv` WHERE `key`='k4'")}) - if err != nil || affected != 1 { - t.Fatalf("Exec DELETE failed: %v", err) - } - affected, err = st.Exec(context.Background(), - []Query{newQuery("DELETE FROM `kv` WHERE `key`=?", "not_exist")}) - if err != nil || affected != 0 { - t.Fatalf("Exec DELETE failed: %v", err) - } - - // test again - columns, types, data, err = st.Query(context.Background(), []Query{newQuery("SELECT `key` FROM `kv`")}) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else if len(data) != 3 { - t.Fatalf("Last write query should not take any effect, row count: %v", len(data)) - } else { - t.Logf("Rows: %v", data) - } - - // test with select - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("SELECT `key` FROM `kv` WHERE `key` IN (?)", "k1")}) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else if len(data) != 1 { - t.Fatalf("Should only have one record, but actually %v", len(data)) - } else { - t.Logf("Rows: %v", data) - } - - // test with select with named arguments - columns, types, data, err = st.Query(context.Background(), - []Query{newNamedQuery("SELECT `key` FROM `kv` WHERE `key` IN (:test2, :test1)", map[string]interface{}{ - "test1": "k1", - "test2": "k3", - })}) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else if len(data) != 2 { - t.Fatalf("Should only have two records, but actually %v", len(data)) - } else { - t.Logf("Rows: %v", data) - } - - // test with function - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("SELECT COUNT(1) AS `c` FROM `kv`")}) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else { - if len(columns) != 1 { - t.Fatalf("Query result should contain only one column, now %v", len(columns)) - } else if columns[0] != "c" { - t.Fatalf("Query result column name is not defined alias, but :%v", columns[0]) - } - if len(types) != 1 { - t.Fatalf("Query result should contain only one column, now %v", len(types)) - } else { - t.Logf("Query result type is: %v", types[0]) - } - if len(data) != 1 || len(data[0]) != 1 { - t.Fatalf("Query result should contain only one row and one column, now %v", data) - } else if !reflect.DeepEqual(data[0][0], int64(3)) { - t.Fatalf("Query result should be table row count 3, but: %v", data[0]) - } - } - - // test with timestamp fields - _, err = st.Exec(context.Background(), []Query{ - newQuery("CREATE TABLE `tm` (tm TIMESTAMP)"), - newQuery("INSERT INTO `tm` VALUES(DATE('NOW'))"), - }) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else { - // query for values - _, _, data, err = st.Query(context.Background(), []Query{newQuery("SELECT `tm` FROM `tm`")}) - if len(data) != 1 || len(data[0]) != 1 { - t.Fatalf("Query result should contain only one row and one column, now %v", data) - } else if !reflect.TypeOf(data[0][0]).AssignableTo(reflect.TypeOf(time.Time{})) { - t.Fatalf("Query result should be 
time.Time type, but: %v", reflect.TypeOf(data[0][0]).String()) - } - } -} diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go index 96870cc01..3e45a73b3 100644 --- a/sqlchain/xxx_test.go +++ b/sqlchain/xxx_test.go @@ -28,12 +28,11 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) var ( @@ -106,8 +105,8 @@ func createRandomStrings(offset, length, soffset, slength int) (s []string) { return } -func createRandomStorageQueries(offset, length, soffset, slength int) (qs []wt.Query) { - qs = make([]wt.Query, rand.Intn(length)+offset) +func createRandomStorageQueries(offset, length, soffset, slength int) (qs []types.Query) { + qs = make([]types.Query, rand.Intn(length)+offset) for i := range qs { createRandomString(soffset, slength, &qs[i].Pattern) @@ -120,11 +119,11 @@ func createRandomTimeAfter(now time.Time, maxDelayMillisecond int) time.Time { return now.Add(time.Duration(rand.Intn(maxDelayMillisecond)+1) * time.Millisecond) } -func createRandomQueryRequest(cli *nodeProfile) (r *wt.SignedRequestHeader, err error) { - req := &wt.Request{ - Header: wt.SignedRequestHeader{ - RequestHeader: wt.RequestHeader{ - QueryType: wt.QueryType(rand.Intn(2)), +func createRandomQueryRequest(cli *nodeProfile) (r *types.SignedRequestHeader, err error) { + req := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.QueryType(rand.Intn(2)), NodeID: cli.NodeID, ConnectionID: uint64(rand.Int63()), SeqNo: uint64(rand.Int63()), @@ -132,7 +131,7 @@ func createRandomQueryRequest(cli *nodeProfile) (r *wt.SignedRequestHeader, err // BatchCount and QueriesHash will be set by req.Sign() }, }, - Payload: wt.RequestPayload{ + Payload: types.RequestPayload{ Queries: createRandomStorageQueries(10, 10, 10, 10), }, } @@ -148,7 +147,7 @@ func createRandomQueryRequest(cli *nodeProfile) (r *wt.SignedRequestHeader, err } func createRandomQueryResponse(cli, worker *nodeProfile) ( - r *wt.SignedResponseHeader, err error, + r *types.SignedResponseHeader, err error, ) { req, err := createRandomQueryRequest(cli) @@ -156,18 +155,18 @@ func createRandomQueryResponse(cli, worker *nodeProfile) ( return } - resp := &wt.Response{ - Header: wt.SignedResponseHeader{ - ResponseHeader: wt.ResponseHeader{ + resp := &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ Request: *req, NodeID: worker.NodeID, Timestamp: createRandomTimeAfter(req.Timestamp, 100), }, }, - Payload: wt.ResponsePayload{ + Payload: types.ResponsePayload{ Columns: createRandomStrings(10, 10, 10, 10), DeclTypes: createRandomStrings(10, 10, 10, 10), - Rows: make([]wt.ResponseRow, rand.Intn(10)+10), + Rows: make([]types.ResponseRow, rand.Intn(10)+10), }, } @@ -187,21 +186,21 @@ func createRandomQueryResponse(cli, worker *nodeProfile) ( return } -func createRandomQueryResponseWithRequest(req *wt.SignedRequestHeader, worker *nodeProfile) ( - r *wt.SignedResponseHeader, err error, +func createRandomQueryResponseWithRequest(req *types.SignedRequestHeader, worker *nodeProfile) ( 
+ r *types.SignedResponseHeader, err error, ) { - resp := &wt.Response{ - Header: wt.SignedResponseHeader{ - ResponseHeader: wt.ResponseHeader{ + resp := &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ Request: *req, NodeID: worker.NodeID, Timestamp: createRandomTimeAfter(req.Timestamp, 100), }, }, - Payload: wt.ResponsePayload{ + Payload: types.ResponsePayload{ Columns: createRandomStrings(10, 10, 10, 10), DeclTypes: createRandomStrings(10, 10, 10, 10), - Rows: make([]wt.ResponseRow, rand.Intn(10)+10), + Rows: make([]types.ResponseRow, rand.Intn(10)+10), }, } @@ -221,12 +220,12 @@ func createRandomQueryResponseWithRequest(req *wt.SignedRequestHeader, worker *n return } -func createRandomQueryAckWithResponse(resp *wt.SignedResponseHeader, cli *nodeProfile) ( - r *wt.SignedAckHeader, err error, +func createRandomQueryAckWithResponse(resp *types.SignedResponseHeader, cli *nodeProfile) ( + r *types.SignedAckHeader, err error, ) { - ack := &wt.Ack{ - Header: wt.SignedAckHeader{ - AckHeader: wt.AckHeader{ + ack := &types.Ack{ + Header: types.SignedAckHeader{ + AckHeader: types.AckHeader{ Response: *resp, NodeID: cli.NodeID, Timestamp: createRandomTimeAfter(resp.Timestamp, 100), @@ -242,16 +241,16 @@ func createRandomQueryAckWithResponse(resp *wt.SignedResponseHeader, cli *nodePr return } -func createRandomQueryAck(cli, worker *nodeProfile) (r *wt.SignedAckHeader, err error) { +func createRandomQueryAck(cli, worker *nodeProfile) (r *types.SignedAckHeader, err error) { resp, err := createRandomQueryResponse(cli, worker) if err != nil { return } - ack := &wt.Ack{ - Header: wt.SignedAckHeader{ - AckHeader: wt.AckHeader{ + ack := &types.Ack{ + Header: types.SignedAckHeader{ + AckHeader: types.AckHeader{ Response: *resp, NodeID: cli.NodeID, Timestamp: createRandomTimeAfter(resp.Timestamp, 100), @@ -267,7 +266,7 @@ func createRandomQueryAck(cli, worker *nodeProfile) (r *wt.SignedAckHeader, err return } -func createRandomNodesAndAck() (r *wt.SignedAckHeader, err error) { +func createRandomNodesAndAck() (r *types.SignedAckHeader, err error) { cli, err := newRandomNode() if err != nil { @@ -321,7 +320,7 @@ func registerNodesWithPublicKey(pub *asymmetric.PublicKey, diff int, num int) ( return } -func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -332,9 +331,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: genesisHash, @@ -347,7 +346,13 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error for i, n := 0, rand.Intn(10)+10; i < n; i++ { h := &hash.Hash{} rand.Read(h[:]) - b.PushAckedQuery(h) + b.Acks = []*types.SignedAckHeader{ + { + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: *h, + }, + }, + } } if isGenesis { @@ -359,7 +364,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error return } - b.Queries = nil b.SignedHeader.GenesisHash = hash.Hash{} b.SignedHeader.Header.Producer = proto.NodeID(nis[0].Hash.String()) } @@ -368,9 +372,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) 
(b *ct.Block, err error return } -func createRandomQueries(x int) (acks []*wt.SignedAckHeader, err error) { +func createRandomQueries(x int) (acks []*types.SignedAckHeader, err error) { n := rand.Intn(x) - acks = make([]*wt.SignedAckHeader, n) + acks = make([]*types.SignedAckHeader, n) for i := range acks { if acks[i], err = createRandomNodesAndAck(); err != nil { @@ -381,8 +385,8 @@ func createRandomQueries(x int) (acks []*wt.SignedAckHeader, err error) { return } -func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*wt.SignedAckHeader) ( - b *ct.Block, err error, +func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*types.SignedAckHeader) ( + b *types.Block, err error, ) { // Generate key pair priv, _, err := asymmetric.GenSecp256k1KeyPair() @@ -394,9 +398,9 @@ func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*wt.SignedAc h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: genesis, @@ -406,15 +410,11 @@ func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*wt.SignedAc }, } - for _, ack := range acks { - b.PushAckedQuery(&ack.HeaderHash) - } - err = b.PackAndSignBlock(priv) return } -func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err error) { +func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *proto.Peers, err error) { if num <= 0 { return } @@ -439,29 +439,20 @@ func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err err return } - s := make([]*kayak.Server, num) + s := make([]proto.NodeID, num) h := &hash.Hash{} for i := range s { rand.Read(h[:]) - s[i] = &kayak.Server{ - Role: func() proto.ServerRole { - if i == 0 { - return proto.Leader - } - return proto.Follower - }(), - ID: proto.NodeID(nis[i].Hash.String()), - PubKey: pub, - } + s[i] = proto.NodeID(nis[i].Hash.String()) } - p = &kayak.Peers{ - Term: 0, - Leader: s[0], - Servers: s, - PubKey: pub, - Signature: nil, + p = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 0, + Leader: s[0], + Servers: s, + }, } if err = p.Sign(priv); err != nil { diff --git a/sqlchain/storage/doc.go b/storage/doc.go similarity index 100% rename from sqlchain/storage/doc.go rename to storage/doc.go diff --git a/sqlchain/storage/dsn.go b/storage/dsn.go similarity index 86% rename from sqlchain/storage/dsn.go rename to storage/dsn.go index 08177de94..e74663823 100644 --- a/sqlchain/storage/dsn.go +++ b/storage/dsn.go @@ -82,7 +82,11 @@ func (dsn *DSN) AddParam(key, value string) { dsn.params = make(map[string]string) } - dsn.params[key] = value + if value == "" { + delete(dsn.params, key) + } else { + dsn.params[key] = value + } } // GetParam gets the value. @@ -90,3 +94,16 @@ func (dsn *DSN) GetParam(key string) (value string, ok bool) { value, ok = dsn.params[key] return } + +// Clone returns a copy of current dsn. 
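// Usage notes for the DSN changes in this hunk (values are illustrative):
// AddParam now treats an empty value as a delete, and Clone yields an
// independent parameter map, so mutating the copy leaves the original
// untouched:
//
//	d, _ := NewDSN("file:test.db?cache=shared")
//	d.AddParam("cache", "")      // empty value now removes the parameter
//	_, ok := d.GetParam("cache") // ok == false
//
//	c := d.Clone()
//	c.AddParam("mode", "memory") // does not affect d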
+func (dsn *DSN) Clone() (copy *DSN) {
+	copy = &DSN{}
+	copy.filename = dsn.filename
+	copy.params = make(map[string]string, len(dsn.params))
+
+	for k, v := range dsn.params {
+		copy.params[k] = v
+	}
+
+	return
+}
diff --git a/sqlchain/storage/dsn_test.go b/storage/dsn_test.go
similarity index 100%
rename from sqlchain/storage/dsn_test.go
rename to storage/dsn_test.go
diff --git a/storage/storage.go b/storage/storage.go
index 55141a8d9..7e36bad0e 100644
--- a/storage/storage.go
+++ b/storage/storage.go
@@ -14,371 +14,425 @@
  * limitations under the License.
  */

-// Package storage implements simple key-value storage interfaces based on sqlite3.
-//
-// Although a sql.DB should be safe for concurrent use according to
-// https://golang.org/pkg/database/sql/#OpenDB, the go-sqlite3 implementation only guarantees
-// the safety of concurrent readers. See https://github.com/mattn/go-sqlite3/issues/148 for details.
-//
-// As a result, here are some suggestions:
-//
-// 1. Perform as many concurrent GetValue(s) operations as you like;
-// 2. Use only one goroutine to perform SetValue(s)/DelValue(s) operations;
-// 3. Or implement a simple busy waiting yourself on a go-sqlite3.ErrLocked error if you must use
-// concurrent writers.
 package storage

 import (
+	"context"
 	"database/sql"
+	"errors"
 	"fmt"
+	"io"
 	"sync"
+
+	"github.com/CovenantSQL/CovenantSQL/twopc"
+	"github.com/CovenantSQL/CovenantSQL/utils/log"

 	// Register CovenantSQL/go-sqlite3-encrypt engine.
 	_ "github.com/CovenantSQL/go-sqlite3-encrypt"
 )

 var (
 	index = struct {
-		mu *sync.Mutex
+		sync.Mutex
 		db map[string]*sql.DB
 	}{
-		&sync.Mutex{},
-		make(map[string]*sql.DB),
+		db: make(map[string]*sql.DB),
 	}
 )

-func openDB(dsn string) (db *sql.DB, err error) {
-	index.mu.Lock()
-	defer index.mu.Unlock()
-
-	db = index.db[dsn]
-	if db == nil {
-		db, err = sql.Open("sqlite3", dsn)
-		if err != nil {
-			return nil, err
-		}
-
-		index.db[dsn] = db
-	}
-
-	return db, err
+// Query represents the single query of sqlite.
+type Query struct {
+	Pattern string
+	Args []sql.NamedArg
 }

-// Storage represents a key-value storage.
-type Storage struct {
-	dsn string
-	table string
-	db *sql.DB
+// ExecLog represents the execution log of sqlite.
+type ExecLog struct {
+	ConnectionID uint64
+	SeqNo uint64
+	Timestamp int64
+	Queries []Query
 }

-// KV represents a key-value pair.
-type KV struct {
-	Key string
-	Value []byte
+// ExecResult represents the execution result of sqlite.
+type ExecResult struct {
+	LastInsertID int64
+	RowsAffected int64
 }

-// OpenStorage opens a database using the specified DSN and ensures that the specified table exists.
-func OpenStorage(dsn string, table string) (st *Storage, err error) {
-	// Open database
-	var db *sql.DB
-	db, err = openDB(dsn)
+func openDB(dsn string) (db *sql.DB, err error) {
+	// Rebuild DSN.
+	d, err := NewDSN(dsn)

 	if err != nil {
-		return st, err
+		return nil, err
 	}

-	// Ensure table
-	stmt := fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%s` (`key` TEXT PRIMARY KEY, `value` BLOB)",
-		table)
+	d.AddParam("_journal_mode", "WAL")
+	d.AddParam("_synchronous", "NORMAL")
+	fdsn := d.Format()
+
+	fn := d.GetFileName()
+	mode, _ := d.GetParam("mode")
+	cache, _ := d.GetParam("cache")

-	if _, err = db.Exec(stmt); err != nil {
-		return st, err
+	if (fn == ":memory:" || mode == "memory") && cache != "shared" {
+		// Return a new DB instance if it's in memory and private.
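+		// Every connection to a private in-memory SQLite database sees its
+		// own fresh database, so this handle is opened directly and is never
+		// cached in the shared index below.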
+		db, err = sql.Open("sqlite3", fdsn)
+		return
 	}

-	st = &Storage{dsn, table, db}
-	return st, err
-}
+	index.Lock()
+	db, ok := index.db[d.filename]
+	index.Unlock()

-// SetValue sets or replace the value to key.
-func (s *Storage) SetValue(key string, value []byte) (err error) {
-	stmt := fmt.Sprintf("INSERT OR REPLACE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table)
-	_, err = s.db.Exec(stmt, key, value)
+	if !ok {
+		db, err = sql.Open("sqlite3", fdsn)

-	return err
-}
+		if err != nil {
+			return nil, err
+		}
+
+		index.Lock()
+		index.db[d.filename] = db
+		index.Unlock()
+	}

-// SetValueIfNotExist sets the value to key if it doesn't exist.
-func (s *Storage) SetValueIfNotExist(key string, value []byte) (err error) {
-	stmt := fmt.Sprintf("INSERT OR IGNORE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table)
-	_, err = s.db.Exec(stmt, key, value)
+	return
+}

-	return err
+// TxID represents a transaction ID.
+type TxID struct {
+	ConnectionID uint64
+	SeqNo uint64
+	Timestamp int64
 }

-// DelValue deletes the value of key.
-func (s *Storage) DelValue(key string) (err error) {
-	stmt := fmt.Sprintf("DELETE FROM `%s` WHERE `key` = ?", s.table)
-	_, err = s.db.Exec(stmt, key)
+func equalTxID(x, y *TxID) bool {
+	return x.ConnectionID == y.ConnectionID && x.SeqNo == y.SeqNo && x.Timestamp == y.Timestamp
+}

-	return err
+// Storage represents an underlying storage implementation based on sqlite3.
+type Storage struct {
+	sync.Mutex
+	dsn string
+	db *sql.DB
+	tx *sql.Tx // Current tx
+	id TxID
+	queries []Query
 }

-// GetValue fetches the value of key.
-func (s *Storage) GetValue(key string) (value []byte, err error) {
-	stmt := fmt.Sprintf("SELECT `value` FROM `%s` WHERE `key` = ?", s.table)
+// New returns a new storage connected by dsn.
+func New(dsn string) (st *Storage, err error) {
+	db, err := openDB(dsn)

-	if err = s.db.QueryRow(stmt, key).Scan(&value); err == sql.ErrNoRows {
-		err = nil
+	if err != nil {
+		return
 	}

-	return value, err
+	return &Storage{
+		dsn: dsn,
+		db: db,
+	}, nil
 }

-// SetValues sets or replaces the key-value pairs in kvs.
-//
-// Note that this is not a transaction. We use a prepared statement to send these queries. Each
-// call may fail while part of the queries succeed.
-func (s *Storage) SetValues(kvs []KV) (err error) {
-	stmt := fmt.Sprintf("INSERT OR REPLACE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table)
-	pStmt, err := s.db.Prepare(stmt)
+// Prepare implements prepare method of two-phase commit worker.
+func (s *Storage) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) {
+	el, ok := wb.(*ExecLog)

-	if err != nil {
-		return err
+	if !ok {
+		return errors.New("unexpected WriteBatch type")
 	}

-	defer pStmt.Close()
+	s.Lock()
+	defer s.Unlock()

-	for _, row := range kvs {
-		if _, err = pStmt.Exec(row.Key, row.Value); err != nil {
-			return err
+	if s.tx != nil {
+		if equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) {
+			s.queries = el.Queries
+			return nil
 		}
+
+		return fmt.Errorf("twopc: inconsistent state, currently in tx: "+
+			"conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp)
 	}

+	s.tx, err = s.db.BeginTx(ctx, nil)
+
+	if err != nil {
+		return
+	}
+
+	s.id = TxID{el.ConnectionID, el.SeqNo, el.Timestamp}
+	s.queries = el.Queries
+
 	return nil
 }

-// SetValuesIfNotExist sets the key-value pairs in kvs if the key doesn't exist.
-//
-// Note that this is not a transaction. We use a prepared statement to send these queries. Each
-// call may fail while part of the queries succeed.
-func (s *Storage) SetValuesIfNotExist(kvs []KV) (err error) {
-	stmt := fmt.Sprintf("INSERT OR IGNORE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table)
-	pStmt, err := s.db.Prepare(stmt)
+// Commit implements commit method of two-phase commit worker.
+func (s *Storage) Commit(ctx context.Context, wb twopc.WriteBatch) (result interface{}, err error) {
+	el, ok := wb.(*ExecLog)

-	if err != nil {
-		return err
+	if !ok {
+		err = errors.New("unexpected WriteBatch type")
+		return
 	}

-	defer pStmt.Close()
+	s.Lock()
+	defer s.Unlock()
+
+	if s.tx != nil {
+		if equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) {
+			// get last insert id and affected rows result
+			execResult := ExecResult{}
+
+			for _, q := range s.queries {
+				// convert argument types
+				args := make([]interface{}, len(q.Args))
+
+				for i, v := range q.Args {
+					args[i] = v
+				}
+
+				var res sql.Result
+				res, err = s.tx.ExecContext(ctx, q.Pattern, args...)
+
+				if err != nil {
+					log.WithError(err).Debug("commit query failed")
+					s.tx.Rollback()
+					s.tx = nil
+					s.queries = nil
+					return
+				}
+
+				lastInsertID, _ := res.LastInsertId()
+				rowsAffected, _ := res.RowsAffected()

-	for _, row := range kvs {
-		if _, err = pStmt.Exec(row.Key, row.Value); err != nil {
-			return err
+				execResult.LastInsertID = lastInsertID
+				execResult.RowsAffected += rowsAffected
+			}
+
+			s.tx.Commit()
+			s.tx = nil
+			s.queries = nil
+			result = execResult
+
+			return
 		}
+
+		err = fmt.Errorf("twopc: inconsistent state, currently in tx: "+
+			"conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp)
+		return
 	}

-	return nil
+	err = errors.New("twopc: tx not prepared")
+	return
 }

-// DelValues deletes the values of the keys.
-//
-// Note that this is not a transaction. We use a prepared statement to send these queries. Each
-// call may fail while part of the queries succeed.
-func (s *Storage) DelValues(keys []string) (err error) {
-	stmt := fmt.Sprintf("DELETE FROM `%s` WHERE `key` = ?", s.table)
-	pStmt, err := s.db.Prepare(stmt)
+// Rollback implements rollback method of two-phase commit worker.
+func (s *Storage) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) {
+	el, ok := wb.(*ExecLog)

-	if err != nil {
-		return err
+	if !ok {
+		return errors.New("unexpected WriteBatch type")
 	}

-	defer pStmt.Close()
+	s.Lock()
+	defer s.Unlock()

-	for _, key := range keys {
-		if _, err = pStmt.Exec(key); err != nil {
-			return err
-		}
+	if !equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) {
+		return fmt.Errorf("twopc: inconsistent state, currently in tx: "+
+			"conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp)
+	}
+
+	if s.tx != nil {
+		s.tx.Rollback()
+		s.tx = nil
+		s.queries = nil
 	}

 	return nil
 }

-// GetValues fetches the values of keys.
-//
-// Note that this is not a transaction. We use a prepared statement to send these queries. Each
-// call may fail while part of the queries succeed and some values may be altered during the
-// queries. But the results will be returned only if all the queries succeed.
-func (s *Storage) GetValues(keys []string) (kvs []KV, err error) {
-	stmt := fmt.Sprintf("SELECT `value` FROM `%s` WHERE `key` = ?", s.table)
-	pStmt, err := s.db.Prepare(stmt)
+// Query implements read-only query feature.
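+// It runs inside a read-only transaction that is always rolled back, and
+// only the last Query in the slice is actually executed. A minimal sketch,
+// assuming the `kv` table from the tests below:
+//
+//	cols, types, rows, err := st.Query(context.Background(), []Query{{
+//		Pattern: "SELECT `value` FROM `kv` WHERE `key` = :k",
+//		Args: []sql.NamedArg{sql.Named("k", "k1")},
+//	}})
+//	// on success: cols == []string{"value"}, types == []string{"BLOB"},
+//	// and rows holds the raw driver values row by row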
+func (s *Storage) Query(ctx context.Context, queries []Query) (columns []string, types []string,
+	data [][]interface{}, err error) {
+	data = make([][]interface{}, 0)

-	if err != nil {
-		return nil, err
+	if len(queries) == 0 {
+		return
+	}
+
+	var tx *sql.Tx
+	var txOptions = &sql.TxOptions{
+		ReadOnly: true,
 	}

-	defer pStmt.Close()
+	if tx, err = s.db.BeginTx(ctx, txOptions); err != nil {
+		return
+	}

-	kvs = make([]KV, len(keys))
+	// always rollback on complete
+	defer tx.Rollback()

-	for index, key := range keys {
-		kvs[index].Key = key
+	q := queries[len(queries)-1]

-		if err = pStmt.QueryRow(key).Scan(&kvs[index].Value); err != nil && err != sql.ErrNoRows {
-			return nil, err
-		}
+	// convert argument types
+	args := make([]interface{}, len(q.Args))
+
+	for i, v := range q.Args {
+		args[i] = v
 	}

-	return kvs, nil
-}
+	var rows *sql.Rows
+	if rows, err = tx.Query(q.Pattern, args...); err != nil {
+		return
+	}

-// SetValuesTx sets or replaces the key-value pairs in kvs as a transaction.
-func (s *Storage) SetValuesTx(kvs []KV) (err error) {
-	// Begin transaction
-	tx, err := s.db.Begin()
+	// free result set
+	defer rows.Close()

-	if err != nil {
-		return err
+	// get rows meta
+	if columns, err = rows.Columns(); err != nil {
+		return
 	}

-	defer func() {
-		if err != nil {
-			tx.Rollback()
-		} else {
-			err = tx.Commit()
-		}
-	}()
-
-	// Prepare statement
-	stmt := fmt.Sprintf("INSERT OR REPLACE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table)
-	pStmt, err := tx.Prepare(stmt)
+	// if there are no columns, treat the result as empty
+	if len(columns) == 0 {
+		return
+	}

-	if err != nil {
-		return err
+	// get types meta
+	if types, err = s.transformColumnTypes(rows.ColumnTypes()); err != nil {
+		return
 	}

-	defer pStmt.Close()
+	rs := newRowScanner(len(columns))

-	// Execute queries
-	for _, row := range kvs {
-		if _, err = pStmt.Exec(row.Key, row.Value); err != nil {
-			return err
+	for rows.Next() {
+		err = rows.Scan(rs.ScanArgs()...)
+		if err != nil {
+			return
 		}
+
+		data = append(data, rs.GetRow())
 	}

-	return nil
+	err = rows.Err()
+	return
 }

-// SetValuesIfNotExistTx sets the key-value pairs in kvs if the key doesn't exist as a transaction.
-func (s *Storage) SetValuesIfNotExistTx(kvs []KV) (err error) {
-	// Begin transaction
-	tx, err := s.db.Begin()
+// Exec implements write query feature.
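+// All queries in the batch run in one read-write transaction; RowsAffected
+// accumulates across statements while LastInsertID keeps only the final
+// statement's value. A minimal sketch, again assuming the `kv` table:
+//
+//	res, err := st.Exec(context.Background(), []Query{
+//		{Pattern: "INSERT OR REPLACE INTO `kv` VALUES ('k1', 'v1')"},
+//		{Pattern: "INSERT OR REPLACE INTO `kv` VALUES ('k2', 'v2')"},
+//	})
+//	// on success res.RowsAffected == 2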
+func (s *Storage) Exec(ctx context.Context, queries []Query) (result ExecResult, err error) {
+	if len(queries) == 0 {
+		return
+	}

-	if err != nil {
-		return err
+	var tx *sql.Tx
+	var txOptions = &sql.TxOptions{
+		ReadOnly: false,
 	}

-	defer func() {
-		if err != nil {
-			tx.Rollback()
-		} else {
-			err = tx.Commit()
-		}
-	}()
+	if tx, err = s.db.BeginTx(ctx, txOptions); err != nil {
+		return
+	}

-	// Prepare statement
-	stmt := fmt.Sprintf("INSERT OR IGNORE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table)
-	pStmt, err := tx.Prepare(stmt)
+	defer tx.Rollback()

-	if err != nil {
-		return err
-	}
+	for _, q := range queries {
+		// convert argument types
+		args := make([]interface{}, len(q.Args))

-	defer pStmt.Close()
+		for i, v := range q.Args {
+			args[i] = v
+		}

-	// Execute queries
-	for _, row := range kvs {
-		if _, err = pStmt.Exec(row.Key, row.Value); err != nil {
-			return err
+		var r sql.Result
+		if r, err = tx.Exec(q.Pattern, args...); err != nil {
+			log.WithError(err).Debug("execute query failed")
+			return
 		}
+
+		var affected int64
+		affected, _ = r.RowsAffected()
+		result.RowsAffected += affected
+		result.LastInsertID, _ = r.LastInsertId()
 	}

-	return nil
-}
+	tx.Commit()

-// DelValuesTx deletes the values of the keys as a transaction.
-func (s *Storage) DelValuesTx(keys []string) (err error) {
-	// Begin transaction
-	tx, err := s.db.Begin()
+	return
+}

+// Close implements database safe close feature.
+func (s *Storage) Close() (err error) {
+	d, err := NewDSN(s.dsn)
 	if err != nil {
-		return err
+		return
 	}

-	defer func() {
-		if err != nil {
-			tx.Rollback()
-		} else {
-			err = tx.Commit()
-		}
-	}()
-
-	// Prepare statement
-	stmt := fmt.Sprintf("DELETE FROM `%s` WHERE `key` = ?", s.table)
-	pStmt, err := tx.Prepare(stmt)
+	index.Lock()
+	defer index.Unlock()
+	delete(index.db, d.filename)
+	return s.db.Close()
+}

-	if err != nil {
-		return err
+func (s *Storage) transformColumnTypes(columnTypes []*sql.ColumnType, e error) (types []string, err error) {
+	if e != nil {
+		err = e
+		return
 	}

-	defer pStmt.Close()
+	types = make([]string, len(columnTypes))

-	// Execute queries
-	for _, key := range keys {
-		if _, err = pStmt.Exec(key); err != nil {
-			return err
-		}
+	for i, c := range columnTypes {
+		types[i] = c.DatabaseTypeName()
 	}

-	return nil
+	return
 }

-// GetValuesTx fetches the values of keys as a transaction.
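+// A minimal sketch of driving the two-phase commit worker defined above;
+// the DSN, TxID fields and query text are assumed values for illustration:
+//
+//	st, _ := New("file:demo.db")
+//	el := &ExecLog{
+//		ConnectionID: 1,
+//		SeqNo: 1,
+//		Timestamp: time.Now().UnixNano(),
+//		Queries: []Query{{Pattern: "CREATE TABLE `t` (`v` TEXT)"}},
+//	}
+//	ctx := context.Background()
+//	if err := st.Prepare(ctx, el); err == nil {
+//		if _, err = st.Commit(ctx, el); err != nil {
+//			// on a failed query Commit already rolled the tx back, so an
+//			// extra Rollback with the same TxID is a harmless no-op
+//			st.Rollback(ctx, el)
+//		}
+//	}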
-func (s *Storage) GetValuesTx(keys []string) (kvs []KV, err error) { - // Begin transaction - tx, err := s.db.Begin() +// golang does trick convert, use rowScanner to return the original result type in sqlite3 driver +type rowScanner struct { + fieldCnt int + column int // current column + fields []interface{} // temp fields + scanArgs []interface{} +} - if err != nil { - return nil, err +func newRowScanner(fieldCnt int) (s *rowScanner) { + s = &rowScanner{ + fieldCnt: fieldCnt, + column: 0, + fields: make([]interface{}, fieldCnt), + scanArgs: make([]interface{}, fieldCnt), } - defer func() { - if err != nil { - tx.Rollback() - } else { - err = tx.Commit() - } - }() + for i := 0; i != fieldCnt; i++ { + s.scanArgs[i] = s + } - // Prepare statement - stmt := fmt.Sprintf("SELECT `value` FROM `%s` WHERE `key` = ?", s.table) - pStmt, err := tx.Prepare(stmt) + return +} - if err != nil { - return nil, err +func (s *rowScanner) Scan(src interface{}) error { + if s.fieldCnt <= s.column { + // read complete + return io.EOF } - defer pStmt.Close() - - // Execute queries - kvs = make([]KV, len(keys)) + s.fields[s.column] = src + s.column++ - for index, key := range keys { - kvs[index].Key = key - err = pStmt.QueryRow(key).Scan(&kvs[index].Value) + return nil +} - if err != nil && err != sql.ErrNoRows { - return nil, err - } - } +func (s *rowScanner) GetRow() []interface{} { + return s.fields +} - return kvs, nil +func (s *rowScanner) ScanArgs() []interface{} { + // reset + s.column = 0 + s.fields = make([]interface{}, s.fieldCnt) + return s.scanArgs } diff --git a/storage/storage_test.go b/storage/storage_test.go index 970183471..0cda67128 100644 --- a/storage/storage_test.go +++ b/storage/storage_test.go @@ -17,1000 +17,337 @@ package storage import ( - "bytes" + "context" + "database/sql" "fmt" "io/ioutil" - "math/rand" - "os" "reflect" - "sync" "testing" "time" ) -var ( - sampleTexts = []KV{ - {"Philip K. Dick", []byte("All their equipment and instruments are alive.")}, - {"Philip K. Dick", []byte("The face of the moon was in shadow.")}, - {"Samuel R. Delany", []byte("A red flair silhouetted the jagged edge of a wing.")}, - {"Samuel R. Delany", []byte("Mist enveloped the ship three hours out from port.")}, - {"Samuel R. Delany", []byte("Silver mist suffused the deck of the ship.")}, - {"Samuel R. Delany", []byte("Waves flung themselves at the blue evening.")}, - {"Mary Shelley", []byte("I watched the storm, so beautiful yet terrific.")}, - {"John Munro", []byte("Almost before we knew it, we had left the ground.")}, - {"John Munro", []byte("The sky was cloudless and of a deep dark blue.")}, - {"John Munro", []byte("The spectacle before us was indeed sublime.")}, - {"E. E. Smith", []byte("A shining crescent far beneath the flying vessel.")}, - {"Isaac Asimov", []byte("It was going to be a lonely trip back.")}, - {"Robert Louis Stevenson", []byte("My two natures had memory in common.")}, - {"Harry Harrison", []byte("The face of the moon was in shadow.")}, - {"H. G. 
Wells", []byte("Then came the night of the first falling star.")}, - } - - ignoredSampleTexts map[string][]byte - replacedSampleTexts map[string][]byte - keysOfSampleTexts []string -) - -func buildReplacedMapFromKVs(kvs []KV) (kvsmap map[string][]byte) { - kvsmap = make(map[string][]byte) - - for _, row := range kvs { - if row.Value != nil { - kvsmap[row.Key] = row.Value - } - } - - return kvsmap -} - -func buildIgnoredMapFromKVs(kvs []KV) (kvsmap map[string][]byte) { - kvsmap = make(map[string][]byte) - - for _, row := range kvs { - if _, ok := kvsmap[row.Key]; !ok && row.Value != nil { - kvsmap[row.Key] = row.Value - } - } - - return kvsmap -} - -func randomDel(kvsmap map[string][]byte) (rkvsmap map[string][]byte, dkeys []string) { - knum := len(kvsmap) - dnum := knum / 2 - list := rand.Perm(knum) - dmap := make([]bool, knum) - - for index := range dmap { - dmap[index] = false - } - - for index, iindex := range list { - if index < dnum { - dmap[iindex] = true - } - } - - index := 0 - dkeys = make([]string, 0, dnum) - rkvsmap = make(map[string][]byte) - - for k, v := range kvsmap { - if dmap[index] { - dkeys = append(dkeys, k) - } else { - rkvsmap[k] = v - } - - index++ - } - - return rkvsmap, dkeys -} - -func testSetup() { - // Build datasets for test - ignoredSampleTexts = buildIgnoredMapFromKVs(sampleTexts) - replacedSampleTexts = buildReplacedMapFromKVs(sampleTexts) - - index := 0 - keysOfSampleTexts = make([]string, len(replacedSampleTexts)) - - for key := range replacedSampleTexts { - keysOfSampleTexts[index] = key - index++ - } -} - -func TestMain(m *testing.M) { - testSetup() - os.Exit(m.Run()) -} - -func TestBadDSN(t *testing.T) { - // Use bad DSN to open storage - if _, err := OpenStorage(os.TempDir(), "test-bad-dsn"); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } -} - -func TestOpenStorage(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - _, err = OpenStorage(fl.Name(), "test-open-storage") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } -} - -func TestSetValue(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-value") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } -} - -func TestSetValueIfNotExist(t *testing.T) { - // Open storage - fl, errTemp := ioutil.TempFile("", "db") - - if errTemp != nil { - t.Fatalf("Error occurred: %s", errTemp.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-value-if-not-exist") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValueIfNotExist(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - } - - // Verify values - for k, v := range ignoredSampleTexts { - ov, err := st.GetValue(k) - - if err != nil 
{ - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } -} - -func TestGetValue(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-get-value") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } - - // Test get nil value - nonexistentKey := "Jules Verne" - v, err := st.GetValue(nonexistentKey) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if v != nil { - t.Fatalf("Unexpected output result: got %v while expecting nil", v) - } -} - -func TestDelValue(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-del-value") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } - - // Delete value - delKey := "Samuel R. 
Delany" - err = st.DelValue(delKey) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify nil result - v, err := st.GetValue(delKey) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if v != nil { - t.Fatalf("Unexpected output result: got %v while expecting nil", v) - } - - // Test deleting a nonexistent key: it should not return any error - nonexistentKey := "Jules Verne" - - if err = st.DelValue(nonexistentKey); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } -} - -func TestSetValues(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-values") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValues(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValues(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(replacedSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", replacedSampleTexts, okvs) - } -} - -func TestSetValuesIfNotExist(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-values-if-not-exist") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesIfNotExist(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } +func newQuery(query string, args ...interface{}) (q Query) { + q.Pattern = query - // Verify values - kvs, err := st.GetValues(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // convert args + q.Args = make([]sql.NamedArg, len(args)) + for i, v := range args { + q.Args[i] = sql.Named("", v) } - okvs := buildIgnoredMapFromKVs(kvs) - - if !reflect.DeepEqual(ignoredSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", ignoredSampleTexts, okvs) - } + return } -func TestDelValues(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") +func newNamedQuery(query string, args map[string]interface{}) (q Query) { + q.Pattern = query + q.Args = make([]sql.NamedArg, len(args)) + i := 0 - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // convert args + for n, v := range args { + q.Args[i] = sql.Named(n, v) + i++ } - st, err := OpenStorage(fl.Name(), "test-del-values") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValues(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Randomly delete some values - rkvs, dkeys := randomDel(replacedSampleTexts) - - if err = st.DelValues(dkeys); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValues(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(rkvs, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", rkvs, okvs) - } + return } -func TestGetValues(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") +func TestBadType(t *testing.T) { + fl, err := ioutil.TempFile("", "sqlite3-") if err 
!= nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Error occurred: %v", err) } - st, err := OpenStorage(fl.Name(), "test-get-values") + st, err := New(fmt.Sprintf("file:%s", fl.Name())) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Error occurred: %v", err) } - // Set values - if err = st.SetValues(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Add some nonexistent keys - mixedKeys := append(keysOfSampleTexts, "Jules Verne", "Kathy Tyers", "Jack Vance") - - // Verify values - kvs, err := st.GetValues(mixedKeys) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(replacedSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", replacedSampleTexts, okvs) - } -} - -func TestSetValuesTx(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-values-tx") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValuesTx(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(replacedSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", replacedSampleTexts, okvs) - } -} - -func TestSetValuesIfNotExistTx(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-values-if-not-exist-tx") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesIfNotExistTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValuesTx(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildIgnoredMapFromKVs(kvs) - - if !reflect.DeepEqual(ignoredSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", ignoredSampleTexts, okvs) - } -} - -func TestDelValuesTx(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-del-values-tx") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Randomly delete some values - rkvs, dkeys := randomDel(replacedSampleTexts) - - if err = st.DelValuesTx(dkeys); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValuesTx(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(rkvs, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", rkvs, okvs) - } -} - -func TestGetValuesTx(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-get-values-tx") - - if err != nil { - 
t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Add some nonexistent keys - mixedKeys := append(keysOfSampleTexts, "Jules Verne", "Kathy Tyers", "Jack Vance") - - // Verify values - kvs, err := st.GetValuesTx(mixedKeys) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(replacedSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", replacedSampleTexts, okvs) - } -} - -func TestDBError(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-db-error") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Hack the internal structs and filesystem to wipe out the databse - if err = st.db.Close(); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - delete(index.db, fl.Name()) - - if err = os.Truncate(fl.Name(), 0); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if st.db, err = openDB(fl.Name()); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Now try some operations opon it - if err = st.SetValue("", nil); err == nil { + if err = st.Prepare(context.Background(), struct{}{}); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - if err = st.SetValues(sampleTexts); err == nil { + if _, err = st.Commit(context.Background(), struct{}{}); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - if err = st.SetValuesTx(sampleTexts); err == nil { + if err = st.Rollback(context.Background(), struct{}{}); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } +} - if err = st.SetValueIfNotExist("", nil); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } +func TestStorage(t *testing.T) { + fl, err := ioutil.TempFile("", "sqlite3-") - if err = st.SetValuesIfNotExist(sampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err != nil { + t.Fatalf("Error occurred: %v", err) } - if err = st.SetValuesIfNotExistTx(sampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } + st, err := New(fmt.Sprintf("file:%s", fl.Name())) - if err = st.DelValue(""); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err != nil { + t.Fatalf("Error occurred: %v", err) } - if err = st.DelValues(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + el1 := &ExecLog{ + ConnectionID: 1, + SeqNo: 1, + Timestamp: 
time.Now().UnixNano(), + Queries: []Query{ + newQuery("CREATE TABLE IF NOT EXISTS `kv` (`key` TEXT PRIMARY KEY, `value` BLOB)"), + newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k0', NULL)"), + newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k1', 'v1')"), + newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k2', 'v2')"), + newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k3', 'v3')"), + newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k3', 'v3-2')"), + newQuery("DELETE FROM `kv` WHERE `key`='k2'"), + }, } - if err = st.DelValuesTx(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + el2 := &ExecLog{ + ConnectionID: 1, + SeqNo: 2, + Timestamp: time.Now().UnixNano(), + Queries: []Query{ + newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k1', 'v1-2')"), + }, } - if _, err = st.GetValue(""); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err = st.Prepare(context.Background(), el1); err != nil { + t.Fatalf("Error occurred: %v", err) } - if _, err = st.GetValues(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err = st.Prepare(context.Background(), el1); err != nil { + t.Fatalf("Error occurred: %v", err) } - if _, err = st.GetValuesTx(keysOfSampleTexts); err == nil { + if err = st.Prepare(context.Background(), el2); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - // Hack the internal structs to close the database - if err = st.db.Close(); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - delete(index.db, fl.Name()) - - // Now try some operations opon it - if err = st.SetValue("", nil); err == nil { + if _, err = st.Commit(context.Background(), el2); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - if err = st.SetValues(sampleTexts); err == nil { + if err = st.Rollback(context.Background(), el2); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - if err = st.SetValuesTx(sampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") + var res interface{} + if res, err = st.Commit(context.Background(), el1); err != nil { + t.Fatalf("Error occurred: %v", err) } else { - t.Logf("Error occurred as expected: %s", err.Error()) + result := res.(ExecResult) + t.Logf("Result: %v", result) } - if err = st.SetValueIfNotExist("", nil); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } + // test query + columns, types, data, err := st.Query(context.Background(), + []Query{newQuery("SELECT * FROM `kv` ORDER BY `key` ASC")}) - if err = st.SetValuesIfNotExist(sampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } - - if err = st.SetValuesIfNotExistTx(sampleTexts); err == nil { - 
t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err != nil { + t.Fatalf("Query failed: %v", err.Error()) } - - if err = st.DelValue(""); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if !reflect.DeepEqual(columns, []string{"key", "value"}) { + t.Fatalf("Error column result: %v", columns) } - - if err = st.DelValues(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if !reflect.DeepEqual(types, []string{"TEXT", "BLOB"}) { + t.Fatalf("Error types result: %v", types) } - - if err = st.DelValuesTx(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") + if len(data) != 3 { + t.Fatalf("Error result count: %v, should be 3", len(data)) } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } - - if _, err = st.GetValue(""); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } - - if _, err = st.GetValues(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } - - if _, err = st.GetValuesTx(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } -} - -func TestDataPersistence(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - dsn := fmt.Sprintf("file:%s", fl.Name()) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(dsn, "test-data-persistence") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // compare rows + should1 := []interface{}{[]byte("k0"), nil} + should2 := []interface{}{[]byte("k1"), []byte("v1")} + should3 := []interface{}{[]byte("k3"), []byte("v3-2")} + t.Logf("Rows: %v", data) + if !reflect.DeepEqual(data[0], should1) { + t.Fatalf("Error result row: %v, should: %v", data[0], should1) } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if !reflect.DeepEqual(data[1], should2) { + t.Fatalf("Error result row: %v, should: %v", data[1], should2) } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) + if !reflect.DeepEqual(data[2], should3) { + t.Fatalf("Error result row: %v, should: %v", data[2], should2) } } - // Hack the internal structs to close the database - if err = st.db.Close(); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - delete(index.db, dsn) - - // Now reopen the storage and verify the data - st, err = OpenStorage(dsn, "test-data-persistence") + // test query with projection + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("SELECT `key` FROM `kv` ORDER BY `key` ASC")}) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Query failed: %v", err.Error()) } - - content, _ := ioutil.ReadFile(fl.Name()) - if !bytes.Contains(content, []byte(sampleTexts[0].Key)) { - 
t.Fatal("db is corrupted") + if !reflect.DeepEqual(columns, []string{"key"}) { + t.Fatalf("Error column result: %v", columns) } - - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } -} - -func TestCipherDBDataPersistence(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - dsn := fmt.Sprintf("file:%s?_crypto_key=auxten", fl.Name()) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if !reflect.DeepEqual(types, []string{"TEXT"}) { + t.Fatalf("Error types result: %v", types) } - - st, err := OpenStorage(dsn, "test-data-persistence") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if len(data) != 3 { + t.Fatalf("Error result count: %v, should be 3", len(data)) + } else { + // compare rows + should1 := []interface{}{[]byte("k0")} + should2 := []interface{}{[]byte("k1")} + should3 := []interface{}{[]byte("k3")} + t.Logf("Rows: %v", data) + if !reflect.DeepEqual(data[0], should1) { + t.Fatalf("Error result row: %v, should: %v", data[0], should1) } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if !reflect.DeepEqual(data[1], should2) { + t.Fatalf("Error result row: %v, should: %v", data[1], should2) } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) + if !reflect.DeepEqual(data[2], should3) { + t.Fatalf("Error result row: %v, should: %v", data[2], should2) } } - // Hack the internal structs to close the database - if err = st.db.Close(); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - delete(index.db, dsn) - - // Now reopen the storage and verify the data - st, err = OpenStorage(dsn, "test-data-persistence") + // test query with condition + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("SELECT `key` FROM `kv` WHERE `value` IS NOT NULL ORDER BY `key` ASC")}) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Query failed: %v", err.Error()) } - - content, _ := ioutil.ReadFile(fl.Name()) - if bytes.Contains(content, []byte(sampleTexts[0].Key)) { - t.Fatal("db not ciphered") + if !reflect.DeepEqual(columns, []string{"key"}) { + t.Fatalf("Error column result: %v", columns) } - - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } + if !reflect.DeepEqual(types, []string{"TEXT"}) { + t.Fatalf("Error types result: %v", types) } -} - -func randomSleep() { - r := rand.Intn(10) - time.Sleep(time.Duration(r) * time.Millisecond) -} - -func randomGetValue(wg *sync.WaitGroup, st *Storage, t *testing.T) { - defer wg.Done() - - for i := 0; i < 1000; i++ { - randomSleep() - key := keysOfSampleTexts[rand.Intn(len(keysOfSampleTexts))] - value := replacedSampleTexts[key] - - if ov, err := st.GetValue(key); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } else if !reflect.DeepEqual(value, ov) { - t.Fatalf("Unexpected output result: input = %v, output = 
%v", value, ov) + if len(data) != 2 { + t.Fatalf("Error result count: %v, should be 3", len(data)) + } else { + // compare rows + should1 := []interface{}{[]byte("k1")} + should2 := []interface{}{[]byte("k3")} + t.Logf("Rows: %v", data) + if !reflect.DeepEqual(data[0], should1) { + t.Fatalf("Error result row: %v, should: %v", data[0], should1) + } + if !reflect.DeepEqual(data[1], should2) { + t.Fatalf("Error result row: %v, should: %v", data[1], should2) } } -} -func TestConcurrency(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") + // test failed query + columns, types, data, err = st.Query(context.Background(), []Query{newQuery("SQL???? WHAT!!!!")}) - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if err == nil { + t.Fatal("Query should failed") + } else { + t.Logf("Query failed as expected with: %v", err.Error()) } - st, err := OpenStorage(fl.Name(), "test-data-persistence") + // test non-read query + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("DELETE FROM `kv` WHERE `value` IS NULL")}) - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + execResult, err := st.Exec(context.Background(), + []Query{newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k4', 'v4')")}) + if err != nil || execResult.RowsAffected != 1 { + t.Fatalf("Exec INSERT failed: %v", err) } - - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // test with arguments + execResult, err = st.Exec(context.Background(), []Query{newQuery("DELETE FROM `kv` WHERE `key`='k4'")}) + if err != nil || execResult.RowsAffected != 1 { + t.Fatalf("Exec DELETE failed: %v", err) } - - // Run concurrent GetValue - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go randomGetValue(&wg, st, t) + execResult, err = st.Exec(context.Background(), + []Query{newQuery("DELETE FROM `kv` WHERE `key`=?", "not_exist")}) + if err != nil || execResult.RowsAffected != 0 { + t.Fatalf("Exec DELETE failed: %v", err) } - wg.Wait() -} - -func TestCipherDBConcurrency(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") + // test again + columns, types, data, err = st.Query(context.Background(), []Query{newQuery("SELECT `key` FROM `kv`")}) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Query failed: %v", err.Error()) + } else if len(data) != 3 { + t.Fatalf("Last write query should not take any effect, row count: %v", len(data)) + } else { + t.Logf("Rows: %v", data) } - dsn := fmt.Sprintf("file:%s?_crypto_key=auxten", fl.Name()) - - st, err := OpenStorage(dsn, "test-data-persistence") + // test with select + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("SELECT `key` FROM `kv` WHERE `key` IN (?)", "k1")}) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Query failed: %v", err.Error()) + } else if len(data) != 1 { + t.Fatalf("Should only have one record, but actually %v", len(data)) + } else { + t.Logf("Rows: %v", data) } - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // test with select with named arguments + columns, types, data, err = st.Query(context.Background(), + []Query{newNamedQuery("SELECT `key` FROM `kv` WHERE `key` IN (:test2, :test1)", map[string]interface{}{ + "test1": "k1", + "test2": "k3", + })}) + if err != nil { + t.Fatalf("Query failed: %v", err.Error()) + } else if len(data) != 2 { + t.Fatalf("Should only have two 
records, but actually %v", len(data)) + } else { + t.Logf("Rows: %v", data) } - // Run concurrent GetValue - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go randomGetValue(&wg, st, t) + // test with function + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("SELECT COUNT(1) AS `c` FROM `kv`")}) + if err != nil { + t.Fatalf("Query failed: %v", err.Error()) + } else { + if len(columns) != 1 { + t.Fatalf("Query result should contain only one column, now %v", len(columns)) + } else if columns[0] != "c" { + t.Fatalf("Query result column name is not defined alias, but :%v", columns[0]) + } + if len(types) != 1 { + t.Fatalf("Query result should contain only one column, now %v", len(types)) + } else { + t.Logf("Query result type is: %v", types[0]) + } + if len(data) != 1 || len(data[0]) != 1 { + t.Fatalf("Query result should contain only one row and one column, now %v", data) + } else if !reflect.DeepEqual(data[0][0], int64(3)) { + t.Fatalf("Query result should be table row count 3, but: %v", data[0]) + } } - wg.Wait() - - content, _ := ioutil.ReadFile(fl.Name()) - if bytes.Contains(content, []byte(sampleTexts[0].Key)) { - t.Fatal("db not ciphered") + // test with timestamp fields + _, err = st.Exec(context.Background(), []Query{ + newQuery("CREATE TABLE `tm` (tm TIMESTAMP)"), + newQuery("INSERT INTO `tm` VALUES(DATE('NOW'))"), + }) + if err != nil { + t.Fatalf("Query failed: %v", err.Error()) + } else { + // query for values + _, _, data, err = st.Query(context.Background(), []Query{newQuery("SELECT `tm` FROM `tm`")}) + if len(data) != 1 || len(data[0]) != 1 { + t.Fatalf("Query result should contain only one row and one column, now %v", data) + } else if !reflect.TypeOf(data[0][0]).AssignableTo(reflect.TypeOf(time.Time{})) { + t.Fatalf("Query result should be time.Time type, but: %v", reflect.TypeOf(data[0][0]).String()) + } } } diff --git a/test/GNTE/GNTE b/test/GNTE/GNTE index 46a509a58..93e48d707 160000 --- a/test/GNTE/GNTE +++ b/test/GNTE/GNTE @@ -1 +1 @@ -Subproject commit 46a509a5800a5acf9f6f243fd259e48551b856c2 +Subproject commit 93e48d7072b002c3d070f9b712ff22b53c65c6b3 diff --git a/test/GNTE/conf/node_miner_10.250.100.2/config.yaml b/test/GNTE/conf/node_miner_10.250.100.2/config.yaml index 97cb6d3a4..2a301d150 100644 --- a/test/GNTE/conf/node_miner_10.250.100.2/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.3/config.yaml b/test/GNTE/conf/node_miner_10.250.100.3/config.yaml index af65305fa..1106c28bb 100644 --- a/test/GNTE/conf/node_miner_10.250.100.3/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.3/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.4/config.yaml b/test/GNTE/conf/node_miner_10.250.100.4/config.yaml index 1b829fb70..b19dace89 100644 --- a/test/GNTE/conf/node_miner_10.250.100.4/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.4/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: 
"1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.5/config.yaml b/test/GNTE/conf/node_miner_10.250.100.5/config.yaml index 810af9a5f..a09d83606 100755 --- a/test/GNTE/conf/node_miner_10.250.100.5/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.5/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.6/config.yaml b/test/GNTE/conf/node_miner_10.250.100.6/config.yaml index 7e07f7965..b96505d19 100755 --- a/test/GNTE/conf/node_miner_10.250.100.6/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.6/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.7/config.yaml b/test/GNTE/conf/node_miner_10.250.100.7/config.yaml index 3b60c4402..5370e24f6 100755 --- a/test/GNTE/conf/node_miner_10.250.100.7/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.7/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.8/config.yaml b/test/GNTE/conf/node_miner_10.250.100.8/config.yaml index d81332473..bc633a6fc 100755 --- a/test/GNTE/conf/node_miner_10.250.100.8/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.8/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.9/config.yaml b/test/GNTE/conf/node_miner_10.250.100.9/config.yaml index 6ca3fae99..887064529 100755 --- a/test/GNTE/conf/node_miner_10.250.100.9/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.9/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/fuse/node_0/config.yaml b/test/fuse/node_0/config.yaml new file mode 100644 index 000000000..9919e977e --- /dev/null +++ b/test/fuse/node_0/config.yaml @@ -0,0 +1,99 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:6122" +ThisNodeID: "00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + 
a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_0/private.key b/test/fuse/node_0/private.key new file mode 100644 index 000000000..449618c0a Binary files /dev/null and b/test/fuse/node_0/private.key differ diff --git a/test/fuse/node_1/config.yaml b/test/fuse/node_1/config.yaml new file mode 100644 index 000000000..caaa118d5 --- /dev/null +++ b/test/fuse/node_1/config.yaml @@ -0,0 +1,99 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:6121" +ThisNodeID: "00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 +
Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_1/private.key b/test/fuse/node_1/private.key new file mode 100644 index 000000000..449618c0a Binary files /dev/null and b/test/fuse/node_1/private.key differ diff --git a/test/fuse/node_2/config.yaml b/test/fuse/node_2/config.yaml new file mode 100644 index 000000000..18c3409d0 --- /dev/null +++ b/test/fuse/node_2/config.yaml @@ -0,0 +1,99 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:6120" +ThisNodeID: "000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 
0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_2/private.key b/test/fuse/node_2/private.key new file mode 100644 index 000000000..449618c0a Binary files /dev/null and b/test/fuse/node_2/private.key differ diff --git a/test/fuse/node_c/config.yaml b/test/fuse/node_c/config.yaml new file mode 100644 index 000000000..d90eca3fe --- /dev/null +++ b/test/fuse/node_c/config.yaml @@ -0,0 +1,99 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:6120" +ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 
00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_c/private.key b/test/fuse/node_c/private.key new file mode 100644 index 000000000..f563980c1 Binary files /dev/null and b/test/fuse/node_c/private.key differ diff --git a/test/fuse/node_miner_0/config.yaml b/test/fuse/node_miner_0/config.yaml new file mode 100644 index 000000000..448bb795f --- /dev/null +++ b/test/fuse/node_miner_0/config.yaml @@ -0,0 +1,95 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3144" +ThisNodeID: "000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + MetricCollectInterval: "1h" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 
00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_miner_0/private.key b/test/fuse/node_miner_0/private.key new file mode 100644 index 000000000..12e7d3d80 Binary files /dev/null and b/test/fuse/node_miner_0/private.key differ diff --git a/test/fuse/node_miner_1/config.yaml b/test/fuse/node_miner_1/config.yaml new file mode 100644 index 000000000..558ca1cb1 --- /dev/null +++ b/test/fuse/node_miner_1/config.yaml @@ -0,0 +1,95 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3145" +ThisNodeID: "000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + MetricCollectInterval: "1h" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: 
"02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_miner_1/private.key b/test/fuse/node_miner_1/private.key new file mode 100644 index 000000000..44e8915e6 Binary files /dev/null and b/test/fuse/node_miner_1/private.key differ diff --git a/test/fuse/node_miner_2/config.yaml b/test/fuse/node_miner_2/config.yaml new file mode 100644 index 000000000..e6edd4d68 --- /dev/null +++ b/test/fuse/node_miner_2/config.yaml @@ -0,0 +1,95 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3146" +ThisNodeID: "000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + MetricCollectInterval: "1h" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 
259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_miner_2/private.key b/test/fuse/node_miner_2/private.key new file mode 100644 index 000000000..adb437e75 Binary files /dev/null and b/test/fuse/node_miner_2/private.key differ diff --git a/test/integration/node_miner_0/config.yaml b/test/integration/node_miner_0/config.yaml index 566c0a9b3..8fd498a09 100644 --- a/test/integration/node_miner_0/config.yaml +++ b/test/integration/node_miner_0/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_1/config.yaml b/test/integration/node_miner_1/config.yaml index a4e87e5ce..a2b44aaf6 100644 --- a/test/integration/node_miner_1/config.yaml +++ b/test/integration/node_miner_1/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_2/config.yaml b/test/integration/node_miner_2/config.yaml index b63633264..900670988 100644 --- a/test/integration/node_miner_2/config.yaml +++ b/test/integration/node_miner_2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/mainchain/node_miner_0/config.yaml b/test/mainchain/node_miner_0/config.yaml index ffd791a88..b5f65dc73 100644 --- a/test/mainchain/node_miner_0/config.yaml +++ b/test/mainchain/node_miner_0/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "5s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/mainchain/node_miner_1/config.yaml b/test/mainchain/node_miner_1/config.yaml index 615d25449..8d88cd5c9 100644 --- a/test/mainchain/node_miner_1/config.yaml +++ b/test/mainchain/node_miner_1/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: 
"5s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/mainchain/node_miner_2/config.yaml b/test/mainchain/node_miner_2/config.yaml index 50f67a6b0..3e67adecf 100644 --- a/test/mainchain/node_miner_2/config.yaml +++ b/test/mainchain/node_miner_2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "5s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_miner_0/config.yaml b/test/observation/node_miner_0/config.yaml index ff26fc0eb..43934a95e 100644 --- a/test/observation/node_miner_0/config.yaml +++ b/test/observation/node_miner_0/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_miner_1/config.yaml b/test/observation/node_miner_1/config.yaml index 95bc06940..400a21ad9 100644 --- a/test/observation/node_miner_1/config.yaml +++ b/test/observation/node_miner_1/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_miner_2/config.yaml b/test/observation/node_miner_2/config.yaml index 0d3303606..edeb37ebc 100644 --- a/test/observation/node_miner_2/config.yaml +++ b/test/observation/node_miner_2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/service/node_adapter/admin.test.covenantsql.io-key.pem b/test/service/node_adapter/admin.test.covenantsql.io-key.pem new file mode 100644 index 000000000..46206b1a4 --- /dev/null +++ b/test/service/node_adapter/admin.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAo/ktdmTGZAxZedA2vCVwXTb8iWNHg2D+EfI79oZ23qCBf09T +bp3RyRnAAogWh833Po+wsvAeKt5qtKQozKABjzyF0KJbwZhpDUcpf94SRX0FHha4 +G60hCmB9I9XzL2qx5A18G+d/Y4l962KA2DTgEoxoN33HUhHafCmpdJH8npkGsNol +2q+LElNXnS07caRXFbjckFZzm1s0YBlyT60CaNhI/R4kJvnO0UHfJ05vLunBlnJ7 +WGFSxJkcdrPGTGP6TdkU0AZTVQMYnkTnkTp6cdwhVBj7FrLvzCJgPfzcEQvxkxpB +EQwureJUstLOarnMGCretg/dNszyoVzdMjAZtNxtM2u49TLIZstEgY1KEu4h3Gc8 +omIXC1F3EEf3X+3rrYd37gems1ki5q/ow6wblwwUtvbKCyggubHMFzSaRpmNueV7 +e6hKnKZU6GWZ3/Q21gj5Ma5d3eauHdurquCS/tzYf+GNbYZHmczIADrTRlemqfhB +5zMCbSSb8cZ5/APjoZtq30/WfrvmhHxdUnxgML0n6q3sq5oSnFZ55vyMEQOBX3z/ +Vx9jb1S+pxZywJlRHaJ8GTz+hhgr+ojXvMBtEsn99r2Ndu4R/FEgEJV/26GZP0JD +c/SJ3GkZQhZ7IjUZfpQJ6/VKW0yiu86doPNsoG2gWJ+fF5VeagICYE5iBFUCAwEA +AQKCAgB145pp+n4gRDi4OZiAoLIucnASHsy1ijBgmrW9wmMIIIG6FEA50UGYweio +aUs5jD1sP0ac/8HQtGQnR7cFlyxH3Q2gOHqbr4Ynw7f0dKbSStY5EcCANXMB0Oln +sFTNDHqlKYTHUyLlX16mswVLbIiFDWmIK+f3+1oH1rQ8WRE0vXRwBgcdOQRVwpHF +MVYBmFP8DBKXu3AWi/YV+XWUDyEiXA3t3ZPEaenlzOQxkFSjd/B0yA4iNqaZLjOm +rA2vslmtSpuKDGIxRq6Wa5fJdC/AWLGlkuhDI5cAPt7O8lMN9nZSepe5N/b/kS+v ++ZvqY3Z3EatZXgJ/ec4fcXKeuLJhjOLRg38pfr49nq+ewVKivbH7bvfeYSyJrIx6 +ZCiXHl1IvQmS1272gx7rTAvBUJa29sKLwo1hw5vcCi06R/6GxWnTqUXhcg6W4yA/ +ejpkUJduDh4drU9w9FZ7OPfP+AzqPP5yhdEcCvxI/9wOVHgdILA8aM8OLE1QdoEW 
+sRc/my0dG3rmtx2tNOKE0oghZknlRdOj6j1Uq6O2XHHjQJuTHO+bvqLe87kOwr4F +KOq9APYUs7hRhtUROgx3fygc6nfyG0qi7Khz/2cidosAmObb547d+5a827zQ4dYS +xQ2lZeEe6cabuAZg1Kz1roC4t4Vl6Xi++rkqPEms574ITSHdaQKCAQEA2H+xvTpE +PoIOEBvZ2ECVjwk05qCmyqZhJzIKRgpXRTwRYjAQsSuRvPuafxtRv/sUC1eXFbuV +zCDCP1NiO1YD1lxG1FFfjRdoeKHoZlKd/DNpjv9s1knPhMm7Nms408c9V1202ttS +zmQr5DM6o2K+f3V701cnuBPyKbFDA4IQ7sYKFr7mEs+O44cKVr5+NPdsCu/4dyj6 +9ailBoWy7nkjPUrsaFcTyszwZ9b1LMJ4NcT1InvJS1LuP1SWIczqUiv7iaj5i0Hf +lubNQ1tjHe0XYIj+tW0shg8e6oAu73yhf43iVKFDa3kZCMD5Ht1jnAUxg8Nowvvu +XGAbJIDAOIJoKwKCAQEAweQa4mZS0dR3Jtvz4sxrqxz+pS2mSQB8pqfekfQwE6mx +UobFPd6qztDBf5PmfprkDZBPFl8S9ZYyEiJyjVDqeUyANZdHq3NPYOUtbT8SiCrl +ymsP/OX1sf2vPsxdwJ48PET5iFrWbEHFXCkeNuwgZIM3EhaqE7cMC/Uj9DyZwatJ +j1er5w+3E5A5oLhPpy2XuM83wlXyKTWXH2bbDpdN1HRcujESiY+rSzLpixvLcwl4 +ejFr3T/MfQXC5fEDmQI0R4hG6BpzNfGznSyY1+J0uJ8gDqzJ911MyQyD9eMNTOZU +PhMqLmBt1VyMUz5ekcFxM5v5vgPmF+fn9A9M5baFfwKCAQEAng+ETVnHzzcWW05q +Gkb0qewX0jUB8LvN/Fa9R2tvUZ3MNzpORXtAuI+cuSXR5m7BsJIvPO+qKtDT4HXZ +JubigFL4ZzRNpW2smT3jtSimLSW/8GWtKTnUJuc9Jjrbz0oMD8fbLVmouARMQxvf +uL9zwwyb7a0Y03zEdQn0mhAQmrK9VOPkh2E/uf+yXahP7g2htM6EQUMLDeUlLoDY +JOEOCEa2GGtSiOJctgMrFpWYO/Fi4t0rFjIivNvdjCnV/U4dI+DY54GdYsd4nq+O +yp95TMJX608cjXdmo+AX5ELCiaSl1BG0bjeIPmrctlr9yT/FaaR1zL0vxgNobZsO +O3OB5wKCAQBDQu1sotCSSCF5ko4dnIqxVqKkDJ0F8CxN6ChW+53+BD0mguhD2U3p +5xNpPZaVTwhUCD7XZO3/0jXWgqq4iVx97eMANFXBjYP4+ifzIRE9uZvzx4ZJVkEQ +mQ/FOkI/wuTkh40FF3YRIhPkL8NyjCGEnNxq4v/nTPXZ5BWv8aHpRJGFL4XL53C8 +UakcLzQ6q59ZllEikowqbZPaaeUOP8DZNfDBCqsCm5txv9yyzFactqlbwm9H1o0K +xgfhmuWDm/ck5YqrlBlpmkqT+Neg9MdHELSfQqPhszUi/bt9fmGrzq9kxWM5qWwQ +u0VWz2khKTkrDS3rFBErM+EMko47lkDjAoIBAEdoQOdMnKn5hzbhxUhDit6I/NoX +K9xEc7VH0oBd9KLsINFzQyGYz857jSyCZ7L6o7JHTVLs/469lcjcuDJ/9JkNU7G0 +p3/h33sHN/w/cGh5OyWpaAt+m1PoP6fEoHomFAilAINCkXlT06+sLQo7dl7khJ7z +5qsogIVzeW1etFICikJHIHSsND21vCkVmRbrOA3MZxNpDwsTcK/LxmF3xq34PTS3 +1BKFZA872IuMf/xLGQ0RdEbLzxtSUppkMl2SWE1Vph1dV3xR+YUeYMziYq692cRE +6McNJpjK8RhdC9t3AlLrViyAphcU1v8T8YprQHMS/1xCbGZ/8nrCAnD81gU= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_adapter/admin.test.covenantsql.io.pem b/test/service/node_adapter/admin.test.covenantsql.io.pem new file mode 100644 index 000000000..1e3d7d608 --- /dev/null +++ b/test/service/node_adapter/admin.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFyDCCA7ACCQCofDYaBrdh6zANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIwNDFaFw0yODA3MjkwNDIwNDFaMIGoMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEiMCAGA1UEAxMZ +YWRtaW4udGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFz +dGVyQGNvdmVuYW50c3FsLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEAo/ktdmTGZAxZedA2vCVwXTb8iWNHg2D+EfI79oZ23qCBf09Tbp3RyRnAAogW +h833Po+wsvAeKt5qtKQozKABjzyF0KJbwZhpDUcpf94SRX0FHha4G60hCmB9I9Xz +L2qx5A18G+d/Y4l962KA2DTgEoxoN33HUhHafCmpdJH8npkGsNol2q+LElNXnS07 +caRXFbjckFZzm1s0YBlyT60CaNhI/R4kJvnO0UHfJ05vLunBlnJ7WGFSxJkcdrPG +TGP6TdkU0AZTVQMYnkTnkTp6cdwhVBj7FrLvzCJgPfzcEQvxkxpBEQwureJUstLO +arnMGCretg/dNszyoVzdMjAZtNxtM2u49TLIZstEgY1KEu4h3Gc8omIXC1F3EEf3 +X+3rrYd37gems1ki5q/ow6wblwwUtvbKCyggubHMFzSaRpmNueV7e6hKnKZU6GWZ +3/Q21gj5Ma5d3eauHdurquCS/tzYf+GNbYZHmczIADrTRlemqfhB5zMCbSSb8cZ5 +/APjoZtq30/WfrvmhHxdUnxgML0n6q3sq5oSnFZ55vyMEQOBX3z/Vx9jb1S+pxZy 
+wJlRHaJ8GTz+hhgr+ojXvMBtEsn99r2Ndu4R/FEgEJV/26GZP0JDc/SJ3GkZQhZ7 +IjUZfpQJ6/VKW0yiu86doPNsoG2gWJ+fF5VeagICYE5iBFUCAwEAATANBgkqhkiG +9w0BAQsFAAOCAgEARu5lULDZastDfdkkWhdBlPphbSksyNqT0tr/RZr0EtWjtNjq +IEYLuqOyVom3r3FiNjBO9u74BJxSqzbH2GH7qjZPnGfMgFQaxnS96T9HnXjZlPn5 +spcYA1m0W5TpF17N/rzxH+/c5VyIhwsVBdRF/uVow/6r+GkM+knC1K4Md27Wz0KU +jqOQ5eUm5KV4kyOQUg7MmTafqQcwt1Xh10kJ/52hAG53IznMgCo5ZSqYZroLlF8j +WXTlQtGr6SnsK8poSJW/JuidgBfwliL7OGFMnvWrCVk6FhAL3rlY/PmhDZ+OnG8x ++b5JuuxZcHnA0JVvK01eWAmcMixHlgtnZ+6Cgsx4CtUUo+PKuOZBBo4lWqw+/y5V +A0cvPy+8DadAndT/xd/NHUXgxrNjbaTaFuDeAJwN/i2wWh2wibEPhv25rCVQTvOP +HG9b2izWR4eYTqBSbTZjrfagnt3Ikx9os1C+/wuwGRMC/1GEwQ58bSuWHaKXdXSy +1syTvm+tt2Jg7shaKsfw+ZMY6iChUJ49yBB5W1F6VBHUgKqsGxnKlrEC4z6YoOkl +E9WNb6R/8ROF+OCYPgbisYaxIUFp6KJXK3Eh3J7s7XqW6Fn6nw5e0eMn1SZZIZNt +XeLTiv7tjmSREMVzABvaIaFQk0s5GmWkZvqQVkRLJRiHuCCgbIWMrZUZf24= +-----END CERTIFICATE----- diff --git a/test/service/node_adapter/config.yaml b/test/service/node_adapter/config.yaml new file mode 100644 index 000000000..aeefebf7f --- /dev/null +++ b/test/service/node_adapter/config.yaml @@ -0,0 +1,110 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "172.254.1.4:4661" +ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 
000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner +Adapter: + ListenAddr: 0.0.0.0:4661 + CertificatePath: ./server.test.covenantsql.io.pem + PrivateKeyPath: ./server.test.covenantsql.io-key.pem + VerifyCertificate: true + ClientCAPath: ./rootCA.pem + AdminCerts: + - ./admin.test.covenantsql.io.pem + WriteCerts: + - ./write.test.covenantsql.io.pem + StorageDriver: covenantsql diff --git a/test/service/node_adapter/private.key b/test/service/node_adapter/private.key new file mode 100644 index 000000000..f563980c1 Binary files /dev/null and b/test/service/node_adapter/private.key differ diff --git a/test/service/node_adapter/read.test.covenantsql.io-key.pem b/test/service/node_adapter/read.test.covenantsql.io-key.pem new file mode 100644 index 000000000..4cbc33ec5 --- /dev/null +++ b/test/service/node_adapter/read.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA7cWb0RE4+hFRBhwJpAgQUSUOG+8H9evC85z5IgyrgwIPP7sl +6xz7VpiJ3O+1SP3Y+aHf1vNryp+AqW6Y2G9fPjPSusp4dFE19xC5hG7kYL2TLmtv +8B5ceLtQnI1XZd46TAiFqOg3rkB2X8oHOPfHY4zhPQ+4PC4EnIlFwiczoAbdomV2 +pQrKKM4F4ENwvthVr9uPg74pOEbJcy8NoW0l0WZlY82DVfC9ydvOLJH4nsncLTEY +BDUki9ETQIrWgqC03U21MZmDcyfZbi5M53aKky1iQOzNQZ3Rr/fFdNMcbLYKVlMH +hXQC05oXH8GvPYigqOzNyMngWdousKGiE/k3vncJPwVFhdovQXLfa/aaZGznBrP+ +dJe7lpBmAIpFd7LmdqVWPnuDwywhmVEneaI1aeBEXEl60/V6bFDUmSd62yBtFPcx +axUk8tABWDL7vN3kY3W4bUbNyvscQo8Q6waPjD5hTsbXAptQnXNsLzssDTnBX+Kk +ZNI1tTZ0suikRnbtvTDYd4hN6GFtmD6kF5J8F/e9iNZiBy2JnOIbvSckqkVhelyQ +o8zbr0k/rgNRXnV4UfJShYaWEaG74i+JcAVmp/P4Y9LyBwbiSPgH5oGg+eNdo6P/ +JOil0ArFvjBbCwDiHxKtd03jdSZ/B3pnwPGd9lHI0M8Tulp37LZsZm6bfI0CAwEA +AQKCAgB3I1rNyPlpo5+blhTmkfvLDOwi5wRwHq/SbUcP3pVZ0YBeiKGZSy5M16XM +hHermTZM7uU/yTyrjHxlaTtAx51Lh6ABZE4yyjZmE4VBbGcWaicDTWYLRMtE22aq +6s9uBYnkayi9141+zGID4TD5RH4tzXtWozfHP6+j18ySWh4uAwKuynRGgj+FbqXX +FzO5DKDyuusQMgppXl62Tk9gIVafs9T9yw4R08zlBjQqdQHEXpTqN/02roIfZKVm +46pUTb4SXUt7DNamrsLtyFlUaTtKP6VJrt2yESfuKhJQVS+a8SQA2R2dquF3sXAA +w4XRKVKHEhCSmUTHAOIAMx0JMQjSeYffbUR3dF8t8jK/RYHZ32oKuYgt6LLLMMUt +nfehiweMYKkjhLzW0WCyhuqlhk4T3x1Wgh6S+HiOHgvD8dW8wnmNL2k11h3STroM +g6Fc9+9KMBp97FrsCYFrIDeTY6uWCJxE5Dkb1Y7VXUdGMuIHztNnSKIHcNDOL2Mz +N6qr2smE1I5Wzm7CGv46AXTt0TOKnXgEyjNxp8LRkl+oYm/GlbqG1RrXHqWOcQEv +1Y6FSo1yP3SlOcPq3YLcZzTLH2hrrR1R4ie7hKRL6j19TnBR8R1CsP7LHOHJ4ahM +14SkS4srowtYsXJijoGh56K2H2sxnElVxQJ85qALdTBeR64eKQKCAQEA901lznD2 +5ZHFtYWLKhlqXaqgM/4Whu6cR0f3C9SLcQzAYjuaZjGmOKauackLRWSVpDnSR8zB +ol1QrRyY2upbVKRXxR9nkqamVtYZxSBS+8YjRBOvcESNY5HhjIIqwBQBTJY9v1DQ +kA5WSfivThQZGJDH1y2PrYFi5ZxmwRBnMMw+NXQy0ccRIagVMt4qQLEnA7diab9F +2ZYAgpk4o4d/tA4rF/22AcWX7pdk89zA99qoz0p2ko5/JrV+FZMGZ8PPbm5I4HsV +ahHXBXIUWExpOhonVjLNWXDvTHhkRS2zT2uhav2ohLf0+CouSA/aGoDBpOTzSndw +pL7yIQTu9B2W4wKCAQEA9iJnNuS8qfENZu+/5fzLlh7OaAerMN4JLdg251ESp4fs +LTOWFTlzU880/57SREcLO5RfYhMw2FUzyU+tXrc1cAZMNe8cGmPqDeUyQSgavs0M +WzSnFUk7z6jHH3GNCAPBC9A7M3oogyNiNm8fXZX51Fwv/EyAJX9lQhmXPMh/c53f +ulWCD28XKVEgsjEMMKZZVOvkRGBN9KLJ6DlLCtrixZSCfUyP04AjLxDeMGnhqi/P +nDADvVcxrRuMs5/8OQ7DVg4UxuOK4D/v3KcBq34hK13uOvupdxBHO1yYlUVa71G7 +ZX4KhumUOZZQkoYSEzW1N6IZXzA8+nV/Ulh8u4WJzwKCAQEAn+9eN/S2uCFeS9bh 
++YgWUh1XHkjlKL6IM1FHZE9BHwuwH9eMMytI5LpnceKjd21lmaALboPtdqQC2PH1 +qR6HkmX2nXWB9kXwrZgpcmNFR68Mf6p7e4/aINrnk4dbPn2xmWZQ6LnLKF8dTxmV +xlkZIdoAZBkDIqLa4sQTcCi7k8ODN+6+Lw0e9zVNAGjNyqjHIpAnBVy+P8nS6qNN +DfVDkZ7YH9vlKaAwcg1XLJ9H7QNsySLPLFkbwlz9/dXn/pOUQ0bvur3fS4neFZeB +sNk59GmVpxmT1JRFLp9tuY+kt2hULG1/3tVZiGU/KTuXQiyjD5FCBpbYMrOKw+/8 +2cOJIQKCAQBO6ub3Jc4MGxr190crIavRHV2G43aTO43r4hhwgIEfsCgcsh6b/Yip +xZUzpKO8ep7yYndWxdpycpchI+ftp4Z9vbcvz9PN7l08SVGcrJQuuyYMFEzCOXHw ++iemQE081Z9O/1wL/E4DBhRWabi/0/d/jHNiTNEFtNwtnnDsb0jWNDdo0kPaWP8v +IzD9kVZcPuoDnYLaHZrBJnTgfYY/G8F8IkrYi/TNlpcxXxIuqbROUfgaFxcL3Woz +G9M4QMKpNL+S1v74ajq7/iQVNoMFjnJqKjrZNJm4cEK2mNDfg5ZNh4IzX39WlIwP +DtAUuuIOwLiy9sl1yMy0bXn+WBreMUnnAoIBAFkchyvXwhi/l+rRiHAuFMTc3BsO +br1fVA5Avlv0MSBNRa2sISoslCDcqgLgdRckJplz2Q7YX0tL5aYZvZtMpiNmvyES +RL2hNqulrKJ/8Yuf04hUW14MhXizq7+NgMCTtOeLo3W40+EGswV9wvq/wTgdE5Yo +WgstDYvQ1YlqVXP1kWZDcFY1kO0zLIOWwFWbtmmtM2TDi09kZFNLGOoGXsJvKCWE +6vJ8xORPmmrVQ83hHIPqGlFkxts7R209RLWgGWSSOatdhDEd3uiuVS/XlNA3Q16l +70ME8P5a/MqEwmCF1sODndfqnc2A9n/XBM65IdFproaANOwsIcL2jW6T/3U= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_adapter/read.test.covenantsql.io.pem b/test/service/node_adapter/read.test.covenantsql.io.pem new file mode 100644 index 000000000..1fa09dd22 --- /dev/null +++ b/test/service/node_adapter/read.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFxzCCA68CCQCofDYaBrdh7DANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIwNTdaFw0yODA3MjkwNDIwNTdaMIGnMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEhMB8GA1UEAxMY +cmVhZC50ZXN0LmNvdmVuYW50c3FsLmlvMScwJQYJKoZIhvcNAQkBFhh3ZWJtYXN0 +ZXJAY292ZW5hbnRzcWwuaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDtxZvRETj6EVEGHAmkCBBRJQ4b7wf168LznPkiDKuDAg8/uyXrHPtWmInc77VI +/dj5od/W82vKn4CpbpjYb18+M9K6ynh0UTX3ELmEbuRgvZMua2/wHlx4u1CcjVdl +3jpMCIWo6DeuQHZfygc498djjOE9D7g8LgSciUXCJzOgBt2iZXalCsoozgXgQ3C+ +2FWv24+Dvik4RslzLw2hbSXRZmVjzYNV8L3J284skfieydwtMRgENSSL0RNAitaC +oLTdTbUxmYNzJ9luLkzndoqTLWJA7M1BndGv98V00xxstgpWUweFdALTmhcfwa89 +iKCo7M3IyeBZ2i6woaIT+Te+dwk/BUWF2i9Bct9r9ppkbOcGs/50l7uWkGYAikV3 +suZ2pVY+e4PDLCGZUSd5ojVp4ERcSXrT9XpsUNSZJ3rbIG0U9zFrFSTy0AFYMvu8 +3eRjdbhtRs3K+xxCjxDrBo+MPmFOxtcCm1Cdc2wvOywNOcFf4qRk0jW1NnSy6KRG +du29MNh3iE3oYW2YPqQXknwX972I1mIHLYmc4hu9JySqRWF6XJCjzNuvST+uA1Fe +dXhR8lKFhpYRobviL4lwBWan8/hj0vIHBuJI+AfmgaD5412jo/8k6KXQCsW+MFsL +AOIfEq13TeN1Jn8HemfA8Z32UcjQzxO6Wnfstmxmbpt8jQIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQCq3FVZnp9HGItWlAXpViXrJx51D5W+bh83yKKlo23fo4u/6BM0 +H0gXTtl0XpG/nsp1oqINpc9+NXzEbs7Twx4utN29WyboacbLu5KPD6q17bWTdIH3 +VijHcyOchlru0nPhweNVtSR7+hmVMZrqHy+Ib2uzuDieD7ulvHTaX/JDkRvZYhYS +8qCptWk9VObeNnA3cyoZo5WyvRLXBQ5Q6LW5EMmXXQIKWyejX3vzwraZXFyhkLzz +GwY3h/ez4dm5Vgbf+lodAtslO5SEKcA6tSQLcdCO4J5+aZrbyIuzEGUra+Y2ZiRl +xtYzSkgaMRpMYZU7y96v7qoj2UOJw7KYj+3bN8rb3iTiXKXBG2XoH6Kn7IQb8pYD +k0+KGZmtZQ38St5UNmT0V2G1eoZA0F0FpuyVPe+ZOF3TxCq4BkvQC9puTrpHZiFm +mWw9xQsjOX34B88GckJsldUq86f+SNLhBFUBQOVRxWWjOV9R7PHHr+d28foTdPfU +gjf6Ff8XGoDw40peFLodsJfuI7xvZHa/4IoDnhEYHyDml++jskDypfNmSBn4m8fx +EtcwxUmsjHdW/mXqdFtgMsT+NGiGZ766KNS+JTWkv9ZJQMUS/714v3q/ymgzIIQ1 +BNhosSnSqa/eyAzggu6+US/FaG69xDBZGwoI+xw3kzQ+WoTQzjwoz57Enw== +-----END CERTIFICATE----- diff --git a/test/service/node_adapter/rootCA-key.pem b/test/service/node_adapter/rootCA-key.pem new file 
mode 100644 index 000000000..d4e545428 --- /dev/null +++ b/test/service/node_adapter/rootCA-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA0ry0EA1+Yeidf13L2GdhO1WiSEUKUDslVcYfzqbHh+NJyw4F +7J2cnolA2UcJ9zg7NoB5W3mYjoRBxr/SY3JVeDa7E3NnniNvO+n7ZD1sfCgjsJdB +89kMY4adzMbyJIvrXcllMawP2237jZ8LsZRl+NLJzWe296pfivWD1dI8RxnVk6Rr +Ub+VrcxFLp7W/NoCfW37xU3SYo4jFhVfCS5P9IMqI3w6aBK7mLcRj2HH8+M4xCxz +YLmRsHxkazZcn6KL/Tef5QyxM9a9yexwg8u16z4yxn74m6egQ/CNVtWcqg6zNpr4 +EI8WfgagJr1dus1OrpZrhdeNQDIQwMLa2RHhj72PF2qQ7bLkuWAY0UyuFvcwsGFi +EfxYtfNxnl2YPM5aVqrt31lQLi0AOLEvC6rYXd5sykDg9XAthuNe3cJAfqcGUpFr +3GrAwbPcuGUHPrgo6UjyNmwaBYLlGFmz3te8Pj2P1fLoLFXROtCg5hpYHsAqNZoV +zrR4/3uGvPn0eABzMR6BaNYl1m0mPkSY7bCDH2oulEmU/E1Ck0QuJ9+jd5vVrHPc +jV4B3jQsmK5UP7TXkcsJ5n8OozPahoItv5cwNYt4XGWpCqpEoqRmOSWZu6utX/nD +oByaw8mcEqPBHEotG7in8qpQLIboPO003e3lwq7mECwSz7UXFrIkfx8l7PMCAwEA +AQKCAgEAxfVjhCzYyqpTwMBga3F5Yd5oalEIhiN5R+CsIliC2MCGoksS/5ly9W3j +T/eugd/9p4358hcAWugcTdIFlUtixGFNTNE8xc1tgS+j6h7VGLAwDoOX/bOnMprT +Avjjn7ccKuazu3xxDOR8yCVeO7s2Kw3/aYeC1ZXi2EsXQ7WQ0A2RlnZ+JbW9qhxX +5JprQ+ybKC43srkO52uzw9vhgWNS0lKgM+NPjlICjUtzIGhvB0gsHAPRgkvvcoT3 +Y8sWKRLtQ7mL5wMMNrEDaXpEm1myE0BDPDkr2jQVlZyTeL2CxDC44pOicROowkwB +B0MdmAuiXNiKOpkoY+Rj3l9sazqj0cfzc1aFmUchAyb0Q+a2V3ubEUgRVtynRO75 +p41SrdB5Jo4rm83GmRoV2tbIK53rseRrXQ9VT72pu7D2XN6KhEgyUbc+4p9jbTY5 +GFGkWPbfp6ryoyiFWnwQyqlKQZnz+k74aweQQ0uroc5JUKgNxaS7kLIB2+4DrIRF +P0RwuUTR5wI9WjpdB4J17NzpBNgJ2s5eaQ40CCFHSictUX1a9kFk24nel7XI9br0 +F6tFwC9F3TdSxx5HyHna66WfOfG+vs6Kt6RC4Dzft08/jrQeQ8fnZcufjaeFG6Uy +xPZQQJ24krJ/SrsZiZmrR5bFCRFTE/n2N9npZpBHhajYhjbhs8kCggEBAP04RPKR +vw9knLkuqK78QVUBR4UzydqMDQpZFF9wM1x2lhg312K/og1Y8785SEHqsTgtXNvT +cleE9NhjUqsLfENfJov0ofCCXbjEUuRxCEZd+1R5XfX/SLOGmGmWqqiBMReHE/Cu +c0e8nBY/isGDtl5E9FPxdTUQDrPz61UAt94SThs0Jhq0oKT90QRm8/vxKkgOcYWf +s4D3BgGcvdDXA0zwH8RC36fAPvYLfi8i9OQ1upi9gNBs0EgYOtM3VLHZ3HQrZWTT +gUCwR+la0no19eZOgpbJQS2XzGLTVC3FFNQK6emOQ5g3h6bml8ukFQOHIWeHVOqJ +K0G7B/lT+S4WCEUCggEBANUNBunyt1/Y3+2JyRhF9C5Rq7Av5k+sedhsMLFHuQ8x +Cf/wAs8yKW09a0YrqX6laVmu7VcBHaMVY37lac/U6Slr11JnsHLTNPBgwwl20Z7U +QSG7/WdE/p0ylatKKg7dJ6iA2ctjYbjG0ML1XWuj6QbkvNDh/KR3cD/niNqXNCQ2 +KihJ62mQO1odKRRgBqImYtRVo7E6hgYvkYqK9TBgGQ5ZtX4tiMjHah/YR7AtEuOr +O3Yt4aaAww7w6JeRecIEg4JSW4KuK/ztJ7D5PNRg7sz2hECjELcFP6fTxF+qcEj5 +IzRgdTjs/bNUZz4H7ikH9ejBJdEvwPHlyDQHlFPsP9cCggEAKWGGsvVqecOBcSnU +2zPSIWgiHfyGojZ88xH3qFkXq6adhLurcTHL885zlu5vhoYqC/ot0KbPasoJkUs5 ++UXZOtFT5U9HH5zOYCGFQlvOdGFrbzSeTFM5uEzon2jF3t+t/CBQ++YmZLTH9ULR +FCrIJMO0AfvVoaRMItBbxvplEd9/8CYni/m0vwHTpJqGiMeyly/1EVc16H919dF9 +m6Fnoq0jI9mh3zIll+Ps7RsTVjAJnGhroqQFraJ4CohiSOZHhpyI519BIicsuU/k +UaB73PU7lhSxmBfUiNnsScaJTtWxwD9FgJyiiH3qlJbt8DOnG9ob4HAmJ4m/FdnJ +QOTM2QKCAQB1uesWH27A4eBrK/YZGZ6icbLDetRzNkVmF/KYI5/ZCyoRaRjEUV2e +5Y9/iOTx/IlIa2bu6sjrswf1uONNWsM0hkjHWlCgQqFAKtfbRPL0JymOcIjIJdHk +H22g5yxyZjZh4EF5KAN5zTLSaC8lKb+8dWz4p8epQe6fAVwYHfFMCTomZSJWhMKn +OvHWNnGz7C40UtZPOp2QkXyE5+AwyQlParblcFfjSn4T9rk2WtHTSG1lEllcXk5q +1ShRiKuVUFUzEDtM7N2Vt551JmQ8nwuV6qqN5Q15dMcF//jFPDMrv696Y8qimCJg +k8Uw+8TYm3OBGCnDe/XMNUL5rS6DaUqlAoIBAC8zFGOm1FFfSRAFOiKUEkGIYaBQ +Gd7GaBWzZFC2tDe6Dmp+WxFxqX7Dr/AG8nmCwOrbJOayhrEmIlYwjHmZNDSvphMp +L3dQYqVCqQRvCDx9ztXb+mus3iyhgD1vgWB/EwqhiK3S2n4rbaGU60h0YFC1JL0s +icrlRsZMkJV+l5O7gGFCVHCBZc9XZDeu6pqOjyMS0gx5IXyHGRBS7hS2HXD9QHid +/sufbNxzs2sCdwM/EwE8BlaKX0OiLGyxcQh7e5Ca4INuNzM5G+3ZEr2auVAkfTNF +u+sAmvfbC83U70HJakLGZuqq5F+xamj8dL/qnlYpo6D1wdnep1IeVvn83z8= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_adapter/rootCA.pem b/test/service/node_adapter/rootCA.pem new file mode 100644 index 000000000..1aa3ca429 --- 
/dev/null +++ b/test/service/node_adapter/rootCA.pem @@ -0,0 +1,39 @@ +-----BEGIN CERTIFICATE----- +MIIG1jCCBL6gAwIBAgIJAIMSiSlXKMA9MA0GCSqGSIb3DQEBCwUAMIGiMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEcMBoGA1UEAxMT +dGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFzdGVyQGNv +dmVuYW50c3FsLmlvMB4XDTE4MDgwMTA0MDc0OFoXDTI4MDcyOTA0MDc0OFowgaIx +CzAJBgNVBAYTAkNOMRAwDgYDVQQIEwdCZWlqaW5nMRAwDgYDVQQHEwdCZWlqaW5n +MRYwFAYDVQQKEw1NZXJpZGlhbiBMdGQuMRAwDgYDVQQLEwdEZXZlbG9wMRwwGgYD +VQQDExN0ZXN0LmNvdmVuYW50c3FsLmlvMScwJQYJKoZIhvcNAQkBFhh3ZWJtYXN0 +ZXJAY292ZW5hbnRzcWwuaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDSvLQQDX5h6J1/XcvYZ2E7VaJIRQpQOyVVxh/OpseH40nLDgXsnZyeiUDZRwn3 +ODs2gHlbeZiOhEHGv9JjclV4NrsTc2eeI2876ftkPWx8KCOwl0Hz2Qxjhp3MxvIk +i+tdyWUxrA/bbfuNnwuxlGX40snNZ7b3ql+K9YPV0jxHGdWTpGtRv5WtzEUuntb8 +2gJ9bfvFTdJijiMWFV8JLk/0gyojfDpoEruYtxGPYcfz4zjELHNguZGwfGRrNlyf +oov9N5/lDLEz1r3J7HCDy7XrPjLGfvibp6BD8I1W1ZyqDrM2mvgQjxZ+BqAmvV26 +zU6ulmuF141AMhDAwtrZEeGPvY8XapDtsuS5YBjRTK4W9zCwYWIR/Fi183GeXZg8 +zlpWqu3fWVAuLQA4sS8Lqthd3mzKQOD1cC2G417dwkB+pwZSkWvcasDBs9y4ZQc+ +uCjpSPI2bBoFguUYWbPe17w+PY/V8ugsVdE60KDmGlgewCo1mhXOtHj/e4a8+fR4 +AHMxHoFo1iXWbSY+RJjtsIMfai6USZT8TUKTRC4n36N3m9Wsc9yNXgHeNCyYrlQ/ +tNeRywnmfw6jM9qGgi2/lzA1i3hcZakKqkSipGY5JZm7q61f+cOgHJrDyZwSo8Ec +Si0buKfyqlAshug87TTd7eXCruYQLBLPtRcWsiR/HyXs8wIDAQABo4IBCzCCAQcw +HQYDVR0OBBYEFFdgm7OKRRCg0gIK6kxGU4PuVhM7MIHXBgNVHSMEgc8wgcyAFFdg +m7OKRRCg0gIK6kxGU4PuVhM7oYGopIGlMIGiMQswCQYDVQQGEwJDTjEQMA4GA1UE +CBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQGA1UEChMNTWVyaWRpYW4g +THRkLjEQMA4GA1UECxMHRGV2ZWxvcDEcMBoGA1UEAxMTdGVzdC5jb3ZlbmFudHNx +bC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFzdGVyQGNvdmVuYW50c3FsLmlvggkA +gxKJKVcowD0wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAqEjzbVox +aaxCXs/lQuJE5/l/QSBs5MeE98zwINxCNmZYMsA9JmekyphP286fgdC7G2uRP89G +5lf9+UkHjfWK/N8l1t0NAA4LScMaD09SYCq9p/s7BfxfG0ZS5hfZ6MXuf6svYhL4 +gg7RQEUNZsaFSLvhMG0hGnBzKjDEPurrRnOx9tbtQF6/O6evN2Ig2ssqKjn/m1As +1mxGZy1ZCyREQvHEyj0p36LQtWJOYGRDncflJbLSMBrWq/bxQkATMYJuPPetHIJH +nQzbsbagUrTGZPM8B4LJXD8RtnXmH7zrU+JOunshxTfnl0vo+ezvKT0ig2q2M/t1 +DH0Em8EUgJUlUEOxUfA2hZ2Oq2RrLNz01oK06D0De5JL3CwUpSqbzqJ7F5M5os53 +I9FXSiKbjJUxZijH6NkTZ1gP6GpsEEWc6qOXXAYJWNrW12L7+QjnjgjWI176xO0y +VrvVGBgeOCoFAD/4FSzmCiee9v9sbdzd1GkfkXztPJKdeorRPyetob/zK+4btW4n +0dxfv6XahyBgoKVA7a0kn8ZqM/g4hmkfX4LujTK+C75d8p669zopQ3O76XRBsyJF +dM7J2DwRudG2NphtJyXWXdDSdK9s3iPUiS0y+j4gg9I/cFBQUjKD0R5ZPcRrdG4N +9zeN5A/Kg7vHsbpREm0YtLO9LvlLUp0HUS4= +-----END CERTIFICATE----- diff --git a/test/service/node_adapter/server.test.covenantsql.io-key.pem b/test/service/node_adapter/server.test.covenantsql.io-key.pem new file mode 100644 index 000000000..97b9d4b6b --- /dev/null +++ b/test/service/node_adapter/server.test.covenantsql.io-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDYqNT8V2PPMnWI +Ka2PxM7z2cf1WMrJq30EXFCboCJXyxLKFrBvb8LqgXpLonhug5kIVVaz3yHSph6d +lFEw4RCLzDm4PbvUxMbX3h0v/uirT8wCy8RvXYwQcisl2++bpO8IqFTiqg4O5Thr +O7BSqNXdS0/yi3PVN8UCzGckGXfPLsD7bCWPAFJq1YmvJ8XAqgAm7h3XPoWbUSEK +jOdLjU3jDq7/kCdZNN2DSkTDkE5JE2pf90BnALUIijggeLQn2080NfbFSW09j8kg +6BzsYfqBo+xxQ1MEP2N/0Zuqf+xW6jDYrHweznDXJdNHUN/yxp+64kxHgp2wyR4f +NxLYt7HvAgMBAAECggEBANNt9uMGGRWyxTWKjqBVTCx1o4fPDZ4+ZrLhr5wfakRI +nV5vQ+CLrSgSEJlMxL/8VlPmi8Teg/BAQnI+sfjEOdRjCRS90dXx7aXtUIhs9vtu +1MUJuvl+zdeiwm6gsbQvAUFum9/SWgO5NxSWXBxePM5G1472/aPeV7jCZgi5fczE +pC21VB7zzPG20UjWqVj2vAD8tS9/UQybc12/IOnS7z6pQP1wpn/2N99BEcEXWpDW 
+m7/jDbrZ6qJD18QmAoltMVfQF5Pi6qpLkU8qOYKFioO7GGNhapWz6lvgeLanux3l +mU71RAMANgmgdjs4RFdC0hfy0a/xPRfINCeVkSwC7mkCgYEA323IVoDaKSatFtS1 +W7QlX9tjtL1esuvQfHsR7B5EAqir6Lw4Hpn/gPlwNaTl+34GJy6dm4Shbn/A5pka +ow8bjWNPynTxuvlT5NXOW9TmgkrzATIhSrQfMHO7saDCo8JVqRZUvewFXXo4bitm +2bsHYh8Z1XClOz1fka97zEr3Wg0CgYEA+D5sEf9JdFU9LNuDC5JshG/Fc401mukg +AckE1Nmi1y4aa8XNEZYwUIlBb4r+OUlV/cMQvxEqnt0bl5oiL5myYW+sSnSCkHU6 +O3CQl1PO+uuiyNYl3QGNcq5Hw01joR/HejD+h0I5Mb642sXmUcba4fcLKBS1ZG6g +tCANeXBuKOsCgYEAzDYPMeE7hPkwovlveY3By0c+bmfXMjmOqPfjfah8COIZnfLK +aE3g1gUmpiE9gwvAm/djXk1vLwvdR+cQDZE1YZkvyJ/ygS55m2I/5ndE6DmQubsT +6q+PAj4Fg2in/f0VRiJ++cfLb5DSGv/YVZE4Qlqixg7bNrX1r7ZwtFygj9ECgYBA +S3qWFrahqMoVai1AvAXbL0/Go9Y0bxjZHYVg05V3gftZ2ntIiMuusD4Ac9FwaOwa +s4EM25dcWgwhccxU48vtrIzFI/QFEjeo2Xi5mP1Mw+b/eWeJHDPUdgskLFEXlDGI +FlR2F9LUbX9XOlZy67wZNnDvSp3Ii1aYEI0s3M/LTQKBgCadu59DWqTxHfzu/vRG +e7xIMuqXZ12zA/9Ks2pasw1Aa9ZWwgRpZmP4PiFn9tyXEsUXYVbNxWEu3ZUOMQEY +Pq4BeyADEWLDeoo1rHbAEv2X+cr7rm4Sobu2vxtfi0uMlUILtWyK3XuiRoTdlXOH +U9xfXHYXJp08l0Q2dXIHtEZl +-----END PRIVATE KEY----- diff --git a/test/service/node_adapter/server.test.covenantsql.io.pem b/test/service/node_adapter/server.test.covenantsql.io.pem new file mode 100644 index 000000000..1b9428afa --- /dev/null +++ b/test/service/node_adapter/server.test.covenantsql.io.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEQDCCAqigAwIBAgIQEKobji5n26kQYHutrsnlgjANBgkqhkiG9w0BAQsFADBt +MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExITAfBgNVBAsMGHhxMjYy +MTQ0QFFpcy1NYWNCb29rLVBybzEoMCYGA1UEAwwfbWtjZXJ0IHhxMjYyMTQ0QFFp +cy1NYWNCb29rLVBybzAeFw0xODA3MzExNTA5MDVaFw0yODA3MzExNTA5MDVaMEwx +JzAlBgNVBAoTHm1rY2VydCBkZXZlbG9wbWVudCBjZXJ0aWZpY2F0ZTEhMB8GA1UE +CwwYeHEyNjIxNDRAUWlzLU1hY0Jvb2stUHJvMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA2KjU/FdjzzJ1iCmtj8TO89nH9VjKyat9BFxQm6AiV8sSyhaw +b2/C6oF6S6J4boOZCFVWs98h0qYenZRRMOEQi8w5uD271MTG194dL/7oq0/MAsvE +b12MEHIrJdvvm6TvCKhU4qoODuU4azuwUqjV3UtP8otz1TfFAsxnJBl3zy7A+2wl +jwBSatWJryfFwKoAJu4d1z6Fm1EhCoznS41N4w6u/5AnWTTdg0pEw5BOSRNqX/dA +ZwC1CIo4IHi0J9tPNDX2xUltPY/JIOgc7GH6gaPscUNTBD9jf9Gbqn/sVuow2Kx8 +Hs5w1yXTR1Df8safuuJMR4KdsMkeHzcS2Lex7wIDAQABo30wezAOBgNVHQ8BAf8E +BAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAfBgNVHSME +GDAWgBSD0tobP0meocjRN1XBYqlSTOHglTAlBgNVHREEHjAcghpzZXJ2ZXIudGVz +dC5jb3ZlbmFudHNxbC5pbzANBgkqhkiG9w0BAQsFAAOCAYEARjlPL41xnYOUHz+k +Qrj/2figGRYGBwfnLVJrjkkSuWY1KRTLUlUYcc9ofkLzAcwRxVbdhcwLLHDA/ddZ +Yii7AY9Z/amzagu/btgvaWu1KMb8IKe6PKy1ZjzzpT6M9xGbW/YyxSWSfNXxD2t1 ++ThvFKZai+525IC2PjlOP8k9hKu4A55wNjvekleqQ+B944iXDRBVOHqgK3Fy3JQ5 +pcAGm9Q0Bn8xNZhEsVERPKeMOnxF/rfggEiCdPp6fexG9X+dUziPSXR8RGZDn16E +Ho8S4m3or0fMX2W2EsYkRY/ESxsE8Y5KFELh4RW2DrUfzibHaS3ZeXyJLAuBTUzj +s4BqXUwpKwqoQqv3d0Mi1RZanfVMWG470tuvGdmaW3HdZoIBmo44fVjx63/6wEGm +0A45avtOHRwQGObM446Q+Gs6zsZspLgEHjmPwr+0PsIjbR6weehXnAAOnr9RWX8n +UstyEkOSDZA8vJmSWSu8tXwky31ZF+cSC7DYZxBP7dhPWDCn +-----END CERTIFICATE----- diff --git a/test/service/node_adapter/write.test.covenantsql.io-key.pem b/test/service/node_adapter/write.test.covenantsql.io-key.pem new file mode 100644 index 000000000..1b0d20305 --- /dev/null +++ b/test/service/node_adapter/write.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEArwFtfbMdNu7m332+1KH/6hY7/zv+YhPu3NJ7WZC2wJlyc9nI +BzuD7SV8lcwv75w2n4aclr/KWFXYBPeqtfn4ebV/pvIZtyd+P4MGKbT3FuHdxPhI +7uTcw7LENXUKtmqO01OO1mx4+cbn14Hd8f1NUoxs5vEnohBoV7etI84fLAxglNAd +dtGTeN+jlSN+ipFKI74aPeEqnnJeJ3j35ZgvYb79hYEHXPngKOCsAa01cDrpxzuD +L+ukveP5SLee1lCYHpUKEMt+0SMBalVW6XltBnmMrLL5tjfA4RyAmgRxle8q+O2w 
+DxivStX1kInUL22kj6AjSrBpOPy12rwQiPa6pJt0pjk7+S71p69NJeoYyYz2+l6g +uWXqJ8b/jlwT4+CBOlOuDuvCvU1almQADKcv1eN9g0X2d8F+9CHgUdHaVkpGJ9ue +Ogrw5HWipnjt2B/YFRErh3125CDnGap+SCEoPCJFTxVG0GahzmWC1sQr6EsHdMDB +k6YTINY+4iNvoxlTUhAMWiYeLNY4PZaqL9q3tcjuHMxMcykAYpMiEQe0mGsWiRjz +DfnDtGcyeagqHkcjBE/w1qwGjLQBJFmjYmwpk5cDtS0OhhEIOastqKoacc+L4rnq +ALEM94uuPS8VMfu+d9rspfBMy/a/PjMrOO5pYLD0yIIYVHDJxyHT0p0QVZkCAwEA +AQKCAgBzaAueQwnW+gCCDVhUvGgZJIR4MkX0w5RXRu5VCBucMxTI1SsVqee78WaR +Gk/aQTe8R3bn6p4zVpjX3cNTsf5rtIbzvt+6am7Uz0C3LEFtc5FdnSXrdD0pSLAf +WImx9d8t+QJO4MV+Ye7trRSByjq9XyFJwmoSc6N7hQLGg90GnTrrp7pmappHsaMc +bIW8N0ee/nQrrlr+lgkFGr7PR2annN1utsH2TEnIazDDAkglNJSJ7/L5HPpMxxPT +IlO6nPdT45D5tlhw7ha22oQv/wUoqetcz8Hgqi+lw7gC2T9WUpwSAByEOBEQ1rvT +jzC//hvxIvdi/6bED9KU5kQ5Lgux/fWUvg/l6u7EebM6TeG/Er5Tq1D6j2+IjfkR +bPHLEk2Cv2oE7W1PhP+yinJnoxwHeic+nu7wsuevzvPhNZ7lLaWgHFvc1YVTmLDq +E/DGm5Qj6mh8SP2NcxW79m6fjdbgjw0OuPxdEo2sj74cxcifTy54GuSoZqGnw18g +28qXpDLkWgHQFrm6LvLqnaY3uvNMYLBWd4kqH0Y9XKI1N3j82ensAwH5e6Ol8Stw +I7GWT/1GggOEPbIwYtVBfbwghfktPttmHU4vs8IQnufqExgEjDhWspjJVZEuoO8V +8weDCQADS8/266GqRN9CUtNWMmbM0jAHgL7Bq9AFwgYSrP/BiQKCAQEA3GNqpUw8 +Ix3yYxvEXbceOguwqjzOW1vGBhJ4k6ptPG9kPI0GqMY6sw/RgRlCpfCq7Ig/Renj +6LPbSyjkylfavlLNODi/iGbnd6cLKtsvaeexWP1wYU4T367rF8ifqKl/C+l4XqBN +4j4KooyPF889PrgXJ73517jWwhxnncgDabJ2zTetjkvFbf3bAua5F7rufHi39Zng +Rt0gEmFg99XJjrtbS7iND5fhZpGK14hhkdwHptu6XS+yGIiVwbCwS0odtZDID3vk +s9CEUzhjnK8ld04RJ4vSMlxfzlub3e88Lvii79mmZgdH4aP0cPhmFJ3i0mefUVpw +cSmQSVMsxHkh6wKCAQEAy0i1KjX3k7LwWgsvwtRjnJMuEWJ/SoRiE4+Cho9BSCVg +onG4NyBOUgfQI9pBKf2CPWVDBA5VQrDN81ozmTPTgb7isDcFDSiowqyVSyRCorUB +AfjbpD7z6QMdBt15xHR3CXWwpio5NwBQqQ+I2AJ1koBYUVj5TupDOZzwVY8/BbqD +fmhtqLd4c2q5Go2ESK+EVAA1jvFmZUTjr9jC9a/8s/cn5Xqv7/s4BCmmfqQSKZS2 +LPBA2Th1zsUrSW3Os9v+c6LUU92LVEZKKKZyRykTemQRH/oljGG9Dn/hUDcvaI2z +A2+T15rQd9p6ePySD8BuZzxwFvAJQPOYaqivrzsBiwKCAQAmd5fSuEa63mxDTkJt +FRxKh2XToP9nxNIAl1LCe3nLlanKQ9dIuCjgvj8UKIOQkTxUQsfAfT2RjWsWaFHe +24zLsYouaQFNXqDCKr7xQQa6ln1HCh2Gbmlbnp1cLmFnwAXz31FqOtK9TZTvoFcN +kdefzeQExM0KETIy+WBAkvu9hC/mS/SYJLOWKjwC+qCN+svLoAqD7NLPq6MAckzJ +lWAz8JHT2qeMdDccfwTb7+sP2XbgcfPKdhvA2n5BK4Tp70rWOSoiQb6+gAPIvsvs +Oknw1Ah8fZQ3xBXY3/aJu0sm67EM6lF394ddZA+zdDflG1XO4dVWDtIXfmi307O5 +q2b3AoIBAFkTMfceEK8SkIkUL8hyYnegcmZBv786RPOHRc2KhjOD1VU4+VyGdmsx +az3ajAVHRUN71KK5WRjQ+l2w37043WwT5acLZNZAQ7qR/xUe/WfoYlmn3y6YOy6W +I6j3cTzpP6PQgyg8hjeYlr+NxAvLABPC03BJyWyP8AcVwqXrD9WFxcqlHa/5PPlu +AVAmRJnI9vYL5WwOUSz8w7wxAjS/+b4uBbhjSyaf8Qq56W/CmwbHWBBW8kN8nvqM +oQwa5qEfO98VsW5SPJQf/KzVSmvuDs/peyuE4+EgjsQEuwj4NXjd5lwSDzlBaCms +fU/4dFQcoQPxkrgqVBO26cmKwvjIpUMCggEAIb85XP+bwnSOTjFbn61k8PdgyPBq +kBDkiofKiBuR2UzuYhVqxkBqWHtDUhqq1y7A0S/ya75bSv67q4ZlHWEjCEiA8thv +KZwn/8yRVFFKEgtB0afub62Zgc+pPXAr2JwwtZK5dg91QxPaKF20YEz6tOcZdjut +gcQ8Bt4dpRvoz2vOJQnqMIhQM9+HiE7XXV7fgUwT55nC+4wRhILd3xOZKYzDzgMJ +ShMUAb7QkLRujyQwcYPxjWiqRFGSodMoNE2OdofLmfwD1vQfZ/gAorLBH3BVyXAz +53zHfE7+kLgobJBYf7T7Jk246soVYOLSZbeVjAT0ajMKD4ay2jNdnSlxKA== +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_adapter/write.test.covenantsql.io.pem b/test/service/node_adapter/write.test.covenantsql.io.pem new file mode 100644 index 000000000..ccebf106c --- /dev/null +++ b/test/service/node_adapter/write.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFyDCCA7ACCQCofDYaBrdh7TANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu 
+dHNxbC5pbzAeFw0xODA4MDEwNDIxMDZaFw0yODA3MjkwNDIxMDZaMIGoMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEiMCAGA1UEAxMZ +d3JpdGUudGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFz +dGVyQGNvdmVuYW50c3FsLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEArwFtfbMdNu7m332+1KH/6hY7/zv+YhPu3NJ7WZC2wJlyc9nIBzuD7SV8lcwv +75w2n4aclr/KWFXYBPeqtfn4ebV/pvIZtyd+P4MGKbT3FuHdxPhI7uTcw7LENXUK +tmqO01OO1mx4+cbn14Hd8f1NUoxs5vEnohBoV7etI84fLAxglNAddtGTeN+jlSN+ +ipFKI74aPeEqnnJeJ3j35ZgvYb79hYEHXPngKOCsAa01cDrpxzuDL+ukveP5SLee +1lCYHpUKEMt+0SMBalVW6XltBnmMrLL5tjfA4RyAmgRxle8q+O2wDxivStX1kInU +L22kj6AjSrBpOPy12rwQiPa6pJt0pjk7+S71p69NJeoYyYz2+l6guWXqJ8b/jlwT +4+CBOlOuDuvCvU1almQADKcv1eN9g0X2d8F+9CHgUdHaVkpGJ9ueOgrw5HWipnjt +2B/YFRErh3125CDnGap+SCEoPCJFTxVG0GahzmWC1sQr6EsHdMDBk6YTINY+4iNv +oxlTUhAMWiYeLNY4PZaqL9q3tcjuHMxMcykAYpMiEQe0mGsWiRjzDfnDtGcyeagq +HkcjBE/w1qwGjLQBJFmjYmwpk5cDtS0OhhEIOastqKoacc+L4rnqALEM94uuPS8V +Mfu+d9rspfBMy/a/PjMrOO5pYLD0yIIYVHDJxyHT0p0QVZkCAwEAATANBgkqhkiG +9w0BAQsFAAOCAgEAM+1j12Px/guyMnZFwwsRC9ITa5zJkAkfR++LzRZcT+Gz5du1 +FyQp+5L4Pws96OLFADKHVYE0EFlgdVbskVBErrEIQeZRw0bmp1zDEhfxr4c8fivY ++hW/AXjHsJuO8WVTlRctnefY1g6OdvfI6Sc2092GM9Nvquf1OhKIbPso1NxUUrnp +HQ4ffhQNAFsJk/PkPsjTBzP2iJrzynPdoIPK9jO6NbUg6XfZDQRwchvI7NduWq+x +nNTWV1D8oHvP0+FwHdRyctIVVjkxqd7wnenWl2mUr0SBf0FnfJPl9fz+YLVBLroF +4NGwGG/r6q9tRBAXATm+qbNlth589Tz8mMZMnq2+D6O4499I4MJLceuXw689rO05 +s9/BXWzjJThDnrFaQPyf/YTyMuFaf919F0UGLTLYLYf4vfuflUhaStmYyvArv229 +F4DJy/QDM+NWjo/pJH3ETeEA1stD7kQq7GGqy/MiB5YXqRLnGjpa9vqOECsMIm29 +1TUgdCVN9Gsk8JQPGm/lJUeJECq20LThSeXG+sY6RU+0rmOUJvR8Uv3kjkn0Xd+/ +p2xM/CboFXVcmU+fe9UfJar87MlPJcZP5SenVQuWZ3imI0kFeaObfHHKKJfNAoFl +agBFqnAc/EkYqekxGkxc3pVhBBiZ3D+FlinC2yRko9glPkRKA2WxINPVxm0= +-----END CERTIFICATE----- diff --git a/test/service/node_c/admin.test.covenantsql.io-key.pem b/test/service/node_c/admin.test.covenantsql.io-key.pem new file mode 100644 index 000000000..46206b1a4 --- /dev/null +++ b/test/service/node_c/admin.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAo/ktdmTGZAxZedA2vCVwXTb8iWNHg2D+EfI79oZ23qCBf09T +bp3RyRnAAogWh833Po+wsvAeKt5qtKQozKABjzyF0KJbwZhpDUcpf94SRX0FHha4 +G60hCmB9I9XzL2qx5A18G+d/Y4l962KA2DTgEoxoN33HUhHafCmpdJH8npkGsNol +2q+LElNXnS07caRXFbjckFZzm1s0YBlyT60CaNhI/R4kJvnO0UHfJ05vLunBlnJ7 +WGFSxJkcdrPGTGP6TdkU0AZTVQMYnkTnkTp6cdwhVBj7FrLvzCJgPfzcEQvxkxpB +EQwureJUstLOarnMGCretg/dNszyoVzdMjAZtNxtM2u49TLIZstEgY1KEu4h3Gc8 +omIXC1F3EEf3X+3rrYd37gems1ki5q/ow6wblwwUtvbKCyggubHMFzSaRpmNueV7 +e6hKnKZU6GWZ3/Q21gj5Ma5d3eauHdurquCS/tzYf+GNbYZHmczIADrTRlemqfhB +5zMCbSSb8cZ5/APjoZtq30/WfrvmhHxdUnxgML0n6q3sq5oSnFZ55vyMEQOBX3z/ +Vx9jb1S+pxZywJlRHaJ8GTz+hhgr+ojXvMBtEsn99r2Ndu4R/FEgEJV/26GZP0JD +c/SJ3GkZQhZ7IjUZfpQJ6/VKW0yiu86doPNsoG2gWJ+fF5VeagICYE5iBFUCAwEA +AQKCAgB145pp+n4gRDi4OZiAoLIucnASHsy1ijBgmrW9wmMIIIG6FEA50UGYweio +aUs5jD1sP0ac/8HQtGQnR7cFlyxH3Q2gOHqbr4Ynw7f0dKbSStY5EcCANXMB0Oln +sFTNDHqlKYTHUyLlX16mswVLbIiFDWmIK+f3+1oH1rQ8WRE0vXRwBgcdOQRVwpHF +MVYBmFP8DBKXu3AWi/YV+XWUDyEiXA3t3ZPEaenlzOQxkFSjd/B0yA4iNqaZLjOm +rA2vslmtSpuKDGIxRq6Wa5fJdC/AWLGlkuhDI5cAPt7O8lMN9nZSepe5N/b/kS+v ++ZvqY3Z3EatZXgJ/ec4fcXKeuLJhjOLRg38pfr49nq+ewVKivbH7bvfeYSyJrIx6 +ZCiXHl1IvQmS1272gx7rTAvBUJa29sKLwo1hw5vcCi06R/6GxWnTqUXhcg6W4yA/ +ejpkUJduDh4drU9w9FZ7OPfP+AzqPP5yhdEcCvxI/9wOVHgdILA8aM8OLE1QdoEW +sRc/my0dG3rmtx2tNOKE0oghZknlRdOj6j1Uq6O2XHHjQJuTHO+bvqLe87kOwr4F +KOq9APYUs7hRhtUROgx3fygc6nfyG0qi7Khz/2cidosAmObb547d+5a827zQ4dYS +xQ2lZeEe6cabuAZg1Kz1roC4t4Vl6Xi++rkqPEms574ITSHdaQKCAQEA2H+xvTpE 
+PoIOEBvZ2ECVjwk05qCmyqZhJzIKRgpXRTwRYjAQsSuRvPuafxtRv/sUC1eXFbuV +zCDCP1NiO1YD1lxG1FFfjRdoeKHoZlKd/DNpjv9s1knPhMm7Nms408c9V1202ttS +zmQr5DM6o2K+f3V701cnuBPyKbFDA4IQ7sYKFr7mEs+O44cKVr5+NPdsCu/4dyj6 +9ailBoWy7nkjPUrsaFcTyszwZ9b1LMJ4NcT1InvJS1LuP1SWIczqUiv7iaj5i0Hf +lubNQ1tjHe0XYIj+tW0shg8e6oAu73yhf43iVKFDa3kZCMD5Ht1jnAUxg8Nowvvu +XGAbJIDAOIJoKwKCAQEAweQa4mZS0dR3Jtvz4sxrqxz+pS2mSQB8pqfekfQwE6mx +UobFPd6qztDBf5PmfprkDZBPFl8S9ZYyEiJyjVDqeUyANZdHq3NPYOUtbT8SiCrl +ymsP/OX1sf2vPsxdwJ48PET5iFrWbEHFXCkeNuwgZIM3EhaqE7cMC/Uj9DyZwatJ +j1er5w+3E5A5oLhPpy2XuM83wlXyKTWXH2bbDpdN1HRcujESiY+rSzLpixvLcwl4 +ejFr3T/MfQXC5fEDmQI0R4hG6BpzNfGznSyY1+J0uJ8gDqzJ911MyQyD9eMNTOZU +PhMqLmBt1VyMUz5ekcFxM5v5vgPmF+fn9A9M5baFfwKCAQEAng+ETVnHzzcWW05q +Gkb0qewX0jUB8LvN/Fa9R2tvUZ3MNzpORXtAuI+cuSXR5m7BsJIvPO+qKtDT4HXZ +JubigFL4ZzRNpW2smT3jtSimLSW/8GWtKTnUJuc9Jjrbz0oMD8fbLVmouARMQxvf +uL9zwwyb7a0Y03zEdQn0mhAQmrK9VOPkh2E/uf+yXahP7g2htM6EQUMLDeUlLoDY +JOEOCEa2GGtSiOJctgMrFpWYO/Fi4t0rFjIivNvdjCnV/U4dI+DY54GdYsd4nq+O +yp95TMJX608cjXdmo+AX5ELCiaSl1BG0bjeIPmrctlr9yT/FaaR1zL0vxgNobZsO +O3OB5wKCAQBDQu1sotCSSCF5ko4dnIqxVqKkDJ0F8CxN6ChW+53+BD0mguhD2U3p +5xNpPZaVTwhUCD7XZO3/0jXWgqq4iVx97eMANFXBjYP4+ifzIRE9uZvzx4ZJVkEQ +mQ/FOkI/wuTkh40FF3YRIhPkL8NyjCGEnNxq4v/nTPXZ5BWv8aHpRJGFL4XL53C8 +UakcLzQ6q59ZllEikowqbZPaaeUOP8DZNfDBCqsCm5txv9yyzFactqlbwm9H1o0K +xgfhmuWDm/ck5YqrlBlpmkqT+Neg9MdHELSfQqPhszUi/bt9fmGrzq9kxWM5qWwQ +u0VWz2khKTkrDS3rFBErM+EMko47lkDjAoIBAEdoQOdMnKn5hzbhxUhDit6I/NoX +K9xEc7VH0oBd9KLsINFzQyGYz857jSyCZ7L6o7JHTVLs/469lcjcuDJ/9JkNU7G0 +p3/h33sHN/w/cGh5OyWpaAt+m1PoP6fEoHomFAilAINCkXlT06+sLQo7dl7khJ7z +5qsogIVzeW1etFICikJHIHSsND21vCkVmRbrOA3MZxNpDwsTcK/LxmF3xq34PTS3 +1BKFZA872IuMf/xLGQ0RdEbLzxtSUppkMl2SWE1Vph1dV3xR+YUeYMziYq692cRE +6McNJpjK8RhdC9t3AlLrViyAphcU1v8T8YprQHMS/1xCbGZ/8nrCAnD81gU= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_c/admin.test.covenantsql.io.pem b/test/service/node_c/admin.test.covenantsql.io.pem new file mode 100644 index 000000000..1e3d7d608 --- /dev/null +++ b/test/service/node_c/admin.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFyDCCA7ACCQCofDYaBrdh6zANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIwNDFaFw0yODA3MjkwNDIwNDFaMIGoMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEiMCAGA1UEAxMZ +YWRtaW4udGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFz +dGVyQGNvdmVuYW50c3FsLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEAo/ktdmTGZAxZedA2vCVwXTb8iWNHg2D+EfI79oZ23qCBf09Tbp3RyRnAAogW +h833Po+wsvAeKt5qtKQozKABjzyF0KJbwZhpDUcpf94SRX0FHha4G60hCmB9I9Xz +L2qx5A18G+d/Y4l962KA2DTgEoxoN33HUhHafCmpdJH8npkGsNol2q+LElNXnS07 +caRXFbjckFZzm1s0YBlyT60CaNhI/R4kJvnO0UHfJ05vLunBlnJ7WGFSxJkcdrPG +TGP6TdkU0AZTVQMYnkTnkTp6cdwhVBj7FrLvzCJgPfzcEQvxkxpBEQwureJUstLO +arnMGCretg/dNszyoVzdMjAZtNxtM2u49TLIZstEgY1KEu4h3Gc8omIXC1F3EEf3 +X+3rrYd37gems1ki5q/ow6wblwwUtvbKCyggubHMFzSaRpmNueV7e6hKnKZU6GWZ +3/Q21gj5Ma5d3eauHdurquCS/tzYf+GNbYZHmczIADrTRlemqfhB5zMCbSSb8cZ5 +/APjoZtq30/WfrvmhHxdUnxgML0n6q3sq5oSnFZ55vyMEQOBX3z/Vx9jb1S+pxZy +wJlRHaJ8GTz+hhgr+ojXvMBtEsn99r2Ndu4R/FEgEJV/26GZP0JDc/SJ3GkZQhZ7 +IjUZfpQJ6/VKW0yiu86doPNsoG2gWJ+fF5VeagICYE5iBFUCAwEAATANBgkqhkiG +9w0BAQsFAAOCAgEARu5lULDZastDfdkkWhdBlPphbSksyNqT0tr/RZr0EtWjtNjq +IEYLuqOyVom3r3FiNjBO9u74BJxSqzbH2GH7qjZPnGfMgFQaxnS96T9HnXjZlPn5 
+spcYA1m0W5TpF17N/rzxH+/c5VyIhwsVBdRF/uVow/6r+GkM+knC1K4Md27Wz0KU +jqOQ5eUm5KV4kyOQUg7MmTafqQcwt1Xh10kJ/52hAG53IznMgCo5ZSqYZroLlF8j +WXTlQtGr6SnsK8poSJW/JuidgBfwliL7OGFMnvWrCVk6FhAL3rlY/PmhDZ+OnG8x ++b5JuuxZcHnA0JVvK01eWAmcMixHlgtnZ+6Cgsx4CtUUo+PKuOZBBo4lWqw+/y5V +A0cvPy+8DadAndT/xd/NHUXgxrNjbaTaFuDeAJwN/i2wWh2wibEPhv25rCVQTvOP +HG9b2izWR4eYTqBSbTZjrfagnt3Ikx9os1C+/wuwGRMC/1GEwQ58bSuWHaKXdXSy +1syTvm+tt2Jg7shaKsfw+ZMY6iChUJ49yBB5W1F6VBHUgKqsGxnKlrEC4z6YoOkl +E9WNb6R/8ROF+OCYPgbisYaxIUFp6KJXK3Eh3J7s7XqW6Fn6nw5e0eMn1SZZIZNt +XeLTiv7tjmSREMVzABvaIaFQk0s5GmWkZvqQVkRLJRiHuCCgbIWMrZUZf24= +-----END CERTIFICATE----- diff --git a/test/service/node_c/read.test.covenantsql.io-key.pem b/test/service/node_c/read.test.covenantsql.io-key.pem new file mode 100644 index 000000000..4cbc33ec5 --- /dev/null +++ b/test/service/node_c/read.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA7cWb0RE4+hFRBhwJpAgQUSUOG+8H9evC85z5IgyrgwIPP7sl +6xz7VpiJ3O+1SP3Y+aHf1vNryp+AqW6Y2G9fPjPSusp4dFE19xC5hG7kYL2TLmtv +8B5ceLtQnI1XZd46TAiFqOg3rkB2X8oHOPfHY4zhPQ+4PC4EnIlFwiczoAbdomV2 +pQrKKM4F4ENwvthVr9uPg74pOEbJcy8NoW0l0WZlY82DVfC9ydvOLJH4nsncLTEY +BDUki9ETQIrWgqC03U21MZmDcyfZbi5M53aKky1iQOzNQZ3Rr/fFdNMcbLYKVlMH +hXQC05oXH8GvPYigqOzNyMngWdousKGiE/k3vncJPwVFhdovQXLfa/aaZGznBrP+ +dJe7lpBmAIpFd7LmdqVWPnuDwywhmVEneaI1aeBEXEl60/V6bFDUmSd62yBtFPcx +axUk8tABWDL7vN3kY3W4bUbNyvscQo8Q6waPjD5hTsbXAptQnXNsLzssDTnBX+Kk +ZNI1tTZ0suikRnbtvTDYd4hN6GFtmD6kF5J8F/e9iNZiBy2JnOIbvSckqkVhelyQ +o8zbr0k/rgNRXnV4UfJShYaWEaG74i+JcAVmp/P4Y9LyBwbiSPgH5oGg+eNdo6P/ +JOil0ArFvjBbCwDiHxKtd03jdSZ/B3pnwPGd9lHI0M8Tulp37LZsZm6bfI0CAwEA +AQKCAgB3I1rNyPlpo5+blhTmkfvLDOwi5wRwHq/SbUcP3pVZ0YBeiKGZSy5M16XM +hHermTZM7uU/yTyrjHxlaTtAx51Lh6ABZE4yyjZmE4VBbGcWaicDTWYLRMtE22aq +6s9uBYnkayi9141+zGID4TD5RH4tzXtWozfHP6+j18ySWh4uAwKuynRGgj+FbqXX +FzO5DKDyuusQMgppXl62Tk9gIVafs9T9yw4R08zlBjQqdQHEXpTqN/02roIfZKVm +46pUTb4SXUt7DNamrsLtyFlUaTtKP6VJrt2yESfuKhJQVS+a8SQA2R2dquF3sXAA +w4XRKVKHEhCSmUTHAOIAMx0JMQjSeYffbUR3dF8t8jK/RYHZ32oKuYgt6LLLMMUt +nfehiweMYKkjhLzW0WCyhuqlhk4T3x1Wgh6S+HiOHgvD8dW8wnmNL2k11h3STroM +g6Fc9+9KMBp97FrsCYFrIDeTY6uWCJxE5Dkb1Y7VXUdGMuIHztNnSKIHcNDOL2Mz +N6qr2smE1I5Wzm7CGv46AXTt0TOKnXgEyjNxp8LRkl+oYm/GlbqG1RrXHqWOcQEv +1Y6FSo1yP3SlOcPq3YLcZzTLH2hrrR1R4ie7hKRL6j19TnBR8R1CsP7LHOHJ4ahM +14SkS4srowtYsXJijoGh56K2H2sxnElVxQJ85qALdTBeR64eKQKCAQEA901lznD2 +5ZHFtYWLKhlqXaqgM/4Whu6cR0f3C9SLcQzAYjuaZjGmOKauackLRWSVpDnSR8zB +ol1QrRyY2upbVKRXxR9nkqamVtYZxSBS+8YjRBOvcESNY5HhjIIqwBQBTJY9v1DQ +kA5WSfivThQZGJDH1y2PrYFi5ZxmwRBnMMw+NXQy0ccRIagVMt4qQLEnA7diab9F +2ZYAgpk4o4d/tA4rF/22AcWX7pdk89zA99qoz0p2ko5/JrV+FZMGZ8PPbm5I4HsV +ahHXBXIUWExpOhonVjLNWXDvTHhkRS2zT2uhav2ohLf0+CouSA/aGoDBpOTzSndw +pL7yIQTu9B2W4wKCAQEA9iJnNuS8qfENZu+/5fzLlh7OaAerMN4JLdg251ESp4fs +LTOWFTlzU880/57SREcLO5RfYhMw2FUzyU+tXrc1cAZMNe8cGmPqDeUyQSgavs0M +WzSnFUk7z6jHH3GNCAPBC9A7M3oogyNiNm8fXZX51Fwv/EyAJX9lQhmXPMh/c53f +ulWCD28XKVEgsjEMMKZZVOvkRGBN9KLJ6DlLCtrixZSCfUyP04AjLxDeMGnhqi/P +nDADvVcxrRuMs5/8OQ7DVg4UxuOK4D/v3KcBq34hK13uOvupdxBHO1yYlUVa71G7 +ZX4KhumUOZZQkoYSEzW1N6IZXzA8+nV/Ulh8u4WJzwKCAQEAn+9eN/S2uCFeS9bh ++YgWUh1XHkjlKL6IM1FHZE9BHwuwH9eMMytI5LpnceKjd21lmaALboPtdqQC2PH1 +qR6HkmX2nXWB9kXwrZgpcmNFR68Mf6p7e4/aINrnk4dbPn2xmWZQ6LnLKF8dTxmV +xlkZIdoAZBkDIqLa4sQTcCi7k8ODN+6+Lw0e9zVNAGjNyqjHIpAnBVy+P8nS6qNN +DfVDkZ7YH9vlKaAwcg1XLJ9H7QNsySLPLFkbwlz9/dXn/pOUQ0bvur3fS4neFZeB +sNk59GmVpxmT1JRFLp9tuY+kt2hULG1/3tVZiGU/KTuXQiyjD5FCBpbYMrOKw+/8 +2cOJIQKCAQBO6ub3Jc4MGxr190crIavRHV2G43aTO43r4hhwgIEfsCgcsh6b/Yip +xZUzpKO8ep7yYndWxdpycpchI+ftp4Z9vbcvz9PN7l08SVGcrJQuuyYMFEzCOXHw 
++iemQE081Z9O/1wL/E4DBhRWabi/0/d/jHNiTNEFtNwtnnDsb0jWNDdo0kPaWP8v +IzD9kVZcPuoDnYLaHZrBJnTgfYY/G8F8IkrYi/TNlpcxXxIuqbROUfgaFxcL3Woz +G9M4QMKpNL+S1v74ajq7/iQVNoMFjnJqKjrZNJm4cEK2mNDfg5ZNh4IzX39WlIwP +DtAUuuIOwLiy9sl1yMy0bXn+WBreMUnnAoIBAFkchyvXwhi/l+rRiHAuFMTc3BsO +br1fVA5Avlv0MSBNRa2sISoslCDcqgLgdRckJplz2Q7YX0tL5aYZvZtMpiNmvyES +RL2hNqulrKJ/8Yuf04hUW14MhXizq7+NgMCTtOeLo3W40+EGswV9wvq/wTgdE5Yo +WgstDYvQ1YlqVXP1kWZDcFY1kO0zLIOWwFWbtmmtM2TDi09kZFNLGOoGXsJvKCWE +6vJ8xORPmmrVQ83hHIPqGlFkxts7R209RLWgGWSSOatdhDEd3uiuVS/XlNA3Q16l +70ME8P5a/MqEwmCF1sODndfqnc2A9n/XBM65IdFproaANOwsIcL2jW6T/3U= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_c/read.test.covenantsql.io.pem b/test/service/node_c/read.test.covenantsql.io.pem new file mode 100644 index 000000000..1fa09dd22 --- /dev/null +++ b/test/service/node_c/read.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFxzCCA68CCQCofDYaBrdh7DANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIwNTdaFw0yODA3MjkwNDIwNTdaMIGnMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEhMB8GA1UEAxMY +cmVhZC50ZXN0LmNvdmVuYW50c3FsLmlvMScwJQYJKoZIhvcNAQkBFhh3ZWJtYXN0 +ZXJAY292ZW5hbnRzcWwuaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDtxZvRETj6EVEGHAmkCBBRJQ4b7wf168LznPkiDKuDAg8/uyXrHPtWmInc77VI +/dj5od/W82vKn4CpbpjYb18+M9K6ynh0UTX3ELmEbuRgvZMua2/wHlx4u1CcjVdl +3jpMCIWo6DeuQHZfygc498djjOE9D7g8LgSciUXCJzOgBt2iZXalCsoozgXgQ3C+ +2FWv24+Dvik4RslzLw2hbSXRZmVjzYNV8L3J284skfieydwtMRgENSSL0RNAitaC +oLTdTbUxmYNzJ9luLkzndoqTLWJA7M1BndGv98V00xxstgpWUweFdALTmhcfwa89 +iKCo7M3IyeBZ2i6woaIT+Te+dwk/BUWF2i9Bct9r9ppkbOcGs/50l7uWkGYAikV3 +suZ2pVY+e4PDLCGZUSd5ojVp4ERcSXrT9XpsUNSZJ3rbIG0U9zFrFSTy0AFYMvu8 +3eRjdbhtRs3K+xxCjxDrBo+MPmFOxtcCm1Cdc2wvOywNOcFf4qRk0jW1NnSy6KRG +du29MNh3iE3oYW2YPqQXknwX972I1mIHLYmc4hu9JySqRWF6XJCjzNuvST+uA1Fe +dXhR8lKFhpYRobviL4lwBWan8/hj0vIHBuJI+AfmgaD5412jo/8k6KXQCsW+MFsL +AOIfEq13TeN1Jn8HemfA8Z32UcjQzxO6Wnfstmxmbpt8jQIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQCq3FVZnp9HGItWlAXpViXrJx51D5W+bh83yKKlo23fo4u/6BM0 +H0gXTtl0XpG/nsp1oqINpc9+NXzEbs7Twx4utN29WyboacbLu5KPD6q17bWTdIH3 +VijHcyOchlru0nPhweNVtSR7+hmVMZrqHy+Ib2uzuDieD7ulvHTaX/JDkRvZYhYS +8qCptWk9VObeNnA3cyoZo5WyvRLXBQ5Q6LW5EMmXXQIKWyejX3vzwraZXFyhkLzz +GwY3h/ez4dm5Vgbf+lodAtslO5SEKcA6tSQLcdCO4J5+aZrbyIuzEGUra+Y2ZiRl +xtYzSkgaMRpMYZU7y96v7qoj2UOJw7KYj+3bN8rb3iTiXKXBG2XoH6Kn7IQb8pYD +k0+KGZmtZQ38St5UNmT0V2G1eoZA0F0FpuyVPe+ZOF3TxCq4BkvQC9puTrpHZiFm +mWw9xQsjOX34B88GckJsldUq86f+SNLhBFUBQOVRxWWjOV9R7PHHr+d28foTdPfU +gjf6Ff8XGoDw40peFLodsJfuI7xvZHa/4IoDnhEYHyDml++jskDypfNmSBn4m8fx +EtcwxUmsjHdW/mXqdFtgMsT+NGiGZ766KNS+JTWkv9ZJQMUS/714v3q/ymgzIIQ1 +BNhosSnSqa/eyAzggu6+US/FaG69xDBZGwoI+xw3kzQ+WoTQzjwoz57Enw== +-----END CERTIFICATE----- diff --git a/test/service/node_c/rootCA-key.pem b/test/service/node_c/rootCA-key.pem new file mode 100644 index 000000000..d4e545428 --- /dev/null +++ b/test/service/node_c/rootCA-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA0ry0EA1+Yeidf13L2GdhO1WiSEUKUDslVcYfzqbHh+NJyw4F +7J2cnolA2UcJ9zg7NoB5W3mYjoRBxr/SY3JVeDa7E3NnniNvO+n7ZD1sfCgjsJdB +89kMY4adzMbyJIvrXcllMawP2237jZ8LsZRl+NLJzWe296pfivWD1dI8RxnVk6Rr +Ub+VrcxFLp7W/NoCfW37xU3SYo4jFhVfCS5P9IMqI3w6aBK7mLcRj2HH8+M4xCxz +YLmRsHxkazZcn6KL/Tef5QyxM9a9yexwg8u16z4yxn74m6egQ/CNVtWcqg6zNpr4 
+EI8WfgagJr1dus1OrpZrhdeNQDIQwMLa2RHhj72PF2qQ7bLkuWAY0UyuFvcwsGFi +EfxYtfNxnl2YPM5aVqrt31lQLi0AOLEvC6rYXd5sykDg9XAthuNe3cJAfqcGUpFr +3GrAwbPcuGUHPrgo6UjyNmwaBYLlGFmz3te8Pj2P1fLoLFXROtCg5hpYHsAqNZoV +zrR4/3uGvPn0eABzMR6BaNYl1m0mPkSY7bCDH2oulEmU/E1Ck0QuJ9+jd5vVrHPc +jV4B3jQsmK5UP7TXkcsJ5n8OozPahoItv5cwNYt4XGWpCqpEoqRmOSWZu6utX/nD +oByaw8mcEqPBHEotG7in8qpQLIboPO003e3lwq7mECwSz7UXFrIkfx8l7PMCAwEA +AQKCAgEAxfVjhCzYyqpTwMBga3F5Yd5oalEIhiN5R+CsIliC2MCGoksS/5ly9W3j +T/eugd/9p4358hcAWugcTdIFlUtixGFNTNE8xc1tgS+j6h7VGLAwDoOX/bOnMprT +Avjjn7ccKuazu3xxDOR8yCVeO7s2Kw3/aYeC1ZXi2EsXQ7WQ0A2RlnZ+JbW9qhxX +5JprQ+ybKC43srkO52uzw9vhgWNS0lKgM+NPjlICjUtzIGhvB0gsHAPRgkvvcoT3 +Y8sWKRLtQ7mL5wMMNrEDaXpEm1myE0BDPDkr2jQVlZyTeL2CxDC44pOicROowkwB +B0MdmAuiXNiKOpkoY+Rj3l9sazqj0cfzc1aFmUchAyb0Q+a2V3ubEUgRVtynRO75 +p41SrdB5Jo4rm83GmRoV2tbIK53rseRrXQ9VT72pu7D2XN6KhEgyUbc+4p9jbTY5 +GFGkWPbfp6ryoyiFWnwQyqlKQZnz+k74aweQQ0uroc5JUKgNxaS7kLIB2+4DrIRF +P0RwuUTR5wI9WjpdB4J17NzpBNgJ2s5eaQ40CCFHSictUX1a9kFk24nel7XI9br0 +F6tFwC9F3TdSxx5HyHna66WfOfG+vs6Kt6RC4Dzft08/jrQeQ8fnZcufjaeFG6Uy +xPZQQJ24krJ/SrsZiZmrR5bFCRFTE/n2N9npZpBHhajYhjbhs8kCggEBAP04RPKR +vw9knLkuqK78QVUBR4UzydqMDQpZFF9wM1x2lhg312K/og1Y8785SEHqsTgtXNvT +cleE9NhjUqsLfENfJov0ofCCXbjEUuRxCEZd+1R5XfX/SLOGmGmWqqiBMReHE/Cu +c0e8nBY/isGDtl5E9FPxdTUQDrPz61UAt94SThs0Jhq0oKT90QRm8/vxKkgOcYWf +s4D3BgGcvdDXA0zwH8RC36fAPvYLfi8i9OQ1upi9gNBs0EgYOtM3VLHZ3HQrZWTT +gUCwR+la0no19eZOgpbJQS2XzGLTVC3FFNQK6emOQ5g3h6bml8ukFQOHIWeHVOqJ +K0G7B/lT+S4WCEUCggEBANUNBunyt1/Y3+2JyRhF9C5Rq7Av5k+sedhsMLFHuQ8x +Cf/wAs8yKW09a0YrqX6laVmu7VcBHaMVY37lac/U6Slr11JnsHLTNPBgwwl20Z7U +QSG7/WdE/p0ylatKKg7dJ6iA2ctjYbjG0ML1XWuj6QbkvNDh/KR3cD/niNqXNCQ2 +KihJ62mQO1odKRRgBqImYtRVo7E6hgYvkYqK9TBgGQ5ZtX4tiMjHah/YR7AtEuOr +O3Yt4aaAww7w6JeRecIEg4JSW4KuK/ztJ7D5PNRg7sz2hECjELcFP6fTxF+qcEj5 +IzRgdTjs/bNUZz4H7ikH9ejBJdEvwPHlyDQHlFPsP9cCggEAKWGGsvVqecOBcSnU +2zPSIWgiHfyGojZ88xH3qFkXq6adhLurcTHL885zlu5vhoYqC/ot0KbPasoJkUs5 ++UXZOtFT5U9HH5zOYCGFQlvOdGFrbzSeTFM5uEzon2jF3t+t/CBQ++YmZLTH9ULR +FCrIJMO0AfvVoaRMItBbxvplEd9/8CYni/m0vwHTpJqGiMeyly/1EVc16H919dF9 +m6Fnoq0jI9mh3zIll+Ps7RsTVjAJnGhroqQFraJ4CohiSOZHhpyI519BIicsuU/k +UaB73PU7lhSxmBfUiNnsScaJTtWxwD9FgJyiiH3qlJbt8DOnG9ob4HAmJ4m/FdnJ +QOTM2QKCAQB1uesWH27A4eBrK/YZGZ6icbLDetRzNkVmF/KYI5/ZCyoRaRjEUV2e +5Y9/iOTx/IlIa2bu6sjrswf1uONNWsM0hkjHWlCgQqFAKtfbRPL0JymOcIjIJdHk +H22g5yxyZjZh4EF5KAN5zTLSaC8lKb+8dWz4p8epQe6fAVwYHfFMCTomZSJWhMKn +OvHWNnGz7C40UtZPOp2QkXyE5+AwyQlParblcFfjSn4T9rk2WtHTSG1lEllcXk5q +1ShRiKuVUFUzEDtM7N2Vt551JmQ8nwuV6qqN5Q15dMcF//jFPDMrv696Y8qimCJg +k8Uw+8TYm3OBGCnDe/XMNUL5rS6DaUqlAoIBAC8zFGOm1FFfSRAFOiKUEkGIYaBQ +Gd7GaBWzZFC2tDe6Dmp+WxFxqX7Dr/AG8nmCwOrbJOayhrEmIlYwjHmZNDSvphMp +L3dQYqVCqQRvCDx9ztXb+mus3iyhgD1vgWB/EwqhiK3S2n4rbaGU60h0YFC1JL0s +icrlRsZMkJV+l5O7gGFCVHCBZc9XZDeu6pqOjyMS0gx5IXyHGRBS7hS2HXD9QHid +/sufbNxzs2sCdwM/EwE8BlaKX0OiLGyxcQh7e5Ca4INuNzM5G+3ZEr2auVAkfTNF +u+sAmvfbC83U70HJakLGZuqq5F+xamj8dL/qnlYpo6D1wdnep1IeVvn83z8= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_c/rootCA.pem b/test/service/node_c/rootCA.pem new file mode 100644 index 000000000..1aa3ca429 --- /dev/null +++ b/test/service/node_c/rootCA.pem @@ -0,0 +1,39 @@ +-----BEGIN CERTIFICATE----- +MIIG1jCCBL6gAwIBAgIJAIMSiSlXKMA9MA0GCSqGSIb3DQEBCwUAMIGiMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEcMBoGA1UEAxMT +dGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFzdGVyQGNv +dmVuYW50c3FsLmlvMB4XDTE4MDgwMTA0MDc0OFoXDTI4MDcyOTA0MDc0OFowgaIx +CzAJBgNVBAYTAkNOMRAwDgYDVQQIEwdCZWlqaW5nMRAwDgYDVQQHEwdCZWlqaW5n 
+MRYwFAYDVQQKEw1NZXJpZGlhbiBMdGQuMRAwDgYDVQQLEwdEZXZlbG9wMRwwGgYD +VQQDExN0ZXN0LmNvdmVuYW50c3FsLmlvMScwJQYJKoZIhvcNAQkBFhh3ZWJtYXN0 +ZXJAY292ZW5hbnRzcWwuaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDSvLQQDX5h6J1/XcvYZ2E7VaJIRQpQOyVVxh/OpseH40nLDgXsnZyeiUDZRwn3 +ODs2gHlbeZiOhEHGv9JjclV4NrsTc2eeI2876ftkPWx8KCOwl0Hz2Qxjhp3MxvIk +i+tdyWUxrA/bbfuNnwuxlGX40snNZ7b3ql+K9YPV0jxHGdWTpGtRv5WtzEUuntb8 +2gJ9bfvFTdJijiMWFV8JLk/0gyojfDpoEruYtxGPYcfz4zjELHNguZGwfGRrNlyf +oov9N5/lDLEz1r3J7HCDy7XrPjLGfvibp6BD8I1W1ZyqDrM2mvgQjxZ+BqAmvV26 +zU6ulmuF141AMhDAwtrZEeGPvY8XapDtsuS5YBjRTK4W9zCwYWIR/Fi183GeXZg8 +zlpWqu3fWVAuLQA4sS8Lqthd3mzKQOD1cC2G417dwkB+pwZSkWvcasDBs9y4ZQc+ +uCjpSPI2bBoFguUYWbPe17w+PY/V8ugsVdE60KDmGlgewCo1mhXOtHj/e4a8+fR4 +AHMxHoFo1iXWbSY+RJjtsIMfai6USZT8TUKTRC4n36N3m9Wsc9yNXgHeNCyYrlQ/ +tNeRywnmfw6jM9qGgi2/lzA1i3hcZakKqkSipGY5JZm7q61f+cOgHJrDyZwSo8Ec +Si0buKfyqlAshug87TTd7eXCruYQLBLPtRcWsiR/HyXs8wIDAQABo4IBCzCCAQcw +HQYDVR0OBBYEFFdgm7OKRRCg0gIK6kxGU4PuVhM7MIHXBgNVHSMEgc8wgcyAFFdg +m7OKRRCg0gIK6kxGU4PuVhM7oYGopIGlMIGiMQswCQYDVQQGEwJDTjEQMA4GA1UE +CBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQGA1UEChMNTWVyaWRpYW4g +THRkLjEQMA4GA1UECxMHRGV2ZWxvcDEcMBoGA1UEAxMTdGVzdC5jb3ZlbmFudHNx +bC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFzdGVyQGNvdmVuYW50c3FsLmlvggkA +gxKJKVcowD0wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAqEjzbVox +aaxCXs/lQuJE5/l/QSBs5MeE98zwINxCNmZYMsA9JmekyphP286fgdC7G2uRP89G +5lf9+UkHjfWK/N8l1t0NAA4LScMaD09SYCq9p/s7BfxfG0ZS5hfZ6MXuf6svYhL4 +gg7RQEUNZsaFSLvhMG0hGnBzKjDEPurrRnOx9tbtQF6/O6evN2Ig2ssqKjn/m1As +1mxGZy1ZCyREQvHEyj0p36LQtWJOYGRDncflJbLSMBrWq/bxQkATMYJuPPetHIJH +nQzbsbagUrTGZPM8B4LJXD8RtnXmH7zrU+JOunshxTfnl0vo+ezvKT0ig2q2M/t1 +DH0Em8EUgJUlUEOxUfA2hZ2Oq2RrLNz01oK06D0De5JL3CwUpSqbzqJ7F5M5os53 +I9FXSiKbjJUxZijH6NkTZ1gP6GpsEEWc6qOXXAYJWNrW12L7+QjnjgjWI176xO0y +VrvVGBgeOCoFAD/4FSzmCiee9v9sbdzd1GkfkXztPJKdeorRPyetob/zK+4btW4n +0dxfv6XahyBgoKVA7a0kn8ZqM/g4hmkfX4LujTK+C75d8p669zopQ3O76XRBsyJF +dM7J2DwRudG2NphtJyXWXdDSdK9s3iPUiS0y+j4gg9I/cFBQUjKD0R5ZPcRrdG4N +9zeN5A/Kg7vHsbpREm0YtLO9LvlLUp0HUS4= +-----END CERTIFICATE----- diff --git a/test/service/node_c/server.test.covenantsql.io-key.pem b/test/service/node_c/server.test.covenantsql.io-key.pem new file mode 100644 index 000000000..97b9d4b6b --- /dev/null +++ b/test/service/node_c/server.test.covenantsql.io-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDYqNT8V2PPMnWI +Ka2PxM7z2cf1WMrJq30EXFCboCJXyxLKFrBvb8LqgXpLonhug5kIVVaz3yHSph6d +lFEw4RCLzDm4PbvUxMbX3h0v/uirT8wCy8RvXYwQcisl2++bpO8IqFTiqg4O5Thr +O7BSqNXdS0/yi3PVN8UCzGckGXfPLsD7bCWPAFJq1YmvJ8XAqgAm7h3XPoWbUSEK +jOdLjU3jDq7/kCdZNN2DSkTDkE5JE2pf90BnALUIijggeLQn2080NfbFSW09j8kg +6BzsYfqBo+xxQ1MEP2N/0Zuqf+xW6jDYrHweznDXJdNHUN/yxp+64kxHgp2wyR4f +NxLYt7HvAgMBAAECggEBANNt9uMGGRWyxTWKjqBVTCx1o4fPDZ4+ZrLhr5wfakRI +nV5vQ+CLrSgSEJlMxL/8VlPmi8Teg/BAQnI+sfjEOdRjCRS90dXx7aXtUIhs9vtu +1MUJuvl+zdeiwm6gsbQvAUFum9/SWgO5NxSWXBxePM5G1472/aPeV7jCZgi5fczE +pC21VB7zzPG20UjWqVj2vAD8tS9/UQybc12/IOnS7z6pQP1wpn/2N99BEcEXWpDW +m7/jDbrZ6qJD18QmAoltMVfQF5Pi6qpLkU8qOYKFioO7GGNhapWz6lvgeLanux3l +mU71RAMANgmgdjs4RFdC0hfy0a/xPRfINCeVkSwC7mkCgYEA323IVoDaKSatFtS1 +W7QlX9tjtL1esuvQfHsR7B5EAqir6Lw4Hpn/gPlwNaTl+34GJy6dm4Shbn/A5pka +ow8bjWNPynTxuvlT5NXOW9TmgkrzATIhSrQfMHO7saDCo8JVqRZUvewFXXo4bitm +2bsHYh8Z1XClOz1fka97zEr3Wg0CgYEA+D5sEf9JdFU9LNuDC5JshG/Fc401mukg +AckE1Nmi1y4aa8XNEZYwUIlBb4r+OUlV/cMQvxEqnt0bl5oiL5myYW+sSnSCkHU6 +O3CQl1PO+uuiyNYl3QGNcq5Hw01joR/HejD+h0I5Mb642sXmUcba4fcLKBS1ZG6g +tCANeXBuKOsCgYEAzDYPMeE7hPkwovlveY3By0c+bmfXMjmOqPfjfah8COIZnfLK 
+aE3g1gUmpiE9gwvAm/djXk1vLwvdR+cQDZE1YZkvyJ/ygS55m2I/5ndE6DmQubsT +6q+PAj4Fg2in/f0VRiJ++cfLb5DSGv/YVZE4Qlqixg7bNrX1r7ZwtFygj9ECgYBA +S3qWFrahqMoVai1AvAXbL0/Go9Y0bxjZHYVg05V3gftZ2ntIiMuusD4Ac9FwaOwa +s4EM25dcWgwhccxU48vtrIzFI/QFEjeo2Xi5mP1Mw+b/eWeJHDPUdgskLFEXlDGI +FlR2F9LUbX9XOlZy67wZNnDvSp3Ii1aYEI0s3M/LTQKBgCadu59DWqTxHfzu/vRG +e7xIMuqXZ12zA/9Ks2pasw1Aa9ZWwgRpZmP4PiFn9tyXEsUXYVbNxWEu3ZUOMQEY +Pq4BeyADEWLDeoo1rHbAEv2X+cr7rm4Sobu2vxtfi0uMlUILtWyK3XuiRoTdlXOH +U9xfXHYXJp08l0Q2dXIHtEZl +-----END PRIVATE KEY----- diff --git a/test/service/node_c/server.test.covenantsql.io.pem b/test/service/node_c/server.test.covenantsql.io.pem new file mode 100644 index 000000000..1b9428afa --- /dev/null +++ b/test/service/node_c/server.test.covenantsql.io.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEQDCCAqigAwIBAgIQEKobji5n26kQYHutrsnlgjANBgkqhkiG9w0BAQsFADBt +MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExITAfBgNVBAsMGHhxMjYy +MTQ0QFFpcy1NYWNCb29rLVBybzEoMCYGA1UEAwwfbWtjZXJ0IHhxMjYyMTQ0QFFp +cy1NYWNCb29rLVBybzAeFw0xODA3MzExNTA5MDVaFw0yODA3MzExNTA5MDVaMEwx +JzAlBgNVBAoTHm1rY2VydCBkZXZlbG9wbWVudCBjZXJ0aWZpY2F0ZTEhMB8GA1UE +CwwYeHEyNjIxNDRAUWlzLU1hY0Jvb2stUHJvMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA2KjU/FdjzzJ1iCmtj8TO89nH9VjKyat9BFxQm6AiV8sSyhaw +b2/C6oF6S6J4boOZCFVWs98h0qYenZRRMOEQi8w5uD271MTG194dL/7oq0/MAsvE +b12MEHIrJdvvm6TvCKhU4qoODuU4azuwUqjV3UtP8otz1TfFAsxnJBl3zy7A+2wl +jwBSatWJryfFwKoAJu4d1z6Fm1EhCoznS41N4w6u/5AnWTTdg0pEw5BOSRNqX/dA +ZwC1CIo4IHi0J9tPNDX2xUltPY/JIOgc7GH6gaPscUNTBD9jf9Gbqn/sVuow2Kx8 +Hs5w1yXTR1Df8safuuJMR4KdsMkeHzcS2Lex7wIDAQABo30wezAOBgNVHQ8BAf8E +BAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAfBgNVHSME +GDAWgBSD0tobP0meocjRN1XBYqlSTOHglTAlBgNVHREEHjAcghpzZXJ2ZXIudGVz +dC5jb3ZlbmFudHNxbC5pbzANBgkqhkiG9w0BAQsFAAOCAYEARjlPL41xnYOUHz+k +Qrj/2figGRYGBwfnLVJrjkkSuWY1KRTLUlUYcc9ofkLzAcwRxVbdhcwLLHDA/ddZ +Yii7AY9Z/amzagu/btgvaWu1KMb8IKe6PKy1ZjzzpT6M9xGbW/YyxSWSfNXxD2t1 ++ThvFKZai+525IC2PjlOP8k9hKu4A55wNjvekleqQ+B944iXDRBVOHqgK3Fy3JQ5 +pcAGm9Q0Bn8xNZhEsVERPKeMOnxF/rfggEiCdPp6fexG9X+dUziPSXR8RGZDn16E +Ho8S4m3or0fMX2W2EsYkRY/ESxsE8Y5KFELh4RW2DrUfzibHaS3ZeXyJLAuBTUzj +s4BqXUwpKwqoQqv3d0Mi1RZanfVMWG470tuvGdmaW3HdZoIBmo44fVjx63/6wEGm +0A45avtOHRwQGObM446Q+Gs6zsZspLgEHjmPwr+0PsIjbR6weehXnAAOnr9RWX8n +UstyEkOSDZA8vJmSWSu8tXwky31ZF+cSC7DYZxBP7dhPWDCn +-----END CERTIFICATE----- diff --git a/test/service/node_c/write.test.covenantsql.io-key.pem b/test/service/node_c/write.test.covenantsql.io-key.pem new file mode 100644 index 000000000..1b0d20305 --- /dev/null +++ b/test/service/node_c/write.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEArwFtfbMdNu7m332+1KH/6hY7/zv+YhPu3NJ7WZC2wJlyc9nI +BzuD7SV8lcwv75w2n4aclr/KWFXYBPeqtfn4ebV/pvIZtyd+P4MGKbT3FuHdxPhI +7uTcw7LENXUKtmqO01OO1mx4+cbn14Hd8f1NUoxs5vEnohBoV7etI84fLAxglNAd +dtGTeN+jlSN+ipFKI74aPeEqnnJeJ3j35ZgvYb79hYEHXPngKOCsAa01cDrpxzuD +L+ukveP5SLee1lCYHpUKEMt+0SMBalVW6XltBnmMrLL5tjfA4RyAmgRxle8q+O2w +DxivStX1kInUL22kj6AjSrBpOPy12rwQiPa6pJt0pjk7+S71p69NJeoYyYz2+l6g +uWXqJ8b/jlwT4+CBOlOuDuvCvU1almQADKcv1eN9g0X2d8F+9CHgUdHaVkpGJ9ue +Ogrw5HWipnjt2B/YFRErh3125CDnGap+SCEoPCJFTxVG0GahzmWC1sQr6EsHdMDB +k6YTINY+4iNvoxlTUhAMWiYeLNY4PZaqL9q3tcjuHMxMcykAYpMiEQe0mGsWiRjz +DfnDtGcyeagqHkcjBE/w1qwGjLQBJFmjYmwpk5cDtS0OhhEIOastqKoacc+L4rnq +ALEM94uuPS8VMfu+d9rspfBMy/a/PjMrOO5pYLD0yIIYVHDJxyHT0p0QVZkCAwEA +AQKCAgBzaAueQwnW+gCCDVhUvGgZJIR4MkX0w5RXRu5VCBucMxTI1SsVqee78WaR +Gk/aQTe8R3bn6p4zVpjX3cNTsf5rtIbzvt+6am7Uz0C3LEFtc5FdnSXrdD0pSLAf +WImx9d8t+QJO4MV+Ye7trRSByjq9XyFJwmoSc6N7hQLGg90GnTrrp7pmappHsaMc 
+bIW8N0ee/nQrrlr+lgkFGr7PR2annN1utsH2TEnIazDDAkglNJSJ7/L5HPpMxxPT +IlO6nPdT45D5tlhw7ha22oQv/wUoqetcz8Hgqi+lw7gC2T9WUpwSAByEOBEQ1rvT +jzC//hvxIvdi/6bED9KU5kQ5Lgux/fWUvg/l6u7EebM6TeG/Er5Tq1D6j2+IjfkR +bPHLEk2Cv2oE7W1PhP+yinJnoxwHeic+nu7wsuevzvPhNZ7lLaWgHFvc1YVTmLDq +E/DGm5Qj6mh8SP2NcxW79m6fjdbgjw0OuPxdEo2sj74cxcifTy54GuSoZqGnw18g +28qXpDLkWgHQFrm6LvLqnaY3uvNMYLBWd4kqH0Y9XKI1N3j82ensAwH5e6Ol8Stw +I7GWT/1GggOEPbIwYtVBfbwghfktPttmHU4vs8IQnufqExgEjDhWspjJVZEuoO8V +8weDCQADS8/266GqRN9CUtNWMmbM0jAHgL7Bq9AFwgYSrP/BiQKCAQEA3GNqpUw8 +Ix3yYxvEXbceOguwqjzOW1vGBhJ4k6ptPG9kPI0GqMY6sw/RgRlCpfCq7Ig/Renj +6LPbSyjkylfavlLNODi/iGbnd6cLKtsvaeexWP1wYU4T367rF8ifqKl/C+l4XqBN +4j4KooyPF889PrgXJ73517jWwhxnncgDabJ2zTetjkvFbf3bAua5F7rufHi39Zng +Rt0gEmFg99XJjrtbS7iND5fhZpGK14hhkdwHptu6XS+yGIiVwbCwS0odtZDID3vk +s9CEUzhjnK8ld04RJ4vSMlxfzlub3e88Lvii79mmZgdH4aP0cPhmFJ3i0mefUVpw +cSmQSVMsxHkh6wKCAQEAy0i1KjX3k7LwWgsvwtRjnJMuEWJ/SoRiE4+Cho9BSCVg +onG4NyBOUgfQI9pBKf2CPWVDBA5VQrDN81ozmTPTgb7isDcFDSiowqyVSyRCorUB +AfjbpD7z6QMdBt15xHR3CXWwpio5NwBQqQ+I2AJ1koBYUVj5TupDOZzwVY8/BbqD +fmhtqLd4c2q5Go2ESK+EVAA1jvFmZUTjr9jC9a/8s/cn5Xqv7/s4BCmmfqQSKZS2 +LPBA2Th1zsUrSW3Os9v+c6LUU92LVEZKKKZyRykTemQRH/oljGG9Dn/hUDcvaI2z +A2+T15rQd9p6ePySD8BuZzxwFvAJQPOYaqivrzsBiwKCAQAmd5fSuEa63mxDTkJt +FRxKh2XToP9nxNIAl1LCe3nLlanKQ9dIuCjgvj8UKIOQkTxUQsfAfT2RjWsWaFHe +24zLsYouaQFNXqDCKr7xQQa6ln1HCh2Gbmlbnp1cLmFnwAXz31FqOtK9TZTvoFcN +kdefzeQExM0KETIy+WBAkvu9hC/mS/SYJLOWKjwC+qCN+svLoAqD7NLPq6MAckzJ +lWAz8JHT2qeMdDccfwTb7+sP2XbgcfPKdhvA2n5BK4Tp70rWOSoiQb6+gAPIvsvs +Oknw1Ah8fZQ3xBXY3/aJu0sm67EM6lF394ddZA+zdDflG1XO4dVWDtIXfmi307O5 +q2b3AoIBAFkTMfceEK8SkIkUL8hyYnegcmZBv786RPOHRc2KhjOD1VU4+VyGdmsx +az3ajAVHRUN71KK5WRjQ+l2w37043WwT5acLZNZAQ7qR/xUe/WfoYlmn3y6YOy6W +I6j3cTzpP6PQgyg8hjeYlr+NxAvLABPC03BJyWyP8AcVwqXrD9WFxcqlHa/5PPlu +AVAmRJnI9vYL5WwOUSz8w7wxAjS/+b4uBbhjSyaf8Qq56W/CmwbHWBBW8kN8nvqM +oQwa5qEfO98VsW5SPJQf/KzVSmvuDs/peyuE4+EgjsQEuwj4NXjd5lwSDzlBaCms +fU/4dFQcoQPxkrgqVBO26cmKwvjIpUMCggEAIb85XP+bwnSOTjFbn61k8PdgyPBq +kBDkiofKiBuR2UzuYhVqxkBqWHtDUhqq1y7A0S/ya75bSv67q4ZlHWEjCEiA8thv +KZwn/8yRVFFKEgtB0afub62Zgc+pPXAr2JwwtZK5dg91QxPaKF20YEz6tOcZdjut +gcQ8Bt4dpRvoz2vOJQnqMIhQM9+HiE7XXV7fgUwT55nC+4wRhILd3xOZKYzDzgMJ +ShMUAb7QkLRujyQwcYPxjWiqRFGSodMoNE2OdofLmfwD1vQfZ/gAorLBH3BVyXAz +53zHfE7+kLgobJBYf7T7Jk246soVYOLSZbeVjAT0ajMKD4ay2jNdnSlxKA== +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_c/write.test.covenantsql.io.pem b/test/service/node_c/write.test.covenantsql.io.pem new file mode 100644 index 000000000..ccebf106c --- /dev/null +++ b/test/service/node_c/write.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFyDCCA7ACCQCofDYaBrdh7TANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIxMDZaFw0yODA3MjkwNDIxMDZaMIGoMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEiMCAGA1UEAxMZ +d3JpdGUudGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFz +dGVyQGNvdmVuYW50c3FsLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEArwFtfbMdNu7m332+1KH/6hY7/zv+YhPu3NJ7WZC2wJlyc9nIBzuD7SV8lcwv +75w2n4aclr/KWFXYBPeqtfn4ebV/pvIZtyd+P4MGKbT3FuHdxPhI7uTcw7LENXUK +tmqO01OO1mx4+cbn14Hd8f1NUoxs5vEnohBoV7etI84fLAxglNAddtGTeN+jlSN+ +ipFKI74aPeEqnnJeJ3j35ZgvYb79hYEHXPngKOCsAa01cDrpxzuDL+ukveP5SLee +1lCYHpUKEMt+0SMBalVW6XltBnmMrLL5tjfA4RyAmgRxle8q+O2wDxivStX1kInU 
+L22kj6AjSrBpOPy12rwQiPa6pJt0pjk7+S71p69NJeoYyYz2+l6guWXqJ8b/jlwT +4+CBOlOuDuvCvU1almQADKcv1eN9g0X2d8F+9CHgUdHaVkpGJ9ueOgrw5HWipnjt +2B/YFRErh3125CDnGap+SCEoPCJFTxVG0GahzmWC1sQr6EsHdMDBk6YTINY+4iNv +oxlTUhAMWiYeLNY4PZaqL9q3tcjuHMxMcykAYpMiEQe0mGsWiRjzDfnDtGcyeagq +HkcjBE/w1qwGjLQBJFmjYmwpk5cDtS0OhhEIOastqKoacc+L4rnqALEM94uuPS8V +Mfu+d9rspfBMy/a/PjMrOO5pYLD0yIIYVHDJxyHT0p0QVZkCAwEAATANBgkqhkiG +9w0BAQsFAAOCAgEAM+1j12Px/guyMnZFwwsRC9ITa5zJkAkfR++LzRZcT+Gz5du1 +FyQp+5L4Pws96OLFADKHVYE0EFlgdVbskVBErrEIQeZRw0bmp1zDEhfxr4c8fivY ++hW/AXjHsJuO8WVTlRctnefY1g6OdvfI6Sc2092GM9Nvquf1OhKIbPso1NxUUrnp +HQ4ffhQNAFsJk/PkPsjTBzP2iJrzynPdoIPK9jO6NbUg6XfZDQRwchvI7NduWq+x +nNTWV1D8oHvP0+FwHdRyctIVVjkxqd7wnenWl2mUr0SBf0FnfJPl9fz+YLVBLroF +4NGwGG/r6q9tRBAXATm+qbNlth589Tz8mMZMnq2+D6O4499I4MJLceuXw689rO05 +s9/BXWzjJThDnrFaQPyf/YTyMuFaf919F0UGLTLYLYf4vfuflUhaStmYyvArv229 +F4DJy/QDM+NWjo/pJH3ETeEA1stD7kQq7GGqy/MiB5YXqRLnGjpa9vqOECsMIm29 +1TUgdCVN9Gsk8JQPGm/lJUeJECq20LThSeXG+sY6RU+0rmOUJvR8Uv3kjkn0Xd+/ +p2xM/CboFXVcmU+fe9UfJar87MlPJcZP5SenVQuWZ3imI0kFeaObfHHKKJfNAoFl +agBFqnAc/EkYqekxGkxc3pVhBBiZ3D+FlinC2yRko9glPkRKA2WxINPVxm0= +-----END CERTIFICATE----- diff --git a/test/service/node_miner_0/config.yaml b/test/service/node_miner_0/config.yaml index 415a50fdd..4c981fcb7 100644 --- a/test/service/node_miner_0/config.yaml +++ b/test/service/node_miner_0/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/service/node_miner_1/config.yaml b/test/service/node_miner_1/config.yaml index 8e11e22e0..e41bbbb9f 100644 --- a/test/service/node_miner_1/config.yaml +++ b/test/service/node_miner_1/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/service/node_miner_2/config.yaml b/test/service/node_miner_2/config.yaml index 65d3f034f..00d4caed0 100644 --- a/test/service/node_miner_2/config.yaml +++ b/test/service/node_miner_2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/twopc/twopc.go b/twopc/twopc.go index 3912077d7..53c2f1b16 100644 --- a/twopc/twopc.go +++ b/twopc/twopc.go @@ -22,7 +22,6 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/pkg/errors" ) // Hook are called during 2PC running @@ -40,7 +39,7 @@ type Options struct { // Worker represents a 2PC worker who implements Prepare, Commit, and Rollback. 
type Worker interface { Prepare(ctx context.Context, wb WriteBatch) error - Commit(ctx context.Context, wb WriteBatch) error + Commit(ctx context.Context, wb WriteBatch) (interface{}, error) Rollback(ctx context.Context, wb WriteBatch) error } @@ -78,96 +77,121 @@ func NewOptionsWithCallback(timeout time.Duration, } } -func (c *Coordinator) rollback(ctx context.Context, workers []Worker, wb WriteBatch) (err error) { +func (c *Coordinator) prepare(ctx context.Context, workers []Worker, wb WriteBatch) (err error) { errs := make([]error, len(workers)) wg := sync.WaitGroup{} + workerFunc := func(n Worker, e *error, wg *sync.WaitGroup) { + defer wg.Done() + + *e = n.Prepare(ctx, wb) + } for index, worker := range workers { wg.Add(1) - go func(n Worker, e *error) { - *e = n.Rollback(ctx, wb) - wg.Done() - }(worker, &errs[index]) + go workerFunc(worker, &errs[index], &wg) } wg.Wait() - for _, err = range errs { + var index int + for index, err = range errs { if err != nil { - return err + log.WithField("worker", workers[index]).WithError(err).Debug("prepare failed") + return } } - return errors.New("twopc: rollback") + return } -func (c *Coordinator) commit(ctx context.Context, workers []Worker, wb WriteBatch) (err error) { +func (c *Coordinator) rollback(ctx context.Context, workers []Worker, wb WriteBatch) (err error) { errs := make([]error, len(workers)) wg := sync.WaitGroup{} + workerFunc := func(n Worker, e *error, wg *sync.WaitGroup) { + defer wg.Done() + + *e = n.Rollback(ctx, wb) + } for index, worker := range workers { wg.Add(1) - go func(n Worker, e *error) { - *e = n.Commit(ctx, wb) - wg.Done() - }(worker, &errs[index]) + go workerFunc(worker, &errs[index], &wg) } wg.Wait() - for _, err = range errs { + var index int + for index, err = range errs { if err != nil { - return err + log.WithField("worker", workers[index]).WithError(err).Debug("rollback failed") + return } } - return nil + return } -// Put initiates a 2PC process to apply given WriteBatch on all workers. -func (c *Coordinator) Put(workers []Worker, wb WriteBatch) (err error) { - // Initiate phase one: ask nodes to prepare for progress - ctx, cancel := context.WithTimeout(context.Background(), c.option.timeout) - defer cancel() +func (c *Coordinator) commit(ctx context.Context, workers []Worker, wb WriteBatch) (result interface{}, err error) { + errs := make([]error, len(workers)) + wg := sync.WaitGroup{} + workerFunc := func(n Worker, resPtr *interface{}, e *error, wg *sync.WaitGroup) { + defer wg.Done() - if c.option.beforePrepare != nil { - if err := c.option.beforePrepare(ctx); err != nil { - return err + var res interface{} + res, *e = n.Commit(ctx, wb) + if resPtr != nil { + *resPtr = res } } - errs := make([]error, len(workers)) - wg := sync.WaitGroup{} - for index, worker := range workers { wg.Add(1) - go func(n Worker, e *error) { - *e = n.Prepare(ctx, wb) - wg.Done() - }(worker, &errs[index]) + if index == 0 { + go workerFunc(worker, &result, &errs[index], &wg) + } else { + go workerFunc(worker, nil, &errs[index], &wg) + } } wg.Wait() - // Check prepare results and initiate phase two - var returnErr error - for index, err := range errs { + var index int + for index, err = range errs { if err != nil { - returnErr = err - log.WithField("worker", workers[index]).WithError(err).Debug("prepare failed") - goto ROLLBACK + log.WithField("worker", workers[index]).WithError(err).Debug("commit failed") + return } } + return +} + +// Put initiates a 2PC process to apply given WriteBatch on all workers. 
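For orientation before the new Put implementation below: the Worker interface change above means Commit now surfaces a per-worker result, and the refactored commit phase propagates only the first worker's result (index 0) back through Coordinator.Put. A minimal, hypothetical sketch of a conforming worker follows; memWorker and its fields are illustrative names, not part of this change, and the sketch assumes WriteBatch is this package's batch interface type.

// memWorker illustrates the updated twopc.Worker contract:
// Prepare stages a batch, Commit applies it and returns a result,
// Rollback discards whatever was staged.
type memWorker struct {
	staged WriteBatch
	value  int64
}

func (w *memWorker) Prepare(ctx context.Context, wb WriteBatch) error {
	w.staged = wb // stage only; nothing is applied until Commit
	return nil
}

func (w *memWorker) Commit(ctx context.Context, wb WriteBatch) (interface{}, error) {
	w.value++ // apply the staged write
	w.staged = nil
	return w.value, nil // returned through Coordinator.Put for worker 0
}

func (w *memWorker) Rollback(ctx context.Context, wb WriteBatch) error {
	w.staged = nil // discard the staged write
	return nil
}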
+func (c *Coordinator) Put(workers []Worker, wb WriteBatch) (result interface{}, err error) { + // Initiate phase one: ask nodes to prepare for progress + ctx, cancel := context.WithTimeout(context.Background(), c.option.timeout) + defer cancel() + + if c.option.beforePrepare != nil { + if err = c.option.beforePrepare(ctx); err != nil { + log.WithError(err).Debug("before prepare failed") + return + } + } + + // Check prepare results and initiate phase two + if err = c.prepare(ctx, workers, wb); err != nil { + goto ROLLBACK + } + + if c.option.beforeCommit != nil { - if err := c.option.beforeCommit(ctx); err != nil { - returnErr = err + if err = c.option.beforeCommit(ctx); err != nil { log.WithError(err).Debug("before commit failed") goto ROLLBACK } } - err = c.commit(ctx, workers, wb) + result, err = c.commit(ctx, workers, wb) if c.option.afterCommit != nil { if err = c.option.afterCommit(ctx); err != nil { @@ -185,5 +209,5 @@ ROLLBACK: c.rollback(ctx, workers, wb) - return returnErr + return } diff --git a/twopc/twopc_test.go b/twopc/twopc_test.go index dfb3504a8..9cb6dd0ba 100644 --- a/twopc/twopc_test.go +++ b/twopc/twopc_test.go @@ -22,6 +22,7 @@ import ( "fmt" "net" "os" + "strconv" "sync" "testing" "time" @@ -83,11 +84,13 @@ type RaftWriteBatchResp struct { type RaftCommitReq struct { TxID RaftTxID + Cmds []string } type RaftCommitResp struct { ErrCode int ErrString string + Result int64 } type RaftRollbackReq struct { @@ -185,6 +188,19 @@ func (r *RaftNodeRPCServer) RPCCommit(req *RaftCommitReq, resp *RaftCommitResp) return nil } + // calculate the sum of the batched commands + var total int64 + var val int64 + for _, cmd := range req.Cmds { + if val, err = strconv.ParseInt(cmd, 10, 64); err != nil { + return + } + + total += val + } + + resp.Result = total + r.state = Committed return nil } @@ -256,7 +272,7 @@ func (r *RaftNode) Prepare(ctx context.Context, wb WriteBatch) (err error) { return err } -func (r *RaftNode) Commit(ctx context.Context, wb WriteBatch) (err error) { +func (r *RaftNode) Commit(ctx context.Context, wb WriteBatch) (result interface{}, err error) { log.Debugf("executing 2pc: addr = %s, phase = commit", r.addr) defer log.Debugf("2pc result: addr = %s, phase = commit, result = %v", r.addr, err) @@ -264,20 +280,20 @@ func (r *RaftNode) Commit(ctx context.Context, wb WriteBatch) (err error) { if !ok { err = errors.New("unexpected WriteBatch type") - return err + return } cipher := etls.NewCipher([]byte(pass)) conn, err := etls.Dial("tcp", r.addr, cipher) if err != nil { - return err + return } client, err := rpc.InitClientConn(conn) if err != nil { - return err + return } d, ok := ctx.Deadline() @@ -286,22 +302,23 @@ func (r *RaftNode) Commit(ctx context.Context, wb WriteBatch) (err error) { err = conn.SetDeadline(d) if err != nil { - return err + return } } resp := new(RaftCommitResp) - err = client.Call("Raft.RPCCommit", &RaftCommitReq{rwb.TxID}, resp) + err = client.Call("Raft.RPCCommit", &RaftCommitReq{TxID: rwb.TxID, Cmds: rwb.Cmds}, resp) + result = resp.Result if err != nil { - return err + return } if resp.ErrCode > 0 { err = fmt.Errorf(resp.ErrString) } - return err + return } func (r *RaftNode) Rollback(ctx context.Context, wb WriteBatch) (err error) { @@ -339,7 +356,7 @@ func (r *RaftNode) Rollback(ctx context.Context, wb WriteBatch) (err error) { } resp := new(RaftRollbackResp) - err = client.Call("Raft.RPCRollback", &RaftRollbackReq{rwb.TxID}, resp) + err = client.Call("Raft.RPCRollback", &RaftRollbackReq{TxID: rwb.TxID}, resp) if err != nil { return err @@ -405,16 +422,20 @@ func 
TestTwoPhaseCommit(t *testing.T) { testNodeReset() policy = AllGood - err := c.Put(nodes, &RaftWriteBatchReq{TxID: 0, Cmds: []string{"+1", "-3", "+10"}}) + res, err := c.Put(nodes, &RaftWriteBatchReq{TxID: 0, Cmds: []string{"+1", "-3", "+10"}}) if err != nil { t.Fatalf("Error occurred: %s", err.Error()) } + if res.(int64) != 8 { + t.Fatalf("TwoPC returns invalid result: %v", res) + } + testNodeReset() policy = FailOnPrepare - err = c.Put(nodes, &RaftWriteBatchReq{TxID: 1, Cmds: []string{"-3", "-4", "+1"}}) + res, err = c.Put(nodes, &RaftWriteBatchReq{TxID: 1, Cmds: []string{"-3", "-4", "+1"}}) if err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") @@ -425,7 +446,7 @@ func TestTwoPhaseCommit(t *testing.T) { testNodeReset() policy = FailOnCommit - err = c.Put(nodes, &RaftWriteBatchReq{TxID: 2, Cmds: []string{"-5", "+9", "+1"}}) + res, err = c.Put(nodes, &RaftWriteBatchReq{TxID: 2, Cmds: []string{"-5", "+9", "+1"}}) if err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") @@ -469,11 +490,15 @@ func TestTwoPhaseCommit_WithHooks(t *testing.T) { testNodeReset() - err := c.Put(nodes, &RaftWriteBatchReq{TxID: 0, Cmds: []string{"+1", "-3", "+10"}}) + res, err := c.Put(nodes, &RaftWriteBatchReq{TxID: 0, Cmds: []string{"+1", "-3", "+10"}}) if err != nil { t.Fatalf("Error occurred: %s", err.Error()) } + if res.(int64) != 8 { + t.Fatalf("TwoPC returns invalid result: %v", res) + } + // error before prepare errorBeforePrepare = true errorBeforeCommit = false @@ -481,7 +506,7 @@ func TestTwoPhaseCommit_WithHooks(t *testing.T) { testNodeReset() - err = c.Put(nodes, &RaftWriteBatchReq{TxID: 1, Cmds: []string{"+1", "-3", "+10"}}) + res, err = c.Put(nodes, &RaftWriteBatchReq{TxID: 1, Cmds: []string{"+1", "-3", "+10"}}) if err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else if err != beforePrepareError { @@ -497,7 +522,7 @@ func TestTwoPhaseCommit_WithHooks(t *testing.T) { testNodeReset() - err = c.Put(nodes, &RaftWriteBatchReq{TxID: 2, Cmds: []string{"+1", "-3", "+10"}}) + res, err = c.Put(nodes, &RaftWriteBatchReq{TxID: 2, Cmds: []string{"+1", "-3", "+10"}}) if err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else if err != beforeCommitError { @@ -513,7 +538,7 @@ func TestTwoPhaseCommit_WithHooks(t *testing.T) { testNodeReset() - err = c.Put(nodes, &RaftWriteBatchReq{TxID: 3, Cmds: []string{"+1", "-3", "+10"}}) + res, err = c.Put(nodes, &RaftWriteBatchReq{TxID: 3, Cmds: []string{"+1", "-3", "+10"}}) if err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else if err != beforeCommitError { diff --git a/types/ack_type.go b/types/ack_type.go new file mode 100644 index 000000000..af24a9da9 --- /dev/null +++ b/types/ack_type.go @@ -0,0 +1,99 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +//go:generate hsp + +// AckHeader defines the client ack entity. +type AckHeader struct { + Response SignedResponseHeader `json:"r"` + NodeID proto.NodeID `json:"i"` // ack node id + Timestamp time.Time `json:"t"` // time in UTC zone +} + +// SignedAckHeader defines the signed client ack entity. +type SignedAckHeader struct { + AckHeader + verifier.DefaultHashSignVerifierImpl +} + +// Ack defines a whole client ack request entity. +type Ack struct { + proto.Envelope + Header SignedAckHeader `json:"h"` +} + +// AckResponse defines the client ack response entity. +type AckResponse struct{} + +// Verify checks hash and signature in ack header. +func (sh *SignedAckHeader) Verify() (err error) { + // verify the nested response first + if err = sh.Response.Verify(); err != nil { + return + } + + return sh.DefaultHashSignVerifierImpl.Verify(&sh.AckHeader) +} + +// Sign computes the ack header hash and signs it. +func (sh *SignedAckHeader) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err error) { + // verifyReqHeader is only disabled by the ack worker, where ack.Header is already verified before the ack is built + if verifyReqHeader { + // check original header signature + if err = sh.Response.Verify(); err != nil { + return + } + } + + return sh.DefaultHashSignVerifierImpl.Sign(&sh.AckHeader, signer) +} + +// Verify checks hash and signature in ack. +func (a *Ack) Verify() error { + return a.Header.Verify() +} + +// Sign signs the ack header. +func (a *Ack) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err error) { + // sign + return a.Header.Sign(signer, verifyReqHeader) +} + +// ResponseHash returns the hash of the nested Response header. +func (sh *SignedAckHeader) ResponseHash() hash.Hash { + return sh.AckHeader.Response.Hash() +} + +// SignedRequestHeader returns a reference to the nested Request header. +func (sh *SignedAckHeader) SignedRequestHeader() *SignedRequestHeader { + return &sh.AckHeader.Response.Request +} + +// SignedResponseHeader returns the Response reference. +func (sh *SignedAckHeader) SignedResponseHeader() *SignedResponseHeader { + return &sh.Response +} diff --git a/types/ack_type_gen.go b/types/ack_type_gen.go new file mode 100644 index 000000000..90fc7a814 --- /dev/null +++ b/types/ack_type_gen.go @@ -0,0 +1,111 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT.
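To make the intended call order of the ack types above concrete, here is a hedged sketch of building, signing, and verifying an ack. buildSignedAck is a hypothetical helper (not part of this change), and it assumes the supplied response header has already been signed so that both Sign and Verify can re-check it.

// buildSignedAck sketches the sign-then-verify round trip for an Ack.
func buildSignedAck(resp SignedResponseHeader, id proto.NodeID,
	signer *asymmetric.PrivateKey) (ack *Ack, err error) {
	ack = &Ack{
		Header: SignedAckHeader{
			AckHeader: AckHeader{
				Response:  resp,             // a previously signed response header
				NodeID:    id,               // the acking node
				Timestamp: time.Now().UTC(), // header time is kept in UTC
			},
		},
	}
	// verifyReqHeader=true re-checks the response signature before signing.
	if err = ack.Sign(signer, true); err != nil {
		return nil, err
	}
	// Verify recomputes the header hash and checks both signatures.
	err = ack.Verify()
	return
}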
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Ack) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Ack) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *AckHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if oTemp, err := z.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AckHeader) Msgsize() (s int) { + s = 1 + 9 + z.Response.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z AckResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 0 + o = append(o, 0x80) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z AckResponse) Msgsize() (s int) { + s = 1 + return +} + +// MarshalHash marshals for hash +func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 3 + o = append(o, 0x82, 0x82, 0x83, 0x83) + if oTemp, err := z.AckHeader.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.AckHeader.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendTime(o, z.AckHeader.Timestamp) + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedAckHeader) Msgsize() (s int) { + s = 1 + 10 + 1 + 9 + z.AckHeader.Response.Msgsize() + 7 + z.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/worker/types/ack_type_gen_test.go b/types/ack_type_gen_test.go similarity index 100% rename from worker/types/ack_type_gen_test.go rename to types/ack_type_gen_test.go diff --git a/sqlchain/types/billing_req.go b/types/billing_req.go similarity index 100% rename from sqlchain/types/billing_req.go rename to types/billing_req.go diff --git a/types/block.go b/types/block.go new file mode 100644 index 000000000..e66e688e4 --- /dev/null +++ b/types/block.go @@ -0,0 +1,188 @@ +/* + * Copyright 2018 The CovenantSQL Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/merkle" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +//go:generate hsp + +// Header is a block header. +type Header struct { + Version int32 + Producer proto.NodeID + GenesisHash hash.Hash + ParentHash hash.Hash + MerkleRoot hash.Hash + Timestamp time.Time +} + +// SignedHeader is a block header along with its producer signature. +type SignedHeader struct { + Header + HSV verifier.DefaultHashSignVerifierImpl +} + +// Sign calls DefaultHashSignVerifierImpl to calculate the header hash and sign it with signer. +func (s *SignedHeader) Sign(signer *ca.PrivateKey) error { + return s.HSV.Sign(&s.Header, signer) +} + +// Verify verifies the signature of the signed header. +func (s *SignedHeader) Verify() error { + return s.HSV.Verify(&s.Header) +} + +// VerifyAsGenesis verifies the signed header as a genesis block header. +func (s *SignedHeader) VerifyAsGenesis() (err error) { + var pk *ca.PublicKey + log.WithFields(log.Fields{ + "producer": s.Producer, + "root": s.GenesisHash.String(), + "parent": s.ParentHash.String(), + "merkle": s.MerkleRoot.String(), + "block": s.HSV.Hash().String(), + }).Debug("Verifying genesis block header") + if pk, err = kms.GetPublicKey(s.Producer); err != nil { + return + } + if !pk.IsEqual(s.HSV.Signee) { + err = ErrNodePublicKeyNotMatch + return + } + return s.Verify() +} + +// QueryAsTx defines a transaction struct combining a request and its signed response header +// for inclusion in a block. +type QueryAsTx struct { + Request *Request + Response *SignedResponseHeader +} + +// Block is a node of the blockchain. +type Block struct { + SignedHeader SignedHeader + FailedReqs []*Request + QueryTxs []*QueryAsTx + Acks []*SignedAckHeader +} + +// CalcNextID calculates the next query id by examining every write query in the block, +// adding the write query count to the last log offset. +// +// TODO(leventeliu): too tricky. Consider simply adding next id to each block header. +func (b *Block) CalcNextID() (id uint64, ok bool) { + for _, v := range b.QueryTxs { + if v.Request.Header.QueryType == WriteQuery { + var nid = v.Response.LogOffset + uint64(len(v.Request.Payload.Queries)) + if nid > id { + id = nid + } + ok = true + } + } + return +} + +// PackAndSignBlock generates the signature for the Block from the given PrivateKey. +func (b *Block) PackAndSignBlock(signer *ca.PrivateKey) (err error) { + // Calculate merkle root + b.SignedHeader.MerkleRoot = b.computeMerkleRoot() + return b.SignedHeader.Sign(signer) +} +
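As a worked reading of CalcNextID above: only write queries advance the id, and each write query's candidate is its response's LogOffset plus the number of statements in its payload. A minimal sketch, mirroring a case from the table-driven test later in this diff (fmt import assumed; the types are the ones defined in this package):

// nextIDExample: a write query at LogOffset 10 carrying 20 statements
// yields a next id of 30; a block with only read queries reports ok=false.
func nextIDExample() {
	b := &Block{
		QueryTxs: []*QueryAsTx{{
			Request: &Request{
				Header: SignedRequestHeader{
					RequestHeader: RequestHeader{QueryType: WriteQuery},
				},
				Payload: RequestPayload{Queries: make([]Query, 20)},
			},
			Response: &SignedResponseHeader{
				ResponseHeader: ResponseHeader{LogOffset: 10},
			},
		}},
	}
	if next, ok := b.CalcNextID(); ok {
		fmt.Printf("next id: %d\n", next) // prints 30 = 10 + 20
	}
}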
+// Verify verifies the merkle root and header signature of the block. +func (b *Block) Verify() (err error) { + // Verify merkle root + if merkleRoot := b.computeMerkleRoot(); !merkleRoot.IsEqual(&b.SignedHeader.MerkleRoot) { + return ErrMerkleRootVerification + } + return b.SignedHeader.Verify() +} + +// VerifyAsGenesis verifies the block as a genesis block. +func (b *Block) VerifyAsGenesis() (err error) { + var pk *ca.PublicKey + if pk, err = kms.GetPublicKey(b.SignedHeader.Producer); err != nil { + return + } + if !pk.IsEqual(b.SignedHeader.HSV.Signee) { + err = ErrNodePublicKeyNotMatch + return + } + return b.Verify() +} + +// Timestamp returns the timestamp field of the block header. +func (b *Block) Timestamp() time.Time { + return b.SignedHeader.Timestamp +} + +// Producer returns the producer field of the block header. +func (b *Block) Producer() proto.NodeID { + return b.SignedHeader.Producer +} + +// ParentHash returns the parent hash field of the block header. +func (b *Block) ParentHash() *hash.Hash { + return &b.SignedHeader.ParentHash +} + +// BlockHash returns the header data hash, i.e. the block hash. +func (b *Block) BlockHash() *hash.Hash { + return &b.SignedHeader.HSV.DataHash +} + +// GenesisHash returns the genesis hash field of the block header. +func (b *Block) GenesisHash() *hash.Hash { + return &b.SignedHeader.GenesisHash +} + +// Signee returns the signee field of the block signed header. +func (b *Block) Signee() *ca.PublicKey { + return b.SignedHeader.HSV.Signee +} + +// computeMerkleRoot collects the hashes of all failed requests, query transactions, +// and acks in the block, and builds a merkle tree over them. +func (b *Block) computeMerkleRoot() hash.Hash { + var hs = make([]*hash.Hash, 0, len(b.FailedReqs)+len(b.QueryTxs)+len(b.Acks)) + for i := range b.FailedReqs { + h := b.FailedReqs[i].Header.Hash() + hs = append(hs, &h) + } + for i := range b.QueryTxs { + h := b.QueryTxs[i].Response.Hash() + hs = append(hs, &h) + } + for i := range b.Acks { + h := b.Acks[i].Hash() + hs = append(hs, &h) + } + return *merkle.NewMerkle(hs).GetRoot() +} + +// Blocks is an array of Block references. +type Blocks []*Block diff --git a/types/block_gen.go b/types/block_gen.go new file mode 100644 index 000000000..bf8ef8bd6 --- /dev/null +++ b/types/block_gen.go @@ -0,0 +1,241 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT.
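The generated MarshalHash methods below exist so that structurally equal values always serialize to identical bytes, which is presumably what makes the output safe to hash and sign via DefaultHashSignVerifierImpl. A small sketch of the stability property, using the same marshal-twice pattern as the generated tests further down (bytes and errors imports assumed; stableBytes is an illustrative helper, not part of this change):

// stableBytes marshals a header twice and checks that the outputs agree;
// the returned bytes are then suitable for hashing and signing.
func stableBytes(h *Header) ([]byte, error) {
	b1, err := h.MarshalHash()
	if err != nil {
		return nil, err
	}
	b2, err := h.MarshalHash()
	if err != nil {
		return nil, err
	}
	if !bytes.Equal(b1, b2) {
		return nil, errors.New("hash not stable")
	}
	return b1, nil
}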
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Block) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + // map header, size 2 + o = append(o, 0x84, 0x84, 0x82, 0x82) + if oTemp, err := z.SignedHeader.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.SignedHeader.HSV.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.QueryTxs))) + for za0002 := range z.QueryTxs { + if z.QueryTxs[za0002] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.QueryTxs[za0002].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.FailedReqs))) + for za0001 := range z.FailedReqs { + if z.FailedReqs[za0001] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.FailedReqs[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.Acks))) + for za0003 := range z.Acks { + if z.Acks[za0003] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Acks[za0003].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Block) Msgsize() (s int) { + s = 1 + 13 + 1 + 7 + z.SignedHeader.Header.Msgsize() + 4 + z.SignedHeader.HSV.Msgsize() + 9 + hsp.ArrayHeaderSize + for za0002 := range z.QueryTxs { + if z.QueryTxs[za0002] == nil { + s += hsp.NilSize + } else { + s += z.QueryTxs[za0002].Msgsize() + } + } + s += 11 + hsp.ArrayHeaderSize + for za0001 := range z.FailedReqs { + if z.FailedReqs[za0001] == nil { + s += hsp.NilSize + } else { + s += z.FailedReqs[za0001].Msgsize() + } + } + s += 5 + hsp.ArrayHeaderSize + for za0003 := range z.Acks { + if z.Acks[za0003] == nil { + s += hsp.NilSize + } else { + s += z.Acks[za0003].Msgsize() + } + } + return +} + +// MarshalHash marshals for hash +func (z Blocks) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendArrayHeader(o, uint32(len(z))) + for za0001 := range z { + if z[za0001] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z Blocks) Msgsize() (s int) { + s = hsp.ArrayHeaderSize + for za0001 := range z { + if z[za0001] == nil { + s += hsp.NilSize + } else { + s += z[za0001].Msgsize() + } + } + return +} + +// MarshalHash marshals for hash +func (z *Header) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 6 + o = append(o, 0x86, 0x86) + if oTemp, err := z.GenesisHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + if oTemp, err := z.ParentHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil 
{ + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + o = hsp.AppendInt32(o, z.Version) + o = append(o, 0x86) + if oTemp, err := z.Producer.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + o = hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Header) Msgsize() (s int) { + s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.ParentHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z *QueryAsTx) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if z.Request == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Request.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x82) + if z.Response == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *QueryAsTx) Msgsize() (s int) { + s = 1 + 8 + if z.Request == nil { + s += hsp.NilSize + } else { + s += z.Request.Msgsize() + } + s += 9 + if z.Response == nil { + s += hsp.NilSize + } else { + s += z.Response.Msgsize() + } + return +} + +// MarshalHash marshals for hash +func (z *SignedHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.HSV.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedHeader) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 4 + z.HSV.Msgsize() + return +} diff --git a/types/block_gen_test.go b/types/block_gen_test.go new file mode 100644 index 000000000..ff2b94d1d --- /dev/null +++ b/types/block_gen_test.go @@ -0,0 +1,195 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashBlock(t *testing.T) { + v := Block{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashBlock(b *testing.B) { + v := Block{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgBlock(b *testing.B) { + v := Block{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashBlocks(t *testing.T) { + v := Blocks{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashBlocks(b *testing.B) { + v := Blocks{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgBlocks(b *testing.B) { + v := Blocks{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashHeader(t *testing.T) { + v := Header{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashHeader(b *testing.B) { + v := Header{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgHeader(b *testing.B) { + v := Header{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQueryAsTx(t *testing.T) { + v := QueryAsTx{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQueryAsTx(b *testing.B) { + v := QueryAsTx{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQueryAsTx(b *testing.B) { + v := QueryAsTx{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedHeader(t *testing.T) { + v := SignedHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedHeader(b *testing.B) { + v := SignedHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedHeader(b 
*testing.B) { + v := SignedHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/types/block_test.go b/types/block_test.go new file mode 100644 index 000000000..e028babde --- /dev/null +++ b/types/block_test.go @@ -0,0 +1,450 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "bytes" + "math/big" + "reflect" + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/pkg/errors" + . "github.com/smartystreets/goconvey/convey" +) + +func TestSignAndVerify(t *testing.T) { + block, err := createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = block.Verify(); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + block.SignedHeader.HSV.DataHash[0]++ + + if err = errors.Cause(block.Verify()); err != verifier.ErrHashValueNotMatch { + t.Fatalf("Unexpected error: %v", err) + } + + block.Acks = append(block.Acks, &SignedAckHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x01}, + }, + }) + + if err = block.Verify(); err != ErrMerkleRootVerification { + t.Fatalf("Unexpected error: %v", err) + } +} + +func TestHeaderMarshalUnmarshaler(t *testing.T) { + block, err := createRandomBlock(genesisHash, false) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + origin := &block.SignedHeader.Header + enc, err := utils.EncodeMsgPack(origin) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + dec := &Header{} + if err = utils.DecodeMsgPack(enc.Bytes(), dec); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts1, err := origin.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts2, err := dec.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } + + if !reflect.DeepEqual(origin, dec) { + t.Fatalf("Values don't match:\n\tv1 = %+v\n\tv2 = %+v", origin, dec) + } +} + +func TestSignedHeaderMarshaleUnmarshaler(t *testing.T) { + block, err := createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + origin := &block.SignedHeader + enc, err := utils.EncodeMsgPack(origin) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + dec := &SignedHeader{} + + if err = utils.DecodeMsgPack(enc.Bytes(), dec); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts1, err := origin.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts2, err := dec.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if !bytes.Equal(bts1, bts2) { + 
t.Fatal("hash not stable") + } + + if !reflect.DeepEqual(origin.Header, dec.Header) { + t.Fatalf("Values don't match:\n\tv1 = %+v\n\tv2 = %+v", origin.Header, dec.Header) + } + + if err = origin.Verify(); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = dec.Verify(); err != nil { + t.Fatalf("Error occurred: %v", err) + } +} + +func TestBlockMarshalUnmarshaler(t *testing.T) { + origin, err := createRandomBlock(genesisHash, false) + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + origin2, err := createRandomBlock(genesisHash, false) + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + blocks := make(Blocks, 0, 2) + blocks = append(blocks, origin) + blocks = append(blocks, origin2) + blocks = append(blocks, nil) + + blocks2 := make(Blocks, 0, 2) + blocks2 = append(blocks2, origin) + blocks2 = append(blocks2, origin2) + blocks2 = append(blocks2, nil) + + bts1, err := blocks.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts2, err := blocks2.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } + + enc, err := utils.EncodeMsgPack(origin) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + dec := &Block{} + + if err = utils.DecodeMsgPack(enc.Bytes(), dec); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts1, err = origin.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts2, err = dec.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } + + if !reflect.DeepEqual(origin, dec) { + t.Fatalf("Values don't match:\n\tv1 = %+v\n\tv2 = %+v", origin, dec) + } +} + +func TestGenesis(t *testing.T) { + genesis, err := createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = genesis.VerifyAsGenesis(); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = genesis.SignedHeader.VerifyAsGenesis(); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + // Test non-genesis block + genesis, err = createRandomBlock(genesisHash, false) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = genesis.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } + + if err = genesis.SignedHeader.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } + + // Test altered public key block + genesis, err = createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + _, pub, err := asymmetric.GenSecp256k1KeyPair() + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + genesis.SignedHeader.HSV.Signee = pub + + if err = genesis.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } + + if err = genesis.SignedHeader.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } + + // Test altered signature + genesis, err = createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + 
genesis.SignedHeader.HSV.Signature.R.Add(genesis.SignedHeader.HSV.Signature.R, big.NewInt(int64(1)))
+	genesis.SignedHeader.HSV.Signature.S.Add(genesis.SignedHeader.HSV.Signature.S, big.NewInt(int64(1)))
+
+	if err = genesis.VerifyAsGenesis(); err != nil {
+		t.Logf("Error occurred as expected: %v", err)
+	} else {
+		t.Fatal("Unexpected result: returned nil while expecting an error")
+	}
+
+	if err = genesis.SignedHeader.VerifyAsGenesis(); err != nil {
+		t.Logf("Error occurred as expected: %v", err)
+	} else {
+		t.Fatal("Unexpected result: returned nil while expecting an error")
+	}
+}
+
+func TestCalcNextID(t *testing.T) {
+	Convey("CalcNextID should return correct id of each testing block", t, func() {
+		var (
+			nextid uint64
+			ok     bool
+
+			cases = [...]struct {
+				block  *Block
+				nextid uint64
+				ok     bool
+			}{
+				{
+					block: &Block{
+						QueryTxs: []*QueryAsTx{},
+					},
+					nextid: 0,
+					ok:     false,
+				}, {
+					block: &Block{
+						QueryTxs: nil,
+					},
+					nextid: 0,
+					ok:     false,
+				}, {
+					block: &Block{
+						QueryTxs: []*QueryAsTx{
+							&QueryAsTx{
+								Request: &Request{
+									Header: SignedRequestHeader{
+										RequestHeader: RequestHeader{
+											QueryType: ReadQuery,
+										},
+									},
+									Payload: RequestPayload{
+										Queries: make([]Query, 10),
+									},
+								},
+								Response: &SignedResponseHeader{
+									ResponseHeader: ResponseHeader{
+										LogOffset: 0,
+									},
+								},
+							},
+						},
+					},
+					nextid: 0,
+					ok:     false,
+				}, {
+					block: &Block{
+						QueryTxs: []*QueryAsTx{
+							&QueryAsTx{
+								Request: &Request{
+									Header: SignedRequestHeader{
+										RequestHeader: RequestHeader{
+											QueryType: WriteQuery,
+										},
+									},
+									Payload: RequestPayload{
+										Queries: make([]Query, 10),
+									},
+								},
+								Response: &SignedResponseHeader{
+									ResponseHeader: ResponseHeader{
+										LogOffset: 0,
+									},
+								},
+							},
+						},
+					},
+					nextid: 10,
+					ok:     true,
+				}, {
+					block: &Block{
+						QueryTxs: []*QueryAsTx{
+							&QueryAsTx{
+								Request: &Request{
+									Header: SignedRequestHeader{
+										RequestHeader: RequestHeader{
+											QueryType: ReadQuery,
+										},
+									},
+									Payload: RequestPayload{
+										Queries: make([]Query, 10),
+									},
+								},
+								Response: &SignedResponseHeader{
+									ResponseHeader: ResponseHeader{
+										LogOffset: 0,
+									},
+								},
+							}, &QueryAsTx{
+								Request: &Request{
+									Header: SignedRequestHeader{
+										RequestHeader: RequestHeader{
+											QueryType: WriteQuery,
+										},
+									},
+									Payload: RequestPayload{
+										Queries: make([]Query, 10),
+									},
+								},
+								Response: &SignedResponseHeader{
+									ResponseHeader: ResponseHeader{
+										LogOffset: 0,
+									},
+								},
+							}, &QueryAsTx{
+								Request: &Request{
+									Header: SignedRequestHeader{
+										RequestHeader: RequestHeader{
+											QueryType: ReadQuery,
+										},
+									},
+									Payload: RequestPayload{
+										Queries: make([]Query, 10),
+									},
+								},
+								Response: &SignedResponseHeader{
+									ResponseHeader: ResponseHeader{
+										LogOffset: 10,
+									},
+								},
+							}, &QueryAsTx{
+								Request: &Request{
+									Header: SignedRequestHeader{
+										RequestHeader: RequestHeader{
+											QueryType: WriteQuery,
+										},
+									},
+									Payload: RequestPayload{
+										Queries: make([]Query, 20),
+									},
+								},
+								Response: &SignedResponseHeader{
+									ResponseHeader: ResponseHeader{
+										LogOffset: 10,
+									},
+								},
+							},
+						},
+					},
+					nextid: 30,
+					ok:     true,
+				},
+			}
+		)
+
+		for _, v := range cases {
+			nextid, ok = v.block.CalcNextID()
+			So(ok, ShouldEqual, v.ok)
+			if ok {
+				So(nextid, ShouldEqual, v.nextid)
+			}
+		}
+	})
+}
diff --git a/types/build.sh b/types/build.sh
deleted file mode 100755
index 2b9fd9b67..000000000
--- a/types/build.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!
/usr/bin/env bash - -declare -r PB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -protoc -I="$PB_DIR" --go_out="$PB_DIR" "$PB_DIR"/*.proto diff --git a/blockproducer/db_service_types.go b/types/db_service_types.go similarity index 58% rename from blockproducer/db_service_types.go rename to types/db_service_types.go index 6edac9581..89c69bafa 100644 --- a/blockproducer/db_service_types.go +++ b/types/db_service_types.go @@ -14,60 +14,35 @@ * limitations under the License. */ -package blockproducer +package types import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) +//go:generate hsp + // CreateDatabaseRequestHeader defines client create database rpc header. type CreateDatabaseRequestHeader struct { - ResourceMeta wt.ResourceMeta -} - -// Serialize structure to bytes. -func (h *CreateDatabaseRequestHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return h.ResourceMeta.Serialize() + ResourceMeta ResourceMeta } // SignedCreateDatabaseRequestHeader defines signed client create database request header. type SignedCreateDatabaseRequestHeader struct { CreateDatabaseRequestHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in create database request header. func (sh *SignedCreateDatabaseRequestHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.CreateDatabaseRequestHeader, &sh.HeaderHash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.CreateDatabaseRequestHeader) } // Sign the request. func (sh *SignedCreateDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - buildHash(&sh.CreateDatabaseRequestHeader, &sh.HeaderHash) - - // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.CreateDatabaseRequestHeader, signer) } // CreateDatabaseRequest defines client create database rpc request entity. @@ -89,49 +64,23 @@ func (r *CreateDatabaseRequest) Sign(signer *asymmetric.PrivateKey) (err error) // CreateDatabaseResponseHeader defines client create database rpc response header. type CreateDatabaseResponseHeader struct { - InstanceMeta wt.ServiceInstance -} - -// Serialize structure to bytes. -func (h *CreateDatabaseResponseHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return h.InstanceMeta.Serialize() + InstanceMeta ServiceInstance } // SignedCreateDatabaseResponseHeader defines signed client create database response header. type SignedCreateDatabaseResponseHeader struct { CreateDatabaseResponseHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in create database response header. 
func (sh *SignedCreateDatabaseResponseHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.CreateDatabaseResponseHeader, &sh.HeaderHash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.CreateDatabaseResponseHeader) } // Sign the response. func (sh *SignedCreateDatabaseResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - buildHash(&sh.CreateDatabaseResponseHeader, &sh.HeaderHash) - - // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.CreateDatabaseResponseHeader, signer) } // CreateDatabaseResponse defines client create database rpc response entity. @@ -156,46 +105,20 @@ type DropDatabaseRequestHeader struct { DatabaseID proto.DatabaseID } -// Serialize structure to bytes. -func (h *DropDatabaseRequestHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return []byte(h.DatabaseID) -} - // SignedDropDatabaseRequestHeader defines signed client drop database rpc request header. type SignedDropDatabaseRequestHeader struct { DropDatabaseRequestHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in request header. func (sh *SignedDropDatabaseRequestHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.DropDatabaseRequestHeader, &sh.HeaderHash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.DropDatabaseRequestHeader) } // Sign the request. func (sh *SignedDropDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - buildHash(&sh.DropDatabaseRequestHeader, &sh.HeaderHash) - - // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.DropDatabaseRequestHeader, signer) } // DropDatabaseRequest defines client drop database rpc request entity. @@ -222,46 +145,20 @@ type GetDatabaseRequestHeader struct { DatabaseID proto.DatabaseID } -// Serialize structure to bytes. -func (h *GetDatabaseRequestHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return []byte(h.DatabaseID) -} - // SignedGetDatabaseRequestHeader defines signed client get database rpc request header entity. type SignedGetDatabaseRequestHeader struct { GetDatabaseRequestHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in request header. func (sh *SignedGetDatabaseRequestHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.GetDatabaseRequestHeader, &sh.HeaderHash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.GetDatabaseRequestHeader) } // Sign the request. 
func (sh *SignedGetDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - buildHash(&sh.GetDatabaseRequestHeader, &sh.HeaderHash) - - // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.GetDatabaseRequestHeader, signer) } // GetDatabaseRequest defines client get database rpc request entity. @@ -282,49 +179,23 @@ func (r *GetDatabaseRequest) Sign(signer *asymmetric.PrivateKey) error { // GetDatabaseResponseHeader defines client get database rpc response header entity. type GetDatabaseResponseHeader struct { - InstanceMeta wt.ServiceInstance -} - -// Serialize structure to bytes. -func (h *GetDatabaseResponseHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return h.InstanceMeta.Serialize() + InstanceMeta ServiceInstance } // SignedGetDatabaseResponseHeader defines client get database rpc response header entity. type SignedGetDatabaseResponseHeader struct { GetDatabaseResponseHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in response header. func (sh *SignedGetDatabaseResponseHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.GetDatabaseResponseHeader, &sh.HeaderHash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.GetDatabaseResponseHeader) } // Sign the request. func (sh *SignedGetDatabaseResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - buildHash(&sh.GetDatabaseResponseHeader, &sh.HeaderHash) - - // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.GetDatabaseResponseHeader, signer) } // GetDatabaseResponse defines client get database rpc response entity. @@ -342,22 +213,3 @@ func (r *GetDatabaseResponse) Verify() (err error) { func (r *GetDatabaseResponse) Sign(signer *asymmetric.PrivateKey) (err error) { return r.Header.Sign(signer) } - -// FIXIT(xq262144) remove duplicated interface in utils package. -type canSerialize interface { - Serialize() []byte -} - -func verifyHash(data canSerialize, h *hash.Hash) (err error) { - var newHash hash.Hash - buildHash(data, &newHash) - if !newHash.IsEqual(h) { - return wt.ErrHashVerification - } - return -} - -func buildHash(data canSerialize, h *hash.Hash) { - newHash := hash.THashH(data.Serialize()) - copy(h[:], newHash[:]) -} diff --git a/types/db_service_types_gen.go b/types/db_service_types_gen.go new file mode 100644 index 000000000..188fcada8 --- /dev/null +++ b/types/db_service_types_gen.go @@ -0,0 +1,427 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
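// [Editor's note] Illustrative sketch only, not part of this patch: the
// generated MarshalHash methods below walk each struct in a fixed field
// order (the 0x8N bytes are msgpack-style map headers), so equal values
// always marshal to equal bytes. A caller wanting a digest of that canonical
// encoding could reuse the repo's hash.THashH helper, seen in the removed
// buildHash above; hashRequestHeader is a hypothetical name, and the idea
// that DefaultHashSignVerifierImpl hashes the same bytes internally is an
// assumption drawn from this refactor, not a documented contract.

func hashRequestHeader(h *CreateDatabaseRequestHeader) (out hash.Hash, err error) {
	var data []byte
	if data, err = h.MarshalHash(); err != nil {
		return
	}
	// Condense the canonical bytes into a fixed-size digest.
	out = hash.THashH(data)
	return
}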
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *CreateDatabaseRequest) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CreateDatabaseRequest) Msgsize() (s int) { + s = 1 + 7 + 1 + 28 + 1 + 13 + z.Header.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *CreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CreateDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 13 + z.ResourceMeta.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *CreateDatabaseResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CreateDatabaseResponse) Msgsize() (s int) { + s = 1 + 7 + 1 + 29 + 1 + 13 + z.Header.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *CreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CreateDatabaseResponseHeader) Msgsize() (s int) { + s = 1 + 13 + z.InstanceMeta.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *DropDatabaseRequest) MarshalHash() (o []byte, err error) { + var b 
[]byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DropDatabaseRequest) Msgsize() (s int) { + s = 1 + 7 + 1 + 26 + 1 + 11 + z.Header.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *DropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DropDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 11 + z.DatabaseID.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z DropDatabaseResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 0 + o = append(o, 0x80) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z DropDatabaseResponse) Msgsize() (s int) { + s = 1 + return +} + +// MarshalHash marshals for hash +func (z *GetDatabaseRequest) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetDatabaseRequest) Msgsize() (s int) { + s = 1 + 7 + 1 + 25 + 1 + 11 + z.Header.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *GetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 11 + 
z.DatabaseID.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *GetDatabaseResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetDatabaseResponse) Msgsize() (s int) { + s = 1 + 7 + 1 + 26 + 1 + 13 + z.Header.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *GetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetDatabaseResponseHeader) Msgsize() (s int) { + s = 1 + 13 + z.InstanceMeta.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedCreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedCreateDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 28 + 1 + 13 + z.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedCreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedCreateDatabaseResponseHeader) Msgsize() (s int) { + s = 1 + 29 + 1 + 13 + z.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z 
*SignedDropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedDropDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 26 + 1 + 11 + z.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedGetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedGetDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 25 + 1 + 11 + z.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedGetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedGetDatabaseResponseHeader) Msgsize() (s int) { + s = 1 + 26 + 1 + 13 + z.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/types/db_service_types_gen_test.go b/types/db_service_types_gen_test.go new file mode 100644 index 000000000..dca3773e8 --- /dev/null +++ b/types/db_service_types_gen_test.go @@ -0,0 +1,602 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
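// [Editor's note] Illustrative sketch only, not part of this patch: every
// generated test below follows one template, so the checked property can be
// captured in a small helper (hashMarshaler and assertStableHash are
// hypothetical names). Note that binary.Read fails silently on structs
// containing non-fixed-size fields such as strings, slices, and pointers,
// so most of these tests effectively hash the zero value; determinism of
// MarshalHash is still the property that matters.

type hashMarshaler interface {
	MarshalHash() ([]byte, error)
}

func assertStableHash(t *testing.T, v hashMarshaler) {
	t.Helper()
	// Hash the same value twice; a stable implementation must yield
	// byte-identical output.
	bts1, err := v.MarshalHash()
	if err != nil {
		t.Fatal(err)
	}
	bts2, err := v.MarshalHash()
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bts1, bts2) {
		t.Fatal("hash not stable")
	}
}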
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashCreateDatabaseRequest(t *testing.T) { + v := CreateDatabaseRequest{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashCreateDatabaseRequest(b *testing.B) { + v := CreateDatabaseRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgCreateDatabaseRequest(b *testing.B) { + v := CreateDatabaseRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashCreateDatabaseRequestHeader(t *testing.T) { + v := CreateDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashCreateDatabaseRequestHeader(b *testing.B) { + v := CreateDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgCreateDatabaseRequestHeader(b *testing.B) { + v := CreateDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashCreateDatabaseResponse(t *testing.T) { + v := CreateDatabaseResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashCreateDatabaseResponse(b *testing.B) { + v := CreateDatabaseResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgCreateDatabaseResponse(b *testing.B) { + v := CreateDatabaseResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashCreateDatabaseResponseHeader(t *testing.T) { + v := CreateDatabaseResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashCreateDatabaseResponseHeader(b *testing.B) { + v := CreateDatabaseResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgCreateDatabaseResponseHeader(b *testing.B) { + v := CreateDatabaseResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashDropDatabaseRequest(t *testing.T) { + v := DropDatabaseRequest{} + 
binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashDropDatabaseRequest(b *testing.B) { + v := DropDatabaseRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgDropDatabaseRequest(b *testing.B) { + v := DropDatabaseRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashDropDatabaseRequestHeader(t *testing.T) { + v := DropDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashDropDatabaseRequestHeader(b *testing.B) { + v := DropDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgDropDatabaseRequestHeader(b *testing.B) { + v := DropDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashDropDatabaseResponse(t *testing.T) { + v := DropDatabaseResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashDropDatabaseResponse(b *testing.B) { + v := DropDatabaseResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgDropDatabaseResponse(b *testing.B) { + v := DropDatabaseResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashGetDatabaseRequest(t *testing.T) { + v := GetDatabaseRequest{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashGetDatabaseRequest(b *testing.B) { + v := GetDatabaseRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgGetDatabaseRequest(b *testing.B) { + v := GetDatabaseRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashGetDatabaseRequestHeader(t *testing.T) { + v := GetDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func 
BenchmarkMarshalHashGetDatabaseRequestHeader(b *testing.B) { + v := GetDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgGetDatabaseRequestHeader(b *testing.B) { + v := GetDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashGetDatabaseResponse(t *testing.T) { + v := GetDatabaseResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashGetDatabaseResponse(b *testing.B) { + v := GetDatabaseResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgGetDatabaseResponse(b *testing.B) { + v := GetDatabaseResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashGetDatabaseResponseHeader(t *testing.T) { + v := GetDatabaseResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashGetDatabaseResponseHeader(b *testing.B) { + v := GetDatabaseResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgGetDatabaseResponseHeader(b *testing.B) { + v := GetDatabaseResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedCreateDatabaseRequestHeader(t *testing.T) { + v := SignedCreateDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedCreateDatabaseRequestHeader(b *testing.B) { + v := SignedCreateDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedCreateDatabaseRequestHeader(b *testing.B) { + v := SignedCreateDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedCreateDatabaseResponseHeader(t *testing.T) { + v := SignedCreateDatabaseResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedCreateDatabaseResponseHeader(b *testing.B) { + v := SignedCreateDatabaseResponseHeader{} + b.ReportAllocs() + 
b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedCreateDatabaseResponseHeader(b *testing.B) { + v := SignedCreateDatabaseResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedDropDatabaseRequestHeader(t *testing.T) { + v := SignedDropDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedDropDatabaseRequestHeader(b *testing.B) { + v := SignedDropDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedDropDatabaseRequestHeader(b *testing.B) { + v := SignedDropDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedGetDatabaseRequestHeader(t *testing.T) { + v := SignedGetDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedGetDatabaseRequestHeader(b *testing.B) { + v := SignedGetDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedGetDatabaseRequestHeader(b *testing.B) { + v := SignedGetDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedGetDatabaseResponseHeader(t *testing.T) { + v := SignedGetDatabaseResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedGetDatabaseResponseHeader(b *testing.B) { + v := SignedGetDatabaseResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedGetDatabaseResponseHeader(b *testing.B) { + v := SignedGetDatabaseResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/types/db_service_types_test.go b/types/db_service_types_test.go new file mode 100644 index 000000000..9cb3cafee --- /dev/null +++ b/types/db_service_types_test.go @@ -0,0 +1,100 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package types
+
+import (
+	"testing"
+
+	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestTypes(t *testing.T) {
+	Convey("test nils", t, func() {
+		priv, _, err := asymmetric.GenSecp256k1KeyPair()
+		So(err, ShouldBeNil)
+
+		h1 := &SignedCreateDatabaseRequestHeader{}
+		err = h1.Sign(priv)
+		So(err, ShouldBeNil)
+		h1.Signee = nil
+		err = h1.Verify()
+		So(err, ShouldNotBeNil)
+
+		h2 := &SignedCreateDatabaseResponseHeader{}
+		err = h2.Sign(priv)
+		So(err, ShouldBeNil)
+		h2.Signee = nil
+		err = h2.Verify()
+		So(err, ShouldNotBeNil)
+
+		h3 := &SignedDropDatabaseRequestHeader{}
+		err = h3.Sign(priv)
+		So(err, ShouldBeNil)
+		h3.Signee = nil
+		err = h3.Verify()
+		So(err, ShouldNotBeNil)
+
+		h4 := &SignedGetDatabaseRequestHeader{}
+		err = h4.Sign(priv)
+		So(err, ShouldBeNil)
+		h4.Signee = nil
+		err = h4.Verify()
+		So(err, ShouldNotBeNil)
+
+		h5 := &SignedGetDatabaseResponseHeader{}
+		err = h5.Sign(priv)
+		So(err, ShouldBeNil)
+		h5.Signee = nil
+		err = h5.Verify()
+		So(err, ShouldNotBeNil)
+	})
+	Convey("test nested sign/verify", t, func() {
+		priv, _, err := asymmetric.GenSecp256k1KeyPair()
+		So(err, ShouldBeNil)
+
+		r1 := &CreateDatabaseRequest{}
+		err = r1.Sign(priv)
+		So(err, ShouldBeNil)
+		err = r1.Verify()
+		So(err, ShouldBeNil)
+
+		r2 := &CreateDatabaseResponse{}
+		err = r2.Sign(priv)
+		So(err, ShouldBeNil)
+		err = r2.Verify()
+		So(err, ShouldBeNil)
+
+		r3 := &DropDatabaseRequest{}
+		err = r3.Sign(priv)
+		So(err, ShouldBeNil)
+		err = r3.Verify()
+		So(err, ShouldBeNil)
+
+		r4 := &GetDatabaseRequest{}
+		err = r4.Sign(priv)
+		So(err, ShouldBeNil)
+		err = r4.Verify()
+		So(err, ShouldBeNil)
+
+		r5 := &GetDatabaseResponse{}
+		err = r5.Sign(priv)
+		So(err, ShouldBeNil)
+		err = r5.Verify()
+		So(err, ShouldBeNil)
+	})
+}
diff --git a/sqlchain/types/doc.go b/types/doc.go
similarity index 100%
rename from sqlchain/types/doc.go
rename to types/doc.go
diff --git a/types/errors.go b/types/errors.go
new file mode 100644
index 000000000..650e46728
--- /dev/null
+++ b/types/errors.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package types
+
+import (
+	"errors"
+)
+
+var (
+	// ErrMerkleRootVerification indicates a failed merkle root verification.
+	ErrMerkleRootVerification = errors.New("merkle root verification failed")
+	// ErrNodePublicKeyNotMatch indicates that the public key given with a node does not match the
+	// one in the key store.
+	ErrNodePublicKeyNotMatch = errors.New("node public key doesn't match")
+	// ErrSignRequest indicates a failed signature computation.
+	ErrSignRequest = errors.New("signature compute failed")
+)
diff --git a/types/init_service_type.go b/types/init_service_type.go
new file mode 100644
index 000000000..9d5462788
--- /dev/null
+++ b/types/init_service_type.go
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package types
+
+import (
+	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
+	"github.com/CovenantSQL/CovenantSQL/crypto/verifier"
+	"github.com/CovenantSQL/CovenantSQL/proto"
+)
+
+//go:generate hsp
+
+// InitService defines worker service init request.
+type InitService struct {
+	proto.Envelope
+}
+
+// ResourceMeta defines single database resource meta.
+type ResourceMeta struct {
+	Node          uint16 // reserved node count
+	Space         uint64 // reserved storage space in bytes
+	Memory        uint64 // reserved memory in bytes
+	LoadAvgPerCPU uint64 // max loadAvg15 per CPU
+	EncryptionKey string `hspack:"-"` // encryption key for database instance
+}
+
+// ServiceInstance defines single instance to be initialized.
+type ServiceInstance struct {
+	DatabaseID   proto.DatabaseID
+	Peers        *proto.Peers
+	ResourceMeta ResourceMeta
+	GenesisBlock *Block
+}
+
+// InitServiceResponseHeader defines worker service init response header.
+type InitServiceResponseHeader struct {
+	Instances []ServiceInstance
+}
+
+// SignedInitServiceResponseHeader defines signed worker service init response header.
+type SignedInitServiceResponseHeader struct {
+	InitServiceResponseHeader
+	verifier.DefaultHashSignVerifierImpl
+}
+
+// InitServiceResponse defines worker service init response.
+type InitServiceResponse struct {
+	Header SignedInitServiceResponseHeader
+}
+
+// Verify checks hash and signature in init service response header.
+func (sh *SignedInitServiceResponseHeader) Verify() (err error) {
+	return sh.DefaultHashSignVerifierImpl.Verify(&sh.InitServiceResponseHeader)
+}
+
+// Sign the response.
+func (sh *SignedInitServiceResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) {
+	return sh.DefaultHashSignVerifierImpl.Sign(&sh.InitServiceResponseHeader, signer)
+}
+
+// Verify checks hash and signature in init service response header.
+func (rs *InitServiceResponse) Verify() error {
+	return rs.Header.Verify()
+}
+
+// Sign the response.
+func (rs *InitServiceResponse) Sign(signer *asymmetric.PrivateKey) (err error) {
+	// sign
+	return rs.Header.Sign(signer)
+}
diff --git a/types/init_service_type_gen.go b/types/init_service_type_gen.go
new file mode 100644
index 000000000..f859b2e88
--- /dev/null
+++ b/types/init_service_type_gen.go
@@ -0,0 +1,187 @@
+package types
+
+// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT.
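// [Editor's note] Illustrative sketch only, not part of this patch:
// ResourceMeta.EncryptionKey carries the `hspack:"-"` tag, so the generated
// ResourceMeta.MarshalHash below emits a four-entry map (the 0x84 header)
// and never touches the key. Two metas differing only in EncryptionKey
// therefore hash identically, keeping the key out of signed payloads.
// encryptionKeyIsNotHashed is a hypothetical demonstration and assumes
// "bytes" is imported.

func encryptionKeyIsNotHashed() bool {
	a := ResourceMeta{Node: 2, Space: 1 << 30, EncryptionKey: "key-a"}
	b := ResourceMeta{Node: 2, Space: 1 << 30, EncryptionKey: "key-b"}
	ah, _ := a.MarshalHash()
	bh, _ := b.MarshalHash()
	return bytes.Equal(ah, bh) // true: EncryptionKey never enters the hash
}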
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *InitService) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *InitService) Msgsize() (s int) { + s = 1 + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *InitServiceResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *InitServiceResponse) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *InitServiceResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Instances))) + for za0001 := range z.Instances { + if oTemp, err := z.Instances[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *InitServiceResponseHeader) Msgsize() (s int) { + s = 1 + 10 + hsp.ArrayHeaderSize + for za0001 := range z.Instances { + s += z.Instances[za0001].Msgsize() + } + return +} + +// MarshalHash marshals for hash +func (z *ResourceMeta) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + o = hsp.AppendUint16(o, z.Node) + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.Space) + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.Memory) + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.LoadAvgPerCPU) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ResourceMeta) Msgsize() (s int) { + s = 1 + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + 14 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z *ServiceInstance) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.GenesisBlock == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.GenesisBlock.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Peers == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Peers.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the 
serialized message
+func (z *ServiceInstance) Msgsize() (s int) {
+	s = 1 + 13
+	if z.GenesisBlock == nil {
+		s += hsp.NilSize
+	} else {
+		s += z.GenesisBlock.Msgsize()
+	}
+	s += 6
+	if z.Peers == nil {
+		s += hsp.NilSize
+	} else {
+		s += z.Peers.Msgsize()
+	}
+	s += 13 + z.ResourceMeta.Msgsize() + 11 + z.DatabaseID.Msgsize()
+	return
+}
+
+// MarshalHash marshals for hash
+func (z *SignedInitServiceResponseHeader) MarshalHash() (o []byte, err error) {
+	var b []byte
+	o = hsp.Require(b, z.Msgsize())
+	// map header, size 2
+	// map header, size 1
+	o = append(o, 0x82, 0x82, 0x81, 0x81)
+	o = hsp.AppendArrayHeader(o, uint32(len(z.InitServiceResponseHeader.Instances)))
+	for za0001 := range z.InitServiceResponseHeader.Instances {
+		if oTemp, err := z.InitServiceResponseHeader.Instances[za0001].MarshalHash(); err != nil {
+			return nil, err
+		} else {
+			o = hsp.AppendBytes(o, oTemp)
+		}
+	}
+	o = append(o, 0x82)
+	if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil {
+		return nil, err
+	} else {
+		o = hsp.AppendBytes(o, oTemp)
+	}
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *SignedInitServiceResponseHeader) Msgsize() (s int) {
+	s = 1 + 26 + 1 + 10 + hsp.ArrayHeaderSize
+	for za0001 := range z.InitServiceResponseHeader.Instances {
+		s += z.InitServiceResponseHeader.Instances[za0001].Msgsize()
+	}
+	s += 28 + z.DefaultHashSignVerifierImpl.Msgsize()
+	return
+}
diff --git a/worker/types/init_service_type_gen_test.go b/types/init_service_type_gen_test.go
similarity index 100%
rename from worker/types/init_service_type_gen_test.go
rename to types/init_service_type_gen_test.go
diff --git a/types/no_ack_report_type.go b/types/no_ack_report_type.go
new file mode 100644
index 000000000..29187a451
--- /dev/null
+++ b/types/no_ack_report_type.go
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package types
+
+import (
+	"time"
+
+	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
+	"github.com/CovenantSQL/CovenantSQL/crypto/verifier"
+	"github.com/CovenantSQL/CovenantSQL/proto"
+)
+
+//go:generate hsp
+
+// NoAckReportHeader defines worker issued client no ack report.
+type NoAckReportHeader struct {
+	NodeID    proto.NodeID // reporter node id
+	Timestamp time.Time    // time in UTC zone
+	Response  SignedResponseHeader
+}
+
+// SignedNoAckReportHeader defines worker issued/signed client no ack report.
+type SignedNoAckReportHeader struct {
+	NoAckReportHeader
+	verifier.DefaultHashSignVerifierImpl
+}
+
+// NoAckReport defines whole worker no client ack report.
+type NoAckReport struct {
+	proto.Envelope
+	Header SignedNoAckReportHeader
+}
+
+// AggrNoAckReportHeader defines worker leader aggregated client no ack report.
+type AggrNoAckReportHeader struct { + NodeID proto.NodeID // aggregated report node id + Timestamp time.Time // time in UTC zone + Reports []SignedNoAckReportHeader // no-ack reports + Peers *proto.Peers // serving peers during report +} + +// SignedAggrNoAckReportHeader defines the worker-leader aggregated and signed client no-ack report. +type SignedAggrNoAckReportHeader struct { + AggrNoAckReportHeader + verifier.DefaultHashSignVerifierImpl +} + +// AggrNoAckReport defines the complete worker-leader no-ack report. +type AggrNoAckReport struct { + proto.Envelope + Header SignedAggrNoAckReportHeader +} + +// Verify checks hash and signature in signed no-ack report header. +func (sh *SignedNoAckReportHeader) Verify() (err error) { + // verify original response + if err = sh.Response.Verify(); err != nil { + return + } + + return sh.DefaultHashSignVerifierImpl.Verify(&sh.NoAckReportHeader) +} + +// Sign the no-ack report header. +func (sh *SignedNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + // verify original response + if err = sh.Response.Verify(); err != nil { + return + } + + return sh.DefaultHashSignVerifierImpl.Sign(&sh.NoAckReportHeader, signer) +} + +// Verify checks hash and signature in the whole no-ack report. +func (r *NoAckReport) Verify() error { + return r.Header.Verify() +} + +// Sign the report. +func (r *NoAckReport) Sign(signer *asymmetric.PrivateKey) error { + return r.Header.Sign(signer) +} + +// Verify checks hash and signature in aggregated no-ack report. +func (sh *SignedAggrNoAckReportHeader) Verify() (err error) { + // verify original reports + for _, r := range sh.Reports { + if err = r.Verify(); err != nil { + return + } + } + + return sh.DefaultHashSignVerifierImpl.Verify(&sh.AggrNoAckReportHeader) +} + +// Sign the aggregated no-ack report header. +func (sh *SignedAggrNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + for _, r := range sh.Reports { + if err = r.Verify(); err != nil { + return + } + } + + return sh.DefaultHashSignVerifierImpl.Sign(&sh.AggrNoAckReportHeader, signer) +} + +// Verify checks hash and signature in the whole aggregated no-ack report. +func (r *AggrNoAckReport) Verify() (err error) { + return r.Header.Verify() +} + +// Sign the report. +func (r *AggrNoAckReport) Sign(signer *asymmetric.PrivateKey) error { + return r.Header.Sign(signer) +} diff --git a/types/no_ack_report_type_gen.go b/types/no_ack_report_type_gen.go new file mode 100644 index 000000000..e9e89abc6 --- /dev/null +++ b/types/no_ack_report_type_gen.go @@ -0,0 +1,206 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
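+ +// Note on the encoding below (commentary on the generated pattern, not a documented HashStablePack contract): MarshalHash emits struct fields in a fixed order behind msgpack-style fixmap header bytes (0x80 | field count, so 0x82 marks a two-field map). The header byte is appended twice up front and once more before each subsequent field as a separator; because neither the field order nor the separators vary for a given type, equal values always marshal to identical bytes, which is the stability property the hash signatures in this package rely on.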
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *AggrNoAckReport) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + o = append(o, 0x82, 0x82, 0x82, 0x82) + if oTemp, err := z.Header.AggrNoAckReportHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AggrNoAckReport) Msgsize() (s int) { + s = 1 + 7 + 1 + 22 + z.Header.AggrNoAckReportHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *AggrNoAckReportHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Peers == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Peers.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.Reports))) + for za0001 := range z.Reports { + if oTemp, err := z.Reports[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + o = hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AggrNoAckReportHeader) Msgsize() (s int) { + s = 1 + 6 + if z.Peers == nil { + s += hsp.NilSize + } else { + s += z.Peers.Msgsize() + } + s += 8 + hsp.ArrayHeaderSize + for za0001 := range z.Reports { + s += z.Reports[za0001].Msgsize() + } + s += 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z *NoAckReport) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *NoAckReport) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *NoAckReportHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if oTemp, err := z.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = 
hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *NoAckReportHeader) Msgsize() (s int) { + s = 1 + 9 + z.Response.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z *SignedAggrNoAckReportHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.AggrNoAckReportHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedAggrNoAckReportHeader) Msgsize() (s int) { + s = 1 + 22 + z.AggrNoAckReportHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedNoAckReportHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 3 + o = append(o, 0x82, 0x82, 0x83, 0x83) + if oTemp, err := z.NoAckReportHeader.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendTime(o, z.NoAckReportHeader.Timestamp) + o = append(o, 0x83) + if oTemp, err := z.NoAckReportHeader.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedNoAckReportHeader) Msgsize() (s int) { + s = 1 + 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/worker/types/no_ack_report_type_gen_test.go b/types/no_ack_report_type_gen_test.go similarity index 100% rename from worker/types/no_ack_report_type_gen_test.go rename to types/no_ack_report_type_gen_test.go diff --git a/sqlchain/types/observer.go b/types/observer.go similarity index 100% rename from sqlchain/types/observer.go rename to types/observer.go diff --git a/types/request_type.go b/types/request_type.go new file mode 100644 index 000000000..2105e641c --- /dev/null +++ b/types/request_type.go @@ -0,0 +1,147 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package types + +import ( + "fmt" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +//go:generate hsp + +// QueryType enumerates available query types, currently read/write. +type QueryType int32 + +const ( + // ReadQuery defines a read query type. + ReadQuery QueryType = iota + // WriteQuery defines a write query type. + WriteQuery +) + +// NamedArg defines the named argument structure for database queries. +type NamedArg struct { + Name string + Value interface{} +} + +// Query defines a single query. +type Query struct { + Pattern string + Args []NamedArg +} + +// RequestPayload defines the queries payload. +type RequestPayload struct { + Queries []Query `json:"qs"` +} + +// RequestHeader defines a query request header. +type RequestHeader struct { + QueryType QueryType `json:"qt"` + NodeID proto.NodeID `json:"id"` // request node id + DatabaseID proto.DatabaseID `json:"dbid"` // request database id + ConnectionID uint64 `json:"cid"` + SeqNo uint64 `json:"seq"` + Timestamp time.Time `json:"t"` // time in UTC zone + BatchCount uint64 `json:"bc"` // query count in this request + QueriesHash hash.Hash `json:"qh"` // hash of query payload +} + +// QueryKey defines a unique query key of a request. +type QueryKey struct { + NodeID proto.NodeID `json:"id"` + ConnectionID uint64 `json:"cid"` + SeqNo uint64 `json:"seq"` +} + +// String implements fmt.Stringer for logging purposes. +func (k *QueryKey) String() string { + return fmt.Sprintf("%s#%016x#%016x", string(k.NodeID[:8]), k.ConnectionID, k.SeqNo) +} + +// SignedRequestHeader defines a signed query request header. +type SignedRequestHeader struct { + RequestHeader + verifier.DefaultHashSignVerifierImpl +} + +// Request defines a complete query request. +type Request struct { + proto.Envelope + Header SignedRequestHeader `json:"h"` + Payload RequestPayload `json:"p"` +} + +// String implements fmt.Stringer for logging purposes. +func (t QueryType) String() string { + switch t { + case ReadQuery: + return "read" + case WriteQuery: + return "write" + default: + return "unknown" + } +} + +// Verify checks hash and signature in request header. +func (sh *SignedRequestHeader) Verify() (err error) { + return sh.DefaultHashSignVerifierImpl.Verify(&sh.RequestHeader) +} + +// Sign the request header. +func (sh *SignedRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + return sh.DefaultHashSignVerifierImpl.Sign(&sh.RequestHeader, signer) +} + +// Verify checks hash and signature in the whole request. +func (r *Request) Verify() (err error) { + // verify payload hash in signed header + if err = verifyHash(&r.Payload, &r.Header.QueriesHash); err != nil { + return + } + // verify header signature + return r.Header.Verify() +} + +// Sign the request. +func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { + // set query count + r.Header.BatchCount = uint64(len(r.Payload.Queries)) + + // compute payload hash + if err = buildHash(&r.Payload, &r.Header.QueriesHash); err != nil { + return + } + + return r.Header.Sign(signer) +} + +// GetQueryKey returns a unique query key of this request. 
+func (sh *SignedRequestHeader) GetQueryKey() QueryKey { + return QueryKey{ + NodeID: sh.NodeID, + ConnectionID: sh.ConnectionID, + SeqNo: sh.SeqNo, + } +} diff --git a/types/request_type_gen.go b/types/request_type_gen.go new file mode 100644 index 000000000..709e66bb9 --- /dev/null +++ b/types/request_type_gen.go @@ -0,0 +1,239 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z NamedArg) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o, err = hsp.AppendIntf(o, z.Value) + if err != nil { + return + } + o = append(o, 0x82) + o = hsp.AppendString(o, z.Name) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z NamedArg) Msgsize() (s int) { + s = 1 + 6 + hsp.GuessSize(z.Value) + 5 + hsp.StringPrefixSize + len(z.Name) + return +} + +// MarshalHash marshals for hash +func (z *Query) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Args))) + for za0001 := range z.Args { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendString(o, z.Args[za0001].Name) + o = append(o, 0x82) + o, err = hsp.AppendIntf(o, z.Args[za0001].Value) + if err != nil { + return + } + } + o = append(o, 0x82) + o = hsp.AppendString(o, z.Pattern) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Query) Msgsize() (s int) { + s = 1 + 5 + hsp.ArrayHeaderSize + for za0001 := range z.Args { + s += 1 + 5 + hsp.StringPrefixSize + len(z.Args[za0001].Name) + 6 + hsp.GuessSize(z.Args[za0001].Value) + } + s += 8 + hsp.StringPrefixSize + len(z.Pattern) + return +} + +// MarshalHash marshals for hash +func (z *QueryKey) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.ConnectionID) + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.SeqNo) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *QueryKey) Msgsize() (s int) { + s = 1 + 7 + z.NodeID.Msgsize() + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z QueryType) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendInt32(o, int32(z)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z QueryType) Msgsize() (s int) { + s = hsp.Int32Size + return +} + +// MarshalHash marshals for hash +func (z *Request) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + // map header, size 1 + o = append(o, 0x83, 0x83, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries))) + for za0001 := range z.Payload.Queries { + if oTemp, err := z.Payload.Queries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + // map header, size 2 + o = append(o, 
0x83, 0x82, 0x82) + if oTemp, err := z.Header.RequestHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Request) Msgsize() (s int) { + s = 1 + 8 + 1 + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Payload.Queries { + s += z.Payload.Queries[za0001].Msgsize() + } + s += 7 + 1 + 14 + z.Header.RequestHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *RequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 8 + o = append(o, 0x88, 0x88) + o = hsp.AppendInt32(o, int32(z.QueryType)) + o = append(o, 0x88) + if oTemp, err := z.QueriesHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + o = hsp.AppendTime(o, z.Timestamp) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.ConnectionID) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.SeqNo) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.BatchCount) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RequestHeader) Msgsize() (s int) { + s = 1 + 10 + hsp.Int32Size + 12 + z.QueriesHash.Msgsize() + 11 + z.DatabaseID.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + 11 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z *RequestPayload) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Queries))) + for za0001 := range z.Queries { + if oTemp, err := z.Queries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RequestPayload) Msgsize() (s int) { + s = 1 + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Queries { + s += z.Queries[za0001].Msgsize() + } + return +} + +// MarshalHash marshals for hash +func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.RequestHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedRequestHeader) 
Msgsize() (s int) { + s = 1 + 14 + z.RequestHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/types/request_type_gen_test.go b/types/request_type_gen_test.go new file mode 100644 index 000000000..93e351222 --- /dev/null +++ b/types/request_type_gen_test.go @@ -0,0 +1,269 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashNamedArg(t *testing.T) { + v := NamedArg{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashNamedArg(b *testing.B) { + v := NamedArg{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgNamedArg(b *testing.B) { + v := NamedArg{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQuery(t *testing.T) { + v := Query{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQuery(b *testing.B) { + v := Query{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQuery(b *testing.B) { + v := Query{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQueryKey(t *testing.T) { + v := QueryKey{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQueryKey(b *testing.B) { + v := QueryKey{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQueryKey(b *testing.B) { + v := QueryKey{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequest(t *testing.T) { + v := Request{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequest(b *testing.B) { + v := Request{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequest(b *testing.B) { + v := Request{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequestHeader(t *testing.T) { + v := RequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + 
bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequestHeader(b *testing.B) { + v := RequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequestHeader(b *testing.B) { + v := RequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequestPayload(t *testing.T) { + v := RequestPayload{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequestPayload(b *testing.B) { + v := RequestPayload{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequestPayload(b *testing.B) { + v := RequestPayload{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedRequestHeader(t *testing.T) { + v := SignedRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedRequestHeader(b *testing.B) { + v := SignedRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedRequestHeader(b *testing.B) { + v := SignedRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/types/response_type.go b/types/response_type.go new file mode 100644 index 000000000..968385eca --- /dev/null +++ b/types/response_type.go @@ -0,0 +1,110 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/pkg/errors" +) + +//go:generate hsp + +// ResponseRow defines a single row of a query response. +type ResponseRow struct { + Values []interface{} +} + +// ResponsePayload defines column names and rows of a query response. 
+type ResponsePayload struct { + Columns []string `json:"c"` + DeclTypes []string `json:"t"` + Rows []ResponseRow `json:"r"` +} + +// ResponseHeader defines a query response header. +type ResponseHeader struct { + Request SignedRequestHeader `json:"r"` + NodeID proto.NodeID `json:"id"` // response node id + Timestamp time.Time `json:"t"` // time in UTC zone + RowCount uint64 `json:"c"` // response row count of payload + LogOffset uint64 `json:"o"` // request log offset + LastInsertID int64 `json:"l"` // last insert id + AffectedRows int64 `json:"a"` // affected rows + PayloadHash hash.Hash `json:"dh"` // hash of query response payload +} + +// SignedResponseHeader defines a signed query response header. +type SignedResponseHeader struct { + ResponseHeader + verifier.DefaultHashSignVerifierImpl +} + +// Response defines a complete query response. +type Response struct { + Header SignedResponseHeader `json:"h"` + Payload ResponsePayload `json:"p"` +} + +// Verify checks hash and signature in response header. +func (sh *SignedResponseHeader) Verify() (err error) { + // verify original request header + if err = sh.Request.Verify(); err != nil { + return + } + + return sh.DefaultHashSignVerifierImpl.Verify(&sh.ResponseHeader) +} + +// Sign the response header. +func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + // make sure the original request header is signed + if err = sh.Request.Verify(); err != nil { + err = errors.Wrapf(err, "SignedResponseHeader %v", sh) + return + } + + return sh.DefaultHashSignVerifierImpl.Sign(&sh.ResponseHeader, signer) +} + +// Verify checks hash and signature in the whole response. +func (r *Response) Verify() (err error) { + // verify data hash in header + if err = verifyHash(&r.Payload, &r.Header.PayloadHash); err != nil { + return + } + + return r.Header.Verify() +} + +// Sign the response. +func (r *Response) Sign(signer *asymmetric.PrivateKey) (err error) { + // set row count + r.Header.RowCount = uint64(len(r.Payload.Rows)) + + // build hash in header + if err = buildHash(&r.Payload, &r.Header.PayloadHash); err != nil { + return + } + + // sign the response header + return r.Header.Sign(signer) +} diff --git a/types/response_type_gen.go b/types/response_type_gen.go new file mode 100644 index 000000000..78c9db9c3 --- /dev/null +++ b/types/response_type_gen.go @@ -0,0 +1,184 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
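+ +// Usage note: Msgsize reports an upper-bound estimate rather than an exact length; each MarshalHash below first calls hsp.Require(b, z.Msgsize()), which, judging from this usage, pre-sizes the destination buffer so that the subsequent appends need not reallocate.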
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Response) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Payload.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + // map header, size 2 + o = append(o, 0x82, 0x82, 0x82) + if oTemp, err := z.Header.ResponseHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Response) Msgsize() (s int) { + s = 1 + 8 + z.Payload.Msgsize() + 7 + 1 + 15 + z.Header.ResponseHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *ResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 8 + o = append(o, 0x88, 0x88) + if oTemp, err := z.Request.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + if oTemp, err := z.PayloadHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + o = hsp.AppendInt64(o, z.LastInsertID) + o = append(o, 0x88) + o = hsp.AppendInt64(o, z.AffectedRows) + o = append(o, 0x88) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + o = hsp.AppendTime(o, z.Timestamp) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.RowCount) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.LogOffset) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ResponseHeader) Msgsize() (s int) { + s = 1 + 8 + z.Request.Msgsize() + 12 + z.PayloadHash.Msgsize() + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + hsp.Uint64Size + 10 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z *ResponsePayload) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.Rows))) + for za0003 := range z.Rows { + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Rows[za0003].Values))) + for za0004 := range z.Rows[za0003].Values { + o, err = hsp.AppendIntf(o, z.Rows[za0003].Values[za0004]) + if err != nil { + return + } + } + } + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.Columns))) + for za0001 := range z.Columns { + o = hsp.AppendString(o, z.Columns[za0001]) + } + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.DeclTypes))) + for za0002 := range z.DeclTypes { + o = hsp.AppendString(o, z.DeclTypes[za0002]) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ResponsePayload) Msgsize() (s int) { + s = 1 + 5 + hsp.ArrayHeaderSize + for za0003 := range z.Rows { + s += 1 + 7 + hsp.ArrayHeaderSize + for za0004 := range z.Rows[za0003].Values { + s += 
hsp.GuessSize(z.Rows[za0003].Values[za0004]) + } + } + s += 8 + hsp.ArrayHeaderSize + for za0001 := range z.Columns { + s += hsp.StringPrefixSize + len(z.Columns[za0001]) + } + s += 10 + hsp.ArrayHeaderSize + for za0002 := range z.DeclTypes { + s += hsp.StringPrefixSize + len(z.DeclTypes[za0002]) + } + return +} + +// MarshalHash marshals for hash +func (z *ResponseRow) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Values))) + for za0001 := range z.Values { + o, err = hsp.AppendIntf(o, z.Values[za0001]) + if err != nil { + return + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ResponseRow) Msgsize() (s int) { + s = 1 + 7 + hsp.ArrayHeaderSize + for za0001 := range z.Values { + s += hsp.GuessSize(z.Values[za0001]) + } + return +} + +// MarshalHash marshals for hash +func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.ResponseHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedResponseHeader) Msgsize() (s int) { + s = 1 + 15 + z.ResponseHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/worker/types/response_type_gen_test.go b/types/response_type_gen_test.go similarity index 100% rename from worker/types/response_type_gen_test.go rename to types/response_type_gen_test.go diff --git a/types/types.proto b/types/types.proto deleted file mode 100644 index a2abe9f05..000000000 --- a/types/types.proto +++ /dev/null @@ -1,112 +0,0 @@ -syntax = "proto3"; -package types; - -message Signature { - string R = 1; - string S = 2; -} - -message PublicKey { - bytes PublicKey = 1; -} - -message Hash { - bytes Hash = 1; -} - -message UtxoEntry { - bool IsCoinbase = 1; - bool FromMainChain = 2; - uint32 BlockHeight = 3; - map SparseOutputs = 4; -} - -message Utxo { - UtxoHeader UtxoHeader = 1; - bool Spent = 2; - uint64 amount = 3; -} - -message UtxoHeader { - int32 Version = 1; - Hash PrevTxHash = 2; - PublicKey Signee = 3; - Signature Signature = 4; -} - -enum TxType { - QUERY = 0; - STORAGE = 1; -} - -message Tx { - repeated Utxo UtxoIn = 1; - repeated Utxo UtxoOut = 2; - TxType type = 3; - string Content = 4; -} - -message NodeID { - string NodeID = 1; -} - -message AccountAddress { - string AccountAddress = 1; -} - -message Header { - int32 Version = 1; - NodeID Producer = 2; - Hash Root = 3; - Hash Parent = 4; - Hash MerkleRoot = 5; - int64 Timestamp = 6; -} - -message SignedHeader { - Header Header = 1; - Hash BlockHash = 2; - PublicKey Signee = 3; - Signature Signature = 4; -} - -message State { - Hash Head = 1; - int32 Height = 2; -} - -message BPTx { - Hash TxHash = 1; - BPTxData TxData = 2; -} - -message BPTxData { - uint64 AccountNonce = 1; - AccountAddress Recipient = 2; - bytes Amount = 3; - bytes Payload = 4; - - Signature Signature = 5; - PublicKey Signee = 6; -} - -message BPHeader { - int32 Version = 1; - AccountAddress Producer = 2; - Hash Root = 3; - Hash 
Parent = 4; - Hash MerkleRoot = 5; - int64 Timestamp = 6; -} - -message BPSignedHeader { - BPHeader Header = 1; - Hash BlockHash = 2; - PublicKey Signee = 3; - Signature Signature = 4; -} - -message BPBlock { - BPSignedHeader Header = 1; - repeated BPTx Tx = 2; -} diff --git a/types/types_test.go b/types/types_test.go new file mode 100644 index 000000000..39960d09d --- /dev/null +++ b/types/types_test.go @@ -0,0 +1,692 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "fmt" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/pkg/errors" + . "github.com/smartystreets/goconvey/convey" +) + +func getCommKeys() (*asymmetric.PrivateKey, *asymmetric.PublicKey) { + testPriv := []byte{ + 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, + 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, + 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, + 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, + } + return asymmetric.PrivKeyFromBytes(testPriv) +} + +func TestSignedRequestHeader_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + req := &SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("node"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + } + + var err error + + err = req.Sign(privKey) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = req.Verify() + So(err, ShouldBeNil) + + // modify structure + req.Timestamp = req.Timestamp.Add(time.Second) + + err = req.Verify() + So(err, ShouldNotBeNil) + + s, err := req.MarshalHash() + So(err, ShouldBeNil) + So(s, ShouldNotBeEmpty) + }) + }) +} + +func TestRequest_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + req := &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("node"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + Payload: RequestPayload{ + Queries: []Query{ + { + Pattern: "INSERT INTO test VALUES(?)", + Args: []NamedArg{ + { + Value: 1, + }, + }, + }, + { + Pattern: "INSERT INTO test VALUES(?)", + Args: []NamedArg{ + { + Value: "happy", + }, + }, + }, + }, + }, + } + + var err error + + // sign + err = req.Sign(privKey) + So(err, ShouldBeNil) + So(req.Header.BatchCount, ShouldEqual, uint64(len(req.Payload.Queries))) + + // test queries hash + err = verifyHash(&req.Payload, &req.Header.QueriesHash) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = req.Verify() + So(err, ShouldBeNil) + + Convey("header change", func() { + // modify structure + req.Header.Timestamp = req.Header.Timestamp.Add(time.Second) + + err = 
req.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("header change with invalid queries hash", func() { + req.Payload.Queries = append(req.Payload.Queries, + Query{ + Pattern: "select 1", + }, + ) + + err = req.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestResponse_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + res := &Response{ + Header: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + Payload: ResponsePayload{ + Columns: []string{ + "test_integer", + "test_boolean", + "test_time", + "test_nil", + "test_float", + "test_binary_string", + "test_string", + "test_empty_time", + }, + DeclTypes: []string{ + "INTEGER", + "BOOLEAN", + "DATETIME", + "INTEGER", + "FLOAT", + "BLOB", + "TEXT", + "DATETIME", + }, + Rows: []ResponseRow{ + { + Values: []interface{}{ + int(1), + true, + time.Now().UTC(), + nil, + float64(1.0001), + "11111\0001111111", + "11111111111111", + time.Time{}, + }, + }, + }, + }, + } + + var err error + + // sign directly, embedded original request is not filled + err = res.Sign(privKey) + So(err, ShouldNotBeNil) + So(errors.Cause(err), ShouldBeIn, []error{ + verifier.ErrHashValueNotMatch, + verifier.ErrSignatureNotMatch, + }) + + // sign original request first + err = res.Header.Request.Sign(privKey) + So(err, ShouldBeNil) + + // sign again + err = res.Sign(privKey) + So(err, ShouldBeNil) + + // test hash + err = verifyHash(&res.Payload, &res.Header.PayloadHash) + So(err, ShouldBeNil) + + // verify + Convey("verify", func() { + err = res.Verify() + So(err, ShouldBeNil) + + Convey("encode/decode verify", func() { + buf, err := utils.EncodeMsgPack(res) + So(err, ShouldBeNil) + var r *Response + err = utils.DecodeMsgPack(buf.Bytes(), &r) + So(err, ShouldBeNil) + err = r.Verify() + So(err, ShouldBeNil) + }) + Convey("request change", func() { + res.Header.Request.BatchCount = 200 + + err = res.Verify() + So(err, ShouldNotBeNil) + }) + Convey("payload change", func() { + res.Payload.DeclTypes[0] = "INT" + + err = res.Verify() + So(err, ShouldNotBeNil) + }) + Convey("header change", func() { + res.Header.Timestamp = res.Header.Timestamp.Add(time.Second) + + err = res.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestAck_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + ack := &Ack{ + Header: SignedAckHeader{ + AckHeader: AckHeader{ + Response: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + Timestamp: time.Now().UTC(), + }, + }, + } + + var err error + + 
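+ // Note: the nested layers below must be signed bottom-up (request, then response, then the ack itself); each outer Sign first verifies the layer beneath it, which is why the ack.Sign(privKey, true) call below fails until the inner layers are signed.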
Convey("get query key", func() { + key := ack.Header.SignedRequestHeader().GetQueryKey() + So(key.NodeID, ShouldEqual, ack.Header.SignedRequestHeader().NodeID) + So(key.ConnectionID, ShouldEqual, ack.Header.SignedRequestHeader().ConnectionID) + So(key.SeqNo, ShouldEqual, ack.Header.SignedRequestHeader().SeqNo) + }) + + // sign directly, embedded original response is not filled + err = ack.Sign(privKey, false) + So(err, ShouldBeNil) + err = ack.Sign(privKey, true) + So(err, ShouldNotBeNil) + So(errors.Cause(err), ShouldBeIn, []error{ + verifier.ErrHashValueNotMatch, + verifier.ErrSignatureNotMatch, + }) + + // sign nested structures step by step + // this is not required at runtime + // at runtime, nested structures are signed and provided by peers + err = ack.Header.Response.Request.Sign(privKey) + So(err, ShouldBeNil) + err = ack.Header.Response.Sign(privKey) + So(err, ShouldBeNil) + err = ack.Sign(privKey, true) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = ack.Verify() + So(err, ShouldBeNil) + + Convey("request change", func() { + ack.Header.Response.Request.QueryType = ReadQuery + + err = ack.Verify() + So(err, ShouldNotBeNil) + }) + Convey("response change", func() { + ack.Header.Response.RowCount = 100 + + err = ack.Verify() + So(err, ShouldNotBeNil) + }) + Convey("header change", func() { + ack.Header.Timestamp = ack.Header.Timestamp.Add(time.Second) + + err = ack.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestNoAckReport_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + noAck := &NoAckReport{ + Header: SignedNoAckReportHeader{ + NoAckReportHeader: NoAckReportHeader{ + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + Response: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + }, + }, + } + + var err error + + // sign directly, embedded original response/request is not filled + err = noAck.Sign(privKey) + So(err, ShouldNotBeNil) + So(errors.Cause(err), ShouldBeIn, []error{ + verifier.ErrHashValueNotMatch, + verifier.ErrSignatureNotMatch, + }) + + // sign nested structures + err = noAck.Header.Response.Request.Sign(privKey) + So(err, ShouldBeNil) + err = noAck.Header.Response.Sign(privKey) + So(err, ShouldBeNil) + err = noAck.Sign(privKey) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = noAck.Verify() + So(err, ShouldBeNil) + + Convey("request change", func() { + noAck.Header.Response.Request.QueryType = ReadQuery + + err = noAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("response change", func() { + noAck.Header.Response.RowCount = 100 + + err = noAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("header change", func() { + noAck.Header.Timestamp = noAck.Header.Timestamp.Add(time.Second) + + err = noAck.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestAggrNoAckReport_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + aggrNoAck := &AggrNoAckReport{ + Header: SignedAggrNoAckReportHeader{ + 
AggrNoAckReportHeader: AggrNoAckReportHeader{ + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Timestamp: time.Now().UTC(), + Reports: []SignedNoAckReportHeader{ + { + NoAckReportHeader: NoAckReportHeader{ + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + Response: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + }, + }, + { + NoAckReportHeader: NoAckReportHeader{ + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Timestamp: time.Now().UTC(), + Response: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + }, + }, + }, + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + }, + }, + }, + }, + }, + } + + var err error + + // sign directly, embedded original response/request is not filled + err = aggrNoAck.Sign(privKey) + So(err, ShouldNotBeNil) + So(errors.Cause(err), ShouldBeIn, []error{ + verifier.ErrHashValueNotMatch, + verifier.ErrSignatureNotMatch, + }) + + // sign nested structure + err = aggrNoAck.Header.Reports[0].Response.Request.Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[1].Response.Request.Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[0].Response.Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[1].Response.Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[0].Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[1].Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Sign(privKey) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = aggrNoAck.Verify() + So(err, ShouldBeNil) + + Convey("request change", func() { + aggrNoAck.Header.Reports[0].Response.Request.QueryType = ReadQuery + + err = aggrNoAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("response change", func() { + aggrNoAck.Header.Reports[0].Response.RowCount = 1000 + + err = aggrNoAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("report change", func() { + aggrNoAck.Header.Reports[0].Timestamp = aggrNoAck.Header.Reports[0].Timestamp.Add(time.Second) + + err = aggrNoAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("header change", func() { + aggrNoAck.Header.Timestamp = 
aggrNoAck.Header.Timestamp.Add(time.Second) + + err = aggrNoAck.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestInitServiceResponse_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + var err error + + initServiceResponse := &InitServiceResponse{ + Header: SignedInitServiceResponseHeader{ + InitServiceResponseHeader: InitServiceResponseHeader{ + Instances: []ServiceInstance{ + { + DatabaseID: proto.DatabaseID("db1"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + }, + }, + }, + // TODO(xq262144): should be integrated with genesis block serialization test + GenesisBlock: nil, + }, + }, + }, + }, + } + + // sign + err = initServiceResponse.Sign(privKey) + + Convey("verify", func() { + err = initServiceResponse.Verify() + So(err, ShouldBeNil) + + Convey("header change", func() { + initServiceResponse.Header.Instances[0].DatabaseID = proto.DatabaseID("db2") + + err = initServiceResponse.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestUpdateService_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + var err error + + updateServiceReq := &UpdateService{ + Header: SignedUpdateServiceHeader{ + UpdateServiceHeader: UpdateServiceHeader{ + Op: CreateDB, + Instance: ServiceInstance{ + DatabaseID: proto.DatabaseID("db1"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + }, + }, + }, + // TODO(xq262144): should be integrated with genesis block serialization test + GenesisBlock: nil, + }, + }, + }, + } + + // sign + err = updateServiceReq.Sign(privKey) + + Convey("verify", func() { + err = updateServiceReq.Verify() + So(err, ShouldBeNil) + + Convey("header change", func() { + updateServiceReq.Header.Instance.DatabaseID = proto.DatabaseID("db2") + + err = updateServiceReq.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestOther_MarshalHash(t *testing.T) { + Convey("marshal hash", t, func() { + tm := UpdateType(1) + s, err := tm.MarshalHash() + So(err, ShouldBeNil) + So(s, ShouldNotBeEmpty) + + tm2 := QueryType(1) + s, err = tm2.MarshalHash() + So(err, ShouldBeNil) + So(s, ShouldNotBeEmpty) + }) +} + +func TestQueryTypeStringer(t *testing.T) { + Convey("Query type stringer should return expected string", t, func() { + var cases = [...]struct { + i fmt.Stringer + s string + }{ + { + i: ReadQuery, + s: "read", + }, { + i: WriteQuery, + s: "write", + }, { + i: QueryType(0xffff), + s: "unknown", + }, + } + for _, v := range cases { + So(v.s, ShouldEqual, fmt.Sprintf("%v", v.i)) + } + }) +} diff --git a/types/update_service_type.go b/types/update_service_type.go new file mode 100644 index 000000000..7eca089bf --- /dev/null +++ b/types/update_service_type.go @@ -0,0 +1,79 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +//go:generate hsp + +// UpdateType defines service update type. +type UpdateType int32 + +const ( + // CreateDB indicates a create database operation. + CreateDB UpdateType = iota + // UpdateDB indicates a database peers update operation. + UpdateDB + // DropDB indicates a drop database operation. + DropDB +) + +// UpdateServiceHeader defines the service update header. +type UpdateServiceHeader struct { + Op UpdateType + Instance ServiceInstance +} + +// SignedUpdateServiceHeader defines the signed service update header. +type SignedUpdateServiceHeader struct { + UpdateServiceHeader + verifier.DefaultHashSignVerifierImpl +} + +// UpdateService defines a service update request. +type UpdateService struct { + proto.Envelope + Header SignedUpdateServiceHeader +} + +// UpdateServiceResponse defines an empty response entity. +type UpdateServiceResponse struct{} + +// Verify checks hash and signature in update service header. +func (sh *SignedUpdateServiceHeader) Verify() (err error) { + return sh.DefaultHashSignVerifierImpl.Verify(&sh.UpdateServiceHeader) +} + +// Sign the update service header. +func (sh *SignedUpdateServiceHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + return sh.DefaultHashSignVerifierImpl.Sign(&sh.UpdateServiceHeader, signer) +} + +// Verify checks hash and signature in update service. +func (s *UpdateService) Verify() error { + return s.Header.Verify() +} + +// Sign the request. +func (s *UpdateService) Sign(signer *asymmetric.PrivateKey) (err error) { + // sign + return s.Header.Sign(signer) +} diff --git a/types/update_service_type_gen.go b/types/update_service_type_gen.go new file mode 100644 index 000000000..c134c6635 --- /dev/null +++ b/types/update_service_type_gen.go @@ -0,0 +1,113 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
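+ +// Hash-stability note: UpdateType (like QueryType) is hashed as a bare int32 via hsp.AppendInt32, so its contribution to a hash depends only on the numeric enum value; renumbering or reordering the iota constants in update_service_type.go would silently change every hash that covers an Op field.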
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *SignedUpdateServiceHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + o = append(o, 0x82, 0x82, 0x82, 0x82) + o = hsp.AppendInt32(o, int32(z.UpdateServiceHeader.Op)) + o = append(o, 0x82) + if oTemp, err := z.UpdateServiceHeader.Instance.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedUpdateServiceHeader) Msgsize() (s int) { + s = 1 + 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *UpdateService) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *UpdateService) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *UpdateServiceHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Instance.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + o = hsp.AppendInt32(o, int32(z.Op)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *UpdateServiceHeader) Msgsize() (s int) { + s = 1 + 9 + z.Instance.Msgsize() + 3 + hsp.Int32Size + return +} + +// MarshalHash marshals for hash +func (z UpdateServiceResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 0 + o = append(o, 0x80) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z UpdateServiceResponse) Msgsize() (s int) { + s = 1 + return +} + +// MarshalHash marshals for hash +func (z UpdateType) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendInt32(o, int32(z)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z UpdateType) Msgsize() (s int) { + s = hsp.Int32Size + return +} diff --git a/worker/types/update_service_type_gen_test.go b/types/update_service_type_gen_test.go similarity index 100% rename from worker/types/update_service_type_gen_test.go rename to types/update_service_type_gen_test.go diff --git a/types/util.go b/types/util.go new file mode 100644 index 000000000..ab09e4310 --- /dev/null +++ b/types/util.go @@ -0,0 +1,48 @@ +/* + * Copyright 2018 The CovenantSQL Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/pkg/errors" +) + +type canMarshalHash interface { + MarshalHash() ([]byte, error) +} + +func verifyHash(data canMarshalHash, h *hash.Hash) (err error) { + var newHash hash.Hash + if err = buildHash(data, &newHash); err != nil { + return + } + if !newHash.IsEqual(h) { + return errors.Cause(verifier.ErrHashValueNotMatch) + } + return +} + +func buildHash(data canMarshalHash, h *hash.Hash) (err error) { + var hashBytes []byte + if hashBytes, err = data.MarshalHash(); err != nil { + return + } + newHash := hash.THashH(hashBytes) + copy(h[:], newHash[:]) + return +} diff --git a/types/xxx_test.go b/types/xxx_test.go new file mode 100644 index 000000000..55d87c586 --- /dev/null +++ b/types/xxx_test.go @@ -0,0 +1,136 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package types + +import ( + "io/ioutil" + "math/rand" + "os" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +var ( + genesisHash = hash.Hash{} +) + +func setup() { + rand.Seed(time.Now().UnixNano()) + rand.Read(genesisHash[:]) + f, err := ioutil.TempFile("", "keystore") + + if err != nil { + panic(err) + } + + f.Close() + + if err = kms.InitPublicKeyStore(f.Name(), nil); err != nil { + panic(err) + } + + kms.Unittest = true + + if priv, pub, err := asymmetric.GenSecp256k1KeyPair(); err == nil { + kms.SetLocalKeyPair(priv, pub) + } else { + panic(err) + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) +} + +func createRandomString(offset, length int, s *string) { + buff := make([]byte, rand.Intn(length)+offset) + rand.Read(buff) + *s = string(buff) +} + +func createRandomStrings(offset, length, soffset, slength int) (s []string) { + s = make([]string, rand.Intn(length)+offset) + + for i := range s { + createRandomString(soffset, slength, &s[i]) + } + + return +} + +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *Block, err error) { + // Generate key pair + priv, pub, err := asymmetric.GenSecp256k1KeyPair() + + if err != nil { + return + } + + h := hash.Hash{} + rand.Read(h[:]) + + b = &Block{ + SignedHeader: SignedHeader{ + Header: Header{ + Version: 0x01000000, + Producer: proto.NodeID(h.String()), + GenesisHash: genesisHash, + ParentHash: parent, + Timestamp: time.Now().UTC(), + }, + }, + } + + if isGenesis { + // Compute nonce with public key + nonceCh := make(chan cpuminer.NonceInfo) + quitCh := make(chan struct{}) + miner := cpuminer.NewCPUMiner(quitCh) + go miner.ComputeBlockNonce(cpuminer.MiningBlock{ + Data: pub.Serialize(), + NonceChan: nonceCh, + Stop: nil, + }, cpuminer.Uint256{A: 0, B: 0, C: 0, D: 0}, 4) + nonce := <-nonceCh + close(quitCh) + close(nonceCh) + // Add public key to KMS + id := cpuminer.HashBlock(pub.Serialize(), nonce.Nonce) + b.SignedHeader.Header.Producer = proto.NodeID(id.String()) + + if err = kms.SetPublicKey(proto.NodeID(id.String()), nonce.Nonce, pub); err != nil { + return nil, err + } + + // Set genesis hash as zero value + b.SignedHeader.GenesisHash = hash.Hash{} + } + + err = b.PackAndSignBlock(priv) + return +} + +func TestMain(m *testing.M) { + setup() + os.Exit(m.Run()) +} diff --git a/utils/bytes_test.go b/utils/bytes_test.go index 9276681de..1d056aaae 100644 --- a/utils/bytes_test.go +++ b/utils/bytes_test.go @@ -36,7 +36,7 @@ func TestNewLevelDBKey(t *testing.T) { So(ConcatAll([]byte{'0', '1', '2', '3'}, nil, []byte{'x', 'y', 'z'}), ShouldResemble, []byte{'0', '1', '2', '3', 'x', 'y', 'z'}) So(ConcatAll([]byte{'0', '1', '2', '3'}, []byte{}, []byte{'x', 'y', 'z'}), - ShouldResemble, []byte{'0', '1', '2', '3','x', 'y', 'z'}) + ShouldResemble, []byte{'0', '1', '2', '3', 'x', 'y', 'z'}) So(ConcatAll(nil, []byte{'0', '1', '2', '3'}, nil, []byte{'x', 'y', 'z'}), ShouldResemble, []byte{'0', '1', '2', '3', 'x', 'y', 'z'}) So(ConcatAll([]byte{}, []byte{'0', '1', '2', '3'}, nil, []byte{'x', 'y', 'z'}, nil), diff --git a/utils/exec.go b/utils/exec.go index 303e000aa..9509b538f 100644 --- a/utils/exec.go +++ b/utils/exec.go @@ -58,23 +58,6 @@ func Build() (err error) { return } -// CleanupDB runs cleanupDB.sh -func CleanupDB() (err error) { - wd 
:= GetProjectSrcDir() - err = os.Chdir(wd) - if err != nil { - log.WithError(err).Error("change working dir failed") - return - } - cmd := exec.Command("./cleanupDB.sh") - output, err := cmd.CombinedOutput() - if err != nil { - log.WithError(err).Error("cleanupDB failed") - } - log.Debugf("cleanupDB output info: %#v", string(output)) - return -} - // RunCommand runs a command and capture its output to a log file, // if toStd is true also output to stdout and stderr func RunCommand(bin string, args []string, processName string, workingDir string, logDir string, toStd bool) (err error) { diff --git a/utils/exec_test.go b/utils/exec_test.go index 695b40ee9..f0e37b55e 100644 --- a/utils/exec_test.go +++ b/utils/exec_test.go @@ -38,13 +38,6 @@ func TestBuild(t *testing.T) { }) } -func TestCleanupDB(t *testing.T) { - Convey("CleanupDB", t, func() { - log.SetLevel(log.DebugLevel) - So(CleanupDB(), ShouldBeNil) - }) -} - func TestRunServer(t *testing.T) { Convey("build", t, func() { log.SetLevel(log.DebugLevel) diff --git a/utils/log/entrylogwrapper.go b/utils/log/entrylogwrapper.go index 125c05b74..666558b9a 100644 --- a/utils/log/entrylogwrapper.go +++ b/utils/log/entrylogwrapper.go @@ -22,8 +22,10 @@ import ( "github.com/sirupsen/logrus" ) +// Entry defines an alias for the logrus entry. type Entry logrus.Entry +// NewEntry returns a new entry for the logrus logger. func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: (*logrus.Logger)(logger), @@ -32,128 +34,151 @@ func NewEntry(logger *Logger) *Entry { } } -// Returns the string representation from the reader and ultimately the -// formatter. +// String returns the string representation from the reader and ultimately the formatter. func (entry *Entry) String() (string, error) { return (*logrus.Entry)(entry).String() } -// Add an error as single field (using the key defined in ErrorKey) to the Entry. +// WithError adds an error as a single field (using the key defined in ErrorKey) to the Entry. func (entry *Entry) WithError(err error) *Entry { return (*Entry)((*logrus.Entry)(entry).WithError(err)) } -// Add a single field to the Entry. +// WithField adds a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return (*Entry)((*logrus.Entry)(entry).WithField(key, value)) } -// Add a map of fields to the Entry. +// WithFields adds a map of fields to the Entry. func (entry *Entry) WithFields(fields Fields) *Entry { return (*Entry)((*logrus.Entry)(entry).WithFields((logrus.Fields)(fields))) } -// Overrides the time of the Entry. +// WithTime overrides the time of the Entry. func (entry *Entry) WithTime(t time.Time) *Entry { return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} } +// Debug records a new debug level log. func (entry *Entry) Debug(args ...interface{}) { (*logrus.Entry)(entry).Debug(args...) } +// Print records a new non-level log. func (entry *Entry) Print(args ...interface{}) { (*logrus.Entry)(entry).Print(args...) } +// Info records a new info level log. func (entry *Entry) Info(args ...interface{}) { (*logrus.Entry)(entry).Info(args...) } +// Warn records a new warning level log. func (entry *Entry) Warn(args ...interface{}) { (*logrus.Entry)(entry).Warn(args...) } +// Warning records a new warning level log. func (entry *Entry) Warning(args ...interface{}) { (*logrus.Entry)(entry).Warning(args...) } +// Error records a new error level log. func (entry *Entry) Error(args ...interface{}) { (*logrus.Entry)(entry).Error(args...) } +// Fatal records a fatal level log.
func (entry *Entry) Fatal(args ...interface{}) { (*logrus.Entry)(entry).Fatal(args...) } +// Panic records a panic level log. func (entry *Entry) Panic(args ...interface{}) { (*logrus.Entry)(entry).Panic(args...) } // Entry Printf family functions +// Debugf records a debug level log. func (entry *Entry) Debugf(format string, args ...interface{}) { (*logrus.Entry)(entry).Debugf(format, args...) } +// Infof records an info level log. func (entry *Entry) Infof(format string, args ...interface{}) { (*logrus.Entry)(entry).Infof(format, args...) } +// Printf records a new non-level log. func (entry *Entry) Printf(format string, args ...interface{}) { (*logrus.Entry)(entry).Printf(format, args...) } +// Warnf records a warning level log. func (entry *Entry) Warnf(format string, args ...interface{}) { (*logrus.Entry)(entry).Warnf(format, args...) } +// Warningf records a warning level log. func (entry *Entry) Warningf(format string, args ...interface{}) { (*logrus.Entry)(entry).Warningf(format, args...) } +// Errorf records an error level log. func (entry *Entry) Errorf(format string, args ...interface{}) { (*logrus.Entry)(entry).Errorf(format, args...) } +// Fatalf records a fatal level log. func (entry *Entry) Fatalf(format string, args ...interface{}) { (*logrus.Entry)(entry).Fatalf(format, args...) } +// Panicf records a panic level log. func (entry *Entry) Panicf(format string, args ...interface{}) { (*logrus.Entry)(entry).Panicf(format, args...) } // Entry Println family functions +// Debugln records a debug level log. func (entry *Entry) Debugln(args ...interface{}) { (*logrus.Entry)(entry).Debugln(args...) } +// Infoln records an info level log. func (entry *Entry) Infoln(args ...interface{}) { (*logrus.Entry)(entry).Infoln(args...) } +// Println records a non-level log. func (entry *Entry) Println(args ...interface{}) { (*logrus.Entry)(entry).Println(args...) } +// Warnln records a warning level log. func (entry *Entry) Warnln(args ...interface{}) { (*logrus.Entry)(entry).Warnln(args...) } +// Warningln records a warning level log. func (entry *Entry) Warningln(args ...interface{}) { (*logrus.Entry)(entry).Warningln(args...) } +// Errorln records an error level log. func (entry *Entry) Errorln(args ...interface{}) { (*logrus.Entry)(entry).Errorln(args...) } +// Fatalln records a fatal level log. func (entry *Entry) Fatalln(args ...interface{}) { (*logrus.Entry)(entry).Fatalln(args...) } +// Panicln records a panic level log. func (entry *Entry) Panicln(args ...interface{}) { (*logrus.Entry)(entry).Panicln(args...) } diff --git a/utils/log/logwrapper.go b/utils/log/logwrapper.go index 3c32421f5..684e04a8f 100644 --- a/utils/log/logwrapper.go +++ b/utils/log/logwrapper.go @@ -102,9 +102,9 @@ func (hook *CallerHook) Levels() []logrus.Level { logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, + //logrus.WarnLevel, + //logrus.InfoLevel, + //logrus.DebugLevel, } } @@ -224,6 +224,7 @@ func WithFields(fields Fields) *Entry { return (*Entry)(logrus.WithFields(logrus.Fields(fields))) } +// WithTime adds a time field to the log entry.
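A quick usage sketch of the wrapper API above (the error and field values are hypothetical; the import path matches the one used elsewhere in this patch):

package main

import (
	"errors"

	"github.com/CovenantSQL/CovenantSQL/utils/log"
)

func main() {
	err := errors.New("disk full")
	// Call sites read exactly like plain logrus, but go through the local Entry type.
	log.WithFields(log.Fields{
		"node": "miner-1", // hypothetical field values
		"db":   "db0001",
	}).WithError(err).Errorf("apply block failed at height %d", 42)
}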
func WithTime(t time.Time) *Entry { return (*Entry)(logrus.WithTime(t)) } diff --git a/utils/log/logwrapper_test.go b/utils/log/logwrapper_test.go index acc6dfc0d..26fbfe485 100644 --- a/utils/log/logwrapper_test.go +++ b/utils/log/logwrapper_test.go @@ -22,7 +22,6 @@ import ( "time" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) diff --git a/utils/net.go b/utils/net.go index 40c6d7a48..71055bf6e 100644 --- a/utils/net.go +++ b/utils/net.go @@ -41,10 +41,10 @@ func testPortConnectable(addr string, timeout time.Duration) bool { if err != nil { log.Infof("test dial to %s failed", addr) return false - } else { - conn.Close() - return true } + + conn.Close() + return true } func testPort(bindAddr string, port int, excludeAllocated bool) bool { diff --git a/utils/profiler.go b/utils/profiler.go index 013dd3e7c..01b5f9b67 100644 --- a/utils/profiler.go +++ b/utils/profiler.go @@ -48,7 +48,7 @@ func StartProfile(cpuprofile, memprofile string) error { log.WithField("file", memprofile).WithError(err).Error("failed to create memory profile file") return err } - log.WithField("file", cpuprofile).WithError(err).Info("writing memory profiling to file") + log.WithField("file", memprofile).WithError(err).Info("writing memory profiling to file") prof.mem = f runtime.MemProfileRate = 4096 } @@ -63,7 +63,7 @@ func StopProfile() { log.Info("CPU profiling stopped") } if prof.mem != nil { - pprof.Lookup("heap").WriteTo(prof.mem, 0) + pprof.WriteHeapProfile(prof.mem) prof.mem.Close() log.Info("memory profiling stopped") } diff --git a/vendor/bazil.org/fuse/.gitattributes b/vendor/bazil.org/fuse/.gitattributes new file mode 100644 index 000000000..b65f2a9ff --- /dev/null +++ b/vendor/bazil.org/fuse/.gitattributes @@ -0,0 +1,2 @@ +*.go filter=gofmt +*.cgo filter=gofmt diff --git a/vendor/bazil.org/fuse/LICENSE b/vendor/bazil.org/fuse/LICENSE new file mode 100644 index 000000000..4ac7cd838 --- /dev/null +++ b/vendor/bazil.org/fuse/LICENSE @@ -0,0 +1,93 @@ +Copyright (c) 2013-2015 Tommi Virtanen. +Copyright (c) 2009, 2011, 2012 The Go Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + +The following included software components have additional copyright +notices and license terms that may differ from the above. + + +File fuse.go: + +// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, +// which carries this notice: +// +// The files in this directory are subject to the following license. +// +// The author of this software is Russ Cox. +// +// Copyright (c) 2006 Russ Cox +// +// Permission to use, copy, modify, and distribute this software for any +// purpose without fee is hereby granted, provided that this entire notice +// is included in all copies of any software which is or includes a copy +// or modification of this software and in all copies of the supporting +// documentation for such software. +// +// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY +// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS +// FITNESS FOR ANY PARTICULAR PURPOSE. + + +File fuse_kernel.go: + +// Derived from FUSE's fuse_kernel.h +/* + This file defines the kernel interface of FUSE + Copyright (C) 2001-2007 Miklos Szeredi + + + This -- and only this -- header file may also be distributed under + the terms of the BSD Licence as follows: + + Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. +*/ diff --git a/vendor/bazil.org/fuse/README.md b/vendor/bazil.org/fuse/README.md new file mode 100644 index 000000000..8c6d556ee --- /dev/null +++ b/vendor/bazil.org/fuse/README.md @@ -0,0 +1,23 @@ +bazil.org/fuse -- Filesystems in Go +=================================== + +`bazil.org/fuse` is a Go library for writing FUSE userspace +filesystems. + +It is a from-scratch implementation of the kernel-userspace +communication protocol, and does not use the C library from the +project called FUSE. `bazil.org/fuse` embraces Go fully for safety and +ease of programming. + +Here’s how to get going: + + go get bazil.org/fuse + +Website: http://bazil.org/fuse/ + +Github repository: https://github.com/bazil/fuse + +API docs: http://godoc.org/bazil.org/fuse + +Our thanks to Russ Cox for his fuse library, which this project is +based on. 
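Before the individual vendored files, a minimal end-to-end sketch of the API the README describes, built only from interfaces that appear later in this diff (fs.FS, fs.Node, the implicit Node-as-Handle behavior, fuse.Mount, fs.Serve); the mountpoint path and payload are assumptions:

package main

import (
	"log"
	"os"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"golang.org/x/net/context"
)

type helloFS struct{}

func (helloFS) Root() (fs.Node, error) { return dir{}, nil }

// dir is a read-only root directory exposing a single file.
type dir struct{}

func (dir) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 1
	a.Mode = os.ModeDir | 0555
	return nil
}

func (dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
	if name == "hello" {
		return file{}, nil
	}
	return nil, fuse.ENOENT
}

// file serves a fixed payload; with no Open method, the Node itself
// doubles as the Handle, and ReadAll satisfies HandleReadAller.
type file struct{}

const greeting = "hello, world\n"

func (file) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 2
	a.Mode = 0444
	a.Size = uint64(len(greeting))
	return nil
}

func (file) ReadAll(ctx context.Context) ([]byte, error) {
	return []byte(greeting), nil
}

func main() {
	c, err := fuse.Mount("/mnt/hello") // mountpoint is an assumption
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	if err := fs.Serve(c, helloFS{}); err != nil {
		log.Fatal(err)
	}
}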
diff --git a/vendor/bazil.org/fuse/buffer.go b/vendor/bazil.org/fuse/buffer.go new file mode 100644 index 000000000..bb1d2b776 --- /dev/null +++ b/vendor/bazil.org/fuse/buffer.go @@ -0,0 +1,35 @@ +package fuse + +import "unsafe" + +// buffer provides a mechanism for constructing a message from +// multiple segments. +type buffer []byte + +// alloc allocates size bytes and returns a pointer to the new +// segment. +func (w *buffer) alloc(size uintptr) unsafe.Pointer { + s := int(size) + if len(*w)+s > cap(*w) { + old := *w + *w = make([]byte, len(*w), 2*cap(*w)+s) + copy(*w, old) + } + l := len(*w) + *w = (*w)[:l+s] + return unsafe.Pointer(&(*w)[l]) +} + +// reset clears out the contents of the buffer. +func (w *buffer) reset() { + for i := range (*w)[:cap(*w)] { + (*w)[i] = 0 + } + *w = (*w)[:0] +} + +func newBuffer(extra uintptr) buffer { + const hdrSize = unsafe.Sizeof(outHeader{}) + buf := make(buffer, hdrSize, hdrSize+extra) + return buf +} diff --git a/vendor/bazil.org/fuse/debug.go b/vendor/bazil.org/fuse/debug.go new file mode 100644 index 000000000..be9f900d5 --- /dev/null +++ b/vendor/bazil.org/fuse/debug.go @@ -0,0 +1,21 @@ +package fuse + +import ( + "runtime" +) + +func stack() string { + buf := make([]byte, 1024) + return string(buf[:runtime.Stack(buf, false)]) +} + +func nop(msg interface{}) {} + +// Debug is called to output debug messages, including protocol +// traces. The default behavior is to do nothing. +// +// The messages have human-friendly string representations and are +// safe to marshal to JSON. +// +// Implementations must not retain msg. +var Debug func(msg interface{}) = nop diff --git a/vendor/bazil.org/fuse/error_darwin.go b/vendor/bazil.org/fuse/error_darwin.go new file mode 100644 index 000000000..a3fb89ca2 --- /dev/null +++ b/vendor/bazil.org/fuse/error_darwin.go @@ -0,0 +1,17 @@ +package fuse + +import ( + "syscall" +) + +const ( + ENOATTR = Errno(syscall.ENOATTR) +) + +const ( + errNoXattr = ENOATTR +) + +func init() { + errnoNames[errNoXattr] = "ENOATTR" +} diff --git a/vendor/bazil.org/fuse/error_freebsd.go b/vendor/bazil.org/fuse/error_freebsd.go new file mode 100644 index 000000000..c6ea6d6e7 --- /dev/null +++ b/vendor/bazil.org/fuse/error_freebsd.go @@ -0,0 +1,15 @@ +package fuse + +import "syscall" + +const ( + ENOATTR = Errno(syscall.ENOATTR) +) + +const ( + errNoXattr = ENOATTR +) + +func init() { + errnoNames[errNoXattr] = "ENOATTR" +} diff --git a/vendor/bazil.org/fuse/error_linux.go b/vendor/bazil.org/fuse/error_linux.go new file mode 100644 index 000000000..6f113e71e --- /dev/null +++ b/vendor/bazil.org/fuse/error_linux.go @@ -0,0 +1,17 @@ +package fuse + +import ( + "syscall" +) + +const ( + ENODATA = Errno(syscall.ENODATA) +) + +const ( + errNoXattr = ENODATA +) + +func init() { + errnoNames[errNoXattr] = "ENODATA" +} diff --git a/vendor/bazil.org/fuse/error_std.go b/vendor/bazil.org/fuse/error_std.go new file mode 100644 index 000000000..398f43fbf --- /dev/null +++ b/vendor/bazil.org/fuse/error_std.go @@ -0,0 +1,31 @@ +package fuse + +// There is very little commonality in extended attribute errors +// across platforms. +// +// getxattr return value for "extended attribute does not exist" is +// ENOATTR on OS X, and ENODATA on Linux and apparently at least +// NetBSD. There may be a #define ENOATTR on Linux too, but the value +// is ENODATA in the actual syscalls. FreeBSD and OpenBSD have no +// ENODATA, only ENOATTR. ENOATTR is not in any of the standards, +// ENODATA exists but is only used for STREAMs. 
+// +// Each platform will define an errNoXattr constant, and this file +// will enforce that it implements the right interfaces and hide the +// implementation. +// +// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html +// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html +// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html +// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html +// http://www.freebsd.org/cgi/man.cgi?query=extattr_get_file&sektion=2 +// http://nixdoc.net/man-pages/openbsd/man2/extattr_get_file.2.html + +// ErrNoXattr is a platform-independent error value meaning the +// extended attribute was not found. It can be used to respond to +// GetxattrRequest and such. +const ErrNoXattr = errNoXattr + +var _ error = ErrNoXattr +var _ Errno = ErrNoXattr +var _ ErrorNumber = ErrNoXattr diff --git a/vendor/bazil.org/fuse/fs/fstestutil/checkdir.go b/vendor/bazil.org/fuse/fs/fstestutil/checkdir.go new file mode 100644 index 000000000..74e5899e9 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/checkdir.go @@ -0,0 +1,70 @@ +package fstestutil + +import ( + "fmt" + "io/ioutil" + "os" +) + +// FileInfoCheck is a function that validates an os.FileInfo according +// to some criteria. +type FileInfoCheck func(fi os.FileInfo) error + +type checkDirError struct { + missing map[string]struct{} + extra map[string]os.FileMode +} + +func (e *checkDirError) Error() string { + return fmt.Sprintf("wrong directory contents: missing %v, extra %v", e.missing, e.extra) +} + +// CheckDir checks the contents of the directory at path, making sure +// every directory entry listed in want is present. If the check is +// not nil, it must also pass. +// +// If want contains the impossible filename "", unexpected files are +// checked with that. If the key is not in want, unexpected files are +// an error. +// +// Missing entries, which are listed in want but not seen, are an +// error.
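A usage sketch for the function defined next (names are hypothetical; mnt.Dir comes from the Mount type later in this package):

err := fstestutil.CheckDir(mnt.Dir, map[string]fstestutil.FileInfoCheck{
	// entry must exist; nil means no further checks
	"hello": nil,
	// entry must exist and pass the check
	"sub": func(fi os.FileInfo) error {
		if !fi.IsDir() {
			return fmt.Errorf("expected a directory, got mode %v", fi.Mode())
		}
		return nil
	},
})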
+func CheckDir(path string, want map[string]FileInfoCheck) error { + problems := &checkDirError{ + missing: make(map[string]struct{}, len(want)), + extra: make(map[string]os.FileMode), + } + for k := range want { + if k == "" { + continue + } + problems.missing[k] = struct{}{} + } + + fis, err := ioutil.ReadDir(path) + if err != nil { + return fmt.Errorf("cannot read directory: %v", err) + } + + for _, fi := range fis { + check, ok := want[fi.Name()] + if !ok { + check, ok = want[""] + } + if !ok { + problems.extra[fi.Name()] = fi.Mode() + continue + } + delete(problems.missing, fi.Name()) + if check != nil { + if err := check(fi); err != nil { + return fmt.Errorf("check failed: %v: %v", fi.Name(), err) + } + } + } + + if len(problems.missing) > 0 || len(problems.extra) > 0 { + return problems + } + return nil +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/debug.go b/vendor/bazil.org/fuse/fs/fstestutil/debug.go new file mode 100644 index 000000000..df44a0c65 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/debug.go @@ -0,0 +1,65 @@ +package fstestutil + +import ( + "flag" + "log" + "strconv" + + "bazil.org/fuse" +) + +type flagDebug bool + +var debug flagDebug + +var _ = flag.Value(&debug) + +func (f *flagDebug) IsBoolFlag() bool { + return true +} + +func nop(msg interface{}) {} + +func (f *flagDebug) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + *f = flagDebug(v) + if v { + fuse.Debug = logMsg + } else { + fuse.Debug = nop + } + return nil +} + +func (f *flagDebug) String() string { + return strconv.FormatBool(bool(*f)) +} + +func logMsg(msg interface{}) { + log.Printf("FUSE: %s\n", msg) +} + +func init() { + flag.Var(&debug, "fuse.debug", "log FUSE processing details") +} + +// DebugByDefault changes the default of the `-fuse.debug` flag to +// true. +// +// This package registers a command line flag `-fuse.debug` and when +// run with that flag (and activated inside the tests), logs FUSE +// debug messages. +// +// This is disabled by default, as most callers probably won't care +// about FUSE details. Use DebugByDefault for tests where you'd +// normally be passing `-fuse.debug` all the time anyway. +// +// Call from an init function. +func DebugByDefault() { + f := flag.Lookup("fuse.debug") + f.DefValue = "true" + f.Value.Set(f.DefValue) +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/doc.go b/vendor/bazil.org/fuse/fs/fstestutil/doc.go new file mode 100644 index 000000000..3f729dddc --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/doc.go @@ -0,0 +1 @@ +package fstestutil // import "bazil.org/fuse/fs/fstestutil" diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mounted.go b/vendor/bazil.org/fuse/fs/fstestutil/mounted.go new file mode 100644 index 000000000..2fae1588a --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mounted.go @@ -0,0 +1,141 @@ +package fstestutil + +import ( + "errors" + "io/ioutil" + "log" + "os" + "testing" + "time" + + "bazil.org/fuse" + "bazil.org/fuse/fs" +) + +// Mount contains information about the mount for the test to use. +type Mount struct { + // Dir is the temporary directory where the filesystem is mounted. + Dir string + + Conn *fuse.Conn + Server *fs.Server + + // Error will receive the return value of Serve. + Error <-chan error + + done <-chan struct{} + closed bool +} + +// Close unmounts the filesystem and waits for fs.Serve to return. Any +// returned error will be stored in Err. It is safe to call Close +// multiple times. 
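A typical test lifecycle around Mount and Close (sketch; the fixture filesystem is borrowed from testfs.go further down, and MountedT is defined later in this file):

func TestMountLifecycle(t *testing.T) {
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{Node: fstestutil.Dir{}}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close() // unmount and wait for fs.Serve to return

	// exercise the filesystem through mnt.Dir with ordinary file I/O
	if _, err := os.Stat(mnt.Dir); err != nil {
		t.Fatal(err)
	}
}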
+func (mnt *Mount) Close() { + if mnt.closed { + return + } + mnt.closed = true + for tries := 0; tries < 1000; tries++ { + err := fuse.Unmount(mnt.Dir) + if err != nil { + // TODO do more than log? + log.Printf("unmount error: %v", err) + time.Sleep(10 * time.Millisecond) + continue + } + break + } + <-mnt.done + mnt.Conn.Close() + os.Remove(mnt.Dir) +} + +// MountedFunc mounts a filesystem at a temporary directory. The +// filesystem used is constructed by calling a function, to allow +// storing fuse.Conn and fs.Server in the FS. +// +// It also waits until the filesystem is known to be visible (OS X +// workaround). +// +// After successful return, caller must clean up by calling Close. +func MountedFunc(fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) { + dir, err := ioutil.TempDir("", "fusetest") + if err != nil { + return nil, err + } + c, err := fuse.Mount(dir, options...) + if err != nil { + return nil, err + } + server := fs.New(c, conf) + done := make(chan struct{}) + serveErr := make(chan error, 1) + mnt := &Mount{ + Dir: dir, + Conn: c, + Server: server, + Error: serveErr, + done: done, + } + filesys := fn(mnt) + go func() { + defer close(done) + serveErr <- server.Serve(filesys) + }() + + select { + case <-mnt.Conn.Ready: + if err := mnt.Conn.MountError; err != nil { + return nil, err + } + return mnt, nil + case err = <-mnt.Error: + // Serve quit early + if err != nil { + return nil, err + } + return nil, errors.New("Serve exited early") + } +} + +// Mounted mounts the fuse.Server at a temporary directory. +// +// It also waits until the filesystem is known to be visible (OS X +// workaround). +// +// After successful return, caller must clean up by calling Close. +func Mounted(filesys fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) { + fn := func(*Mount) fs.FS { return filesys } + return MountedFunc(fn, conf, options...) +} + +// MountedFuncT mounts a filesystem at a temporary directory, +// directing it's debug log to the testing logger. +// +// See MountedFunc for usage. +// +// The debug log is not enabled by default. Use `-fuse.debug` or call +// DebugByDefault to enable. +func MountedFuncT(t testing.TB, fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) { + if conf == nil { + conf = &fs.Config{} + } + if debug && conf.Debug == nil { + conf.Debug = func(msg interface{}) { + t.Logf("FUSE: %s", msg) + } + } + return MountedFunc(fn, conf, options...) +} + +// MountedT mounts the filesystem at a temporary directory, +// directing it's debug log to the testing logger. +// +// See Mounted for usage. +// +// The debug log is not enabled by default. Use `-fuse.debug` or call +// DebugByDefault to enable. +func MountedT(t testing.TB, filesys fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) { + fn := func(*Mount) fs.FS { return filesys } + return MountedFuncT(t, fn, conf, options...) +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mountinfo.go b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo.go new file mode 100644 index 000000000..654417bc4 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo.go @@ -0,0 +1,26 @@ +package fstestutil + +// MountInfo describes a mounted file system. +type MountInfo struct { + FSName string + Type string +} + +// GetMountInfo finds information about the mount at mnt. It is +// intended for use by tests only, and only fetches information +// relevant to the current tests. 
+func GetMountInfo(mnt string) (*MountInfo, error) { + return getMountInfo(mnt) +} + +// cstr converts a nil-terminated C string into a Go string +func cstr(ca []int8) string { + s := make([]byte, 0, len(ca)) + for _, c := range ca { + if c == 0x00 { + break + } + s = append(s, byte(c)) + } + return string(s) +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go new file mode 100644 index 000000000..f987bd8e7 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go @@ -0,0 +1,29 @@ +package fstestutil + +import ( + "regexp" + "syscall" +) + +var re = regexp.MustCompile(`\\(.)`) + +// unescape removes backslash-escaping. The escaped characters are not +// mapped in any way; that is, unescape(`\n` ) == `n`. +func unescape(s string) string { + return re.ReplaceAllString(s, `$1`) +} + +func getMountInfo(mnt string) (*MountInfo, error) { + var st syscall.Statfs_t + err := syscall.Statfs(mnt, &st) + if err != nil { + return nil, err + } + i := &MountInfo{ + // osx getmntent(3) fails to un-escape the data, so we do it.. + // this might lead to double-unescaping in the future. fun. + // TestMountOptionFSNameEvilBackslashDouble checks for that. + FSName: unescape(cstr(st.Mntfromname[:])), + } + return i, nil +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_freebsd.go b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_freebsd.go new file mode 100644 index 000000000..f70e9975e --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_freebsd.go @@ -0,0 +1,7 @@ +package fstestutil + +import "errors" + +func getMountInfo(mnt string) (*MountInfo, error) { + return nil, errors.New("FreeBSD has no useful mount information") +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go new file mode 100644 index 000000000..c502cf59b --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go @@ -0,0 +1,51 @@ +package fstestutil + +import ( + "errors" + "io/ioutil" + "strings" +) + +// Linux /proc/mounts shows current mounts. +// Same format as /etc/fstab. Quoting getmntent(3): +// +// Since fields in the mtab and fstab files are separated by whitespace, +// octal escapes are used to represent the four characters space (\040), +// tab (\011), newline (\012) and backslash (\134) in those files when +// they occur in one of the four strings in a mntent structure. 
+// +// http://linux.die.net/man/3/getmntent + +var fstabUnescape = strings.NewReplacer( + `\040`, "\040", + `\011`, "\011", + `\012`, "\012", + `\134`, "\134", +) + +var errNotFound = errors.New("mount not found") + +func getMountInfo(mnt string) (*MountInfo, error) { + data, err := ioutil.ReadFile("/proc/mounts") + if err != nil { + return nil, err + } + for _, line := range strings.Split(string(data), "\n") { + fields := strings.Fields(line) + if len(fields) < 3 { + continue + } + // Fields are: fsname dir type opts freq passno + fsname := fstabUnescape.Replace(fields[0]) + dir := fstabUnescape.Replace(fields[1]) + fstype := fstabUnescape.Replace(fields[2]) + if mnt == dir { + info := &MountInfo{ + FSName: fsname, + Type: fstype, + } + return info, nil + } + } + return nil, errNotFound +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/testfs.go b/vendor/bazil.org/fuse/fs/fstestutil/testfs.go new file mode 100644 index 000000000..c1988bf70 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/testfs.go @@ -0,0 +1,55 @@ +package fstestutil + +import ( + "os" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + "golang.org/x/net/context" +) + +// SimpleFS is a trivial FS that just implements the Root method. +type SimpleFS struct { + Node fs.Node +} + +var _ = fs.FS(SimpleFS{}) + +func (f SimpleFS) Root() (fs.Node, error) { + return f.Node, nil +} + +// File can be embedded in a struct to make it look like a file. +type File struct{} + +func (f File) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = 0666 + return nil +} + +// Dir can be embedded in a struct to make it look like a directory. +type Dir struct{} + +func (f Dir) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = os.ModeDir | 0777 + return nil +} + +// ChildMap is a directory with child nodes looked up from a map. +type ChildMap map[string]fs.Node + +var _ = fs.Node(&ChildMap{}) +var _ = fs.NodeStringLookuper(&ChildMap{}) + +func (f *ChildMap) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = os.ModeDir | 0777 + return nil +} + +func (f *ChildMap) Lookup(ctx context.Context, name string) (fs.Node, error) { + child, ok := (*f)[name] + if !ok { + return nil, fuse.ENOENT + } + return child, nil +} diff --git a/vendor/bazil.org/fuse/fs/serve.go b/vendor/bazil.org/fuse/fs/serve.go new file mode 100644 index 000000000..e9fc56590 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/serve.go @@ -0,0 +1,1568 @@ +// FUSE service loop, for servers that wish to use it. + +package fs // import "bazil.org/fuse/fs" + +import ( + "encoding/binary" + "fmt" + "hash/fnv" + "io" + "log" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +import ( + "bytes" + + "bazil.org/fuse" + "bazil.org/fuse/fuseutil" +) + +const ( + attrValidTime = 1 * time.Minute + entryValidTime = 1 * time.Minute +) + +// TODO: FINISH DOCS + +// An FS is the interface required of a file system. +// +// Other FUSE requests can be handled by implementing methods from the +// FS* interfaces, for example FSStatfser. +type FS interface { + // Root is called to obtain the Node for the file system root. + Root() (Node, error) +} + +type FSStatfser interface { + // Statfs is called to obtain file system metadata. + // It should write that data to resp. + Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error +} + +type FSDestroyer interface { + // Destroy is called when the file system is shutting down. 
+ // + // Linux only sends this request for block device backed (fuseblk) + // filesystems, to allow them to flush writes to disk before the + // unmount completes. + Destroy() +} + +type FSInodeGenerator interface { + // GenerateInode is called to pick a dynamic inode number when it + // would otherwise be 0. + // + // Not all filesystems bother tracking inodes, but FUSE requires + // the inode to be set, and fewer duplicates in general makes UNIX + // tools work better. + // + // Operations where the nodes may return 0 inodes include Getattr, + // Setattr and ReadDir. + // + // If FS does not implement FSInodeGenerator, GenerateDynamicInode + // is used. + // + // Implementing this is useful to e.g. constrain the range of + // inode values used for dynamic inodes. + GenerateInode(parentInode uint64, name string) uint64 +} + +// A Node is the interface required of a file or directory. +// See the documentation for type FS for general information +// pertaining to all methods. +// +// A Node must be usable as a map key, that is, it cannot be a +// function, map or slice. +// +// Other FUSE requests can be handled by implementing methods from the +// Node* interfaces, for example NodeOpener. +// +// Methods returning Node should take care to return the same Node +// when the result is logically the same instance. Without this, each +// Node will get a new NodeID, causing spurious cache invalidations, +// extra lookups and aliasing anomalies. This may not matter for a +// simple, read-only filesystem. +type Node interface { + // Attr fills attr with the standard metadata for the node. + // + // Fields with reasonable defaults are prepopulated. For example, + // all times are set to a fixed moment when the program started. + // + // If Inode is left as 0, a dynamic inode number is chosen. + // + // The result may be cached for the duration set in Valid. + Attr(ctx context.Context, attr *fuse.Attr) error +} + +type NodeGetattrer interface { + // Getattr obtains the standard metadata for the receiver. + // It should store that metadata in resp. + // + // If this method is not implemented, the attributes will be + // generated based on Attr(), with zero values filled in. + Getattr(ctx context.Context, req *fuse.GetattrRequest, resp *fuse.GetattrResponse) error +} + +type NodeSetattrer interface { + // Setattr sets the standard metadata for the receiver. + // + // Note, this is also used to communicate changes in the size of + // the file, outside of Writes. + // + // req.Valid is a bitmask of what fields are actually being set. + // For example, the method should not change the mode of the file + // unless req.Valid.Mode() is true. + Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error +} + +type NodeSymlinker interface { + // Symlink creates a new symbolic link in the receiver, which must be a directory. + // + // TODO is the above true about directories? + Symlink(ctx context.Context, req *fuse.SymlinkRequest) (Node, error) +} + +// This optional request will be called only for symbolic link nodes. +type NodeReadlinker interface { + // Readlink reads a symbolic link. + Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) +} + +type NodeLinker interface { + // Link creates a new directory entry in the receiver based on an + // existing Node. Receiver must be a directory. 
Link(ctx context.Context, req *fuse.LinkRequest, old Node) (Node, error) +} + +type NodeRemover interface { + // Remove removes the entry with the given name from + // the receiver, which must be a directory. The entry to be removed + // may correspond to a file (unlink) or to a directory (rmdir). + Remove(ctx context.Context, req *fuse.RemoveRequest) error +} + +type NodeAccesser interface { + // Access checks whether the calling context has permission for + // the given operations on the receiver. If so, Access should + // return nil. If not, Access should return EPERM. + // + // Note that this call affects the result of the access(2) system + // call but not the open(2) system call. If Access is not + // implemented, the Node behaves as if it always returns nil + // (permission granted), relying on checks in Open instead. + Access(ctx context.Context, req *fuse.AccessRequest) error +} + +type NodeStringLookuper interface { + // Lookup looks up a specific entry in the receiver, + // which must be a directory. Lookup should return a Node + // corresponding to the entry. If the name does not exist in + // the directory, Lookup should return ENOENT. + // + // Lookup need not handle the names "." and "..". + Lookup(ctx context.Context, name string) (Node, error) +} + +type NodeRequestLookuper interface { + // Lookup looks up a specific entry in the receiver. + // See NodeStringLookuper for more. + Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (Node, error) +} + +type NodeMkdirer interface { + Mkdir(ctx context.Context, req *fuse.MkdirRequest) (Node, error) +} + +type NodeOpener interface { + // Open opens the receiver. After a successful open, a client + // process has a file descriptor referring to this Handle. + // + // Open can also be called on non-files. For example, + // directories are Opened for ReadDir or fchdir(2). + // + // If this method is not implemented, the open will always + // succeed, and the Node itself will be used as the Handle. + // + // XXX note about access. XXX OpenFlags. + Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (Handle, error) +} + +type NodeCreater interface { + // Create creates a new directory entry in the receiver, which + // must be a directory. + Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (Node, Handle, error) +} + +type NodeForgetter interface { + // Forget about this node. This node will not receive further + // method calls. + // + // Forget is not necessarily seen on unmount, as all nodes are + // implicitly forgotten as part of the unmount. + Forget() +} + +type NodeRenamer interface { + Rename(ctx context.Context, req *fuse.RenameRequest, newDir Node) error +} + +type NodeMknoder interface { + Mknod(ctx context.Context, req *fuse.MknodRequest) (Node, error) +} + +// TODO this should be on Handle not Node +type NodeFsyncer interface { + Fsync(ctx context.Context, req *fuse.FsyncRequest) error +} + +type NodeGetxattrer interface { + // Getxattr gets an extended attribute by the given name from the + // node. + // + // If there is no xattr by that name, returns fuse.ErrNoXattr. + Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error +} + +type NodeListxattrer interface { + // Listxattr lists the extended attributes recorded for the node.
+ Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error +} + +type NodeSetxattrer interface { + // Setxattr sets an extended attribute with the given name and + // value for the node. + Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error +} + +type NodeRemovexattrer interface { + // Removexattr removes an extended attribute for the name. + // + // If there is no xattr by that name, returns fuse.ErrNoXattr. + Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error +} + +var startTime = time.Now() + +func nodeAttr(ctx context.Context, n Node, attr *fuse.Attr) error { + attr.Valid = attrValidTime + attr.Nlink = 1 + attr.Atime = startTime + attr.Mtime = startTime + attr.Ctime = startTime + attr.Crtime = startTime + if err := n.Attr(ctx, attr); err != nil { + return err + } + return nil +} + +// A Handle is the interface required of an opened file or directory. +// See the documentation for type FS for general information +// pertaining to all methods. +// +// Other FUSE requests can be handled by implementing methods from the +// Handle* interfaces. The most common to implement are HandleReader, +// HandleReadDirer, and HandleWriter. +// +// TODO implement methods: Getlk, Setlk, Setlkw +type Handle interface { +} + +type HandleFlusher interface { + // Flush is called each time the file or directory is closed. + // Because there can be multiple file descriptors referring to a + // single opened file, Flush can be called multiple times. + Flush(ctx context.Context, req *fuse.FlushRequest) error +} + +type HandleReadAller interface { + ReadAll(ctx context.Context) ([]byte, error) +} + +type HandleReadDirAller interface { + ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) +} + +type HandleReader interface { + // Read requests to read data from the handle. + // + // There is a page cache in the kernel that normally submits only + // page-aligned reads spanning one or more pages. However, you + // should not rely on this. To see individual requests as + // submitted by the file system clients, set OpenDirectIO. + // + // Note that reads beyond the size of the file as reported by Attr + // are not even attempted (except in OpenDirectIO mode). + Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error +} + +type HandleWriter interface { + // Write requests to write data into the handle at the given offset. + // Store the amount of data written in resp.Size. + // + // There is a writeback page cache in the kernel that normally submits + // only page-aligned writes spanning one or more pages. However, + // you should not rely on this. To see individual requests as + // submitted by the file system clients, set OpenDirectIO. + // + // Writes that grow the file are expected to update the file size + // (as seen through Attr). Note that file size changes are + // communicated also through Setattr. + Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error +} + +type HandleReleaser interface { + Release(ctx context.Context, req *fuse.ReleaseRequest) error +} + +type Config struct { + // Function to send debug log messages to. If nil, use fuse.Debug. + // Note that changing this or fuse.Debug may not affect existing + // calls to Serve. + // + // See fuse.Debug for the rules that log functions must follow. + Debug func(msg interface{}) + + // Function to put things into context for processing the request. + // The returned context must have ctx as its parent. 
+ // + // Note that changing this may not affect existing calls to Serve. + // + // Must not retain req. + WithContext func(ctx context.Context, req fuse.Request) context.Context +} + +// New returns a new FUSE server ready to serve this kernel FUSE +// connection. +// +// Config may be nil. +func New(conn *fuse.Conn, config *Config) *Server { + s := &Server{ + conn: conn, + req: map[fuse.RequestID]*serveRequest{}, + nodeRef: map[Node]fuse.NodeID{}, + dynamicInode: GenerateDynamicInode, + } + if config != nil { + s.debug = config.Debug + s.context = config.WithContext + } + if s.debug == nil { + s.debug = fuse.Debug + } + return s +} + +type Server struct { + // set in New + conn *fuse.Conn + debug func(msg interface{}) + context func(ctx context.Context, req fuse.Request) context.Context + + // set once at Serve time + fs FS + dynamicInode func(parent uint64, name string) uint64 + + // state, protected by meta + meta sync.Mutex + req map[fuse.RequestID]*serveRequest + node []*serveNode + nodeRef map[Node]fuse.NodeID + handle []*serveHandle + freeNode []fuse.NodeID + freeHandle []fuse.HandleID + nodeGen uint64 + + // Used to ensure worker goroutines finish before Serve returns + wg sync.WaitGroup +} + +// Serve serves the FUSE connection by making calls to the methods +// of fs and the Nodes and Handles it makes available. It returns only +// when the connection has been closed or an unexpected error occurs. +func (s *Server) Serve(fs FS) error { + defer s.wg.Wait() // Wait for worker goroutines to complete before return + + s.fs = fs + if dyn, ok := fs.(FSInodeGenerator); ok { + s.dynamicInode = dyn.GenerateInode + } + + root, err := fs.Root() + if err != nil { + return fmt.Errorf("cannot obtain root node: %v", err) + } + // Recognize the root node if it's ever returned from Lookup, + // passed to Invalidate, etc. + s.nodeRef[root] = 1 + s.node = append(s.node, nil, &serveNode{ + inode: 1, + generation: s.nodeGen, + node: root, + refs: 1, + }) + s.handle = append(s.handle, nil) + + for { + req, err := s.conn.ReadRequest() + if err != nil { + if err == io.EOF { + break + } + return err + } + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.serve(req) + }() + } + return nil +} + +// Serve serves a FUSE connection with the default settings. See +// Server.Serve. +func Serve(c *fuse.Conn, fs FS) error { + server := New(c, nil) + return server.Serve(fs) +} + +type nothing struct{} + +type serveRequest struct { + Request fuse.Request + cancel func() +} + +type serveNode struct { + inode uint64 + generation uint64 + node Node + refs uint64 + + // Delay freeing the NodeID until waitgroup is done. This allows + // using the NodeID for short periods of time without holding the + // Server.meta lock. + // + // Rules: + // + // - hold Server.meta while calling wg.Add, then unlock + // - do NOT try to reacquire Server.meta + wg sync.WaitGroup +} + +func (sn *serveNode) attr(ctx context.Context, attr *fuse.Attr) error { + err := nodeAttr(ctx, sn.node, attr) + if attr.Inode == 0 { + attr.Inode = sn.inode + } + return err +} + +type serveHandle struct { + handle Handle + readData []byte + nodeID fuse.NodeID +} + +// NodeRef is deprecated. It remains here to decrease code churn on +// FUSE library users. You may remove it from your program now; +// returning the same Node values are now recognized automatically, +// without needing NodeRef. 
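Tying New, Config, and Serve together, a caller-side wiring sketch (the context key, connection variable, and logging sink are assumptions; only fs.New, fs.Config, Server.Serve, and req.Hdr() come from this file):

type ctxKey struct{}

// newTracingServer attaches the kernel request ID to each handler context.
func newTracingServer(conn *fuse.Conn) *fs.Server {
	return fs.New(conn, &fs.Config{
		Debug: func(msg interface{}) { log.Printf("fuse: %s", msg) },
		WithContext: func(ctx context.Context, req fuse.Request) context.Context {
			// keep ctx as the parent and do not retain req, as required above
			return context.WithValue(ctx, ctxKey{}, req.Hdr().ID)
		},
	})
}

A caller would then run newTracingServer(conn).Serve(filesys) in place of the plain fs.Serve helper.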
+type NodeRef struct{} + +func (c *Server) saveNode(inode uint64, node Node) (id fuse.NodeID, gen uint64) { + c.meta.Lock() + defer c.meta.Unlock() + + if id, ok := c.nodeRef[node]; ok { + sn := c.node[id] + sn.refs++ + return id, sn.generation + } + + sn := &serveNode{inode: inode, node: node, refs: 1} + if n := len(c.freeNode); n > 0 { + id = c.freeNode[n-1] + c.freeNode = c.freeNode[:n-1] + c.node[id] = sn + c.nodeGen++ + } else { + id = fuse.NodeID(len(c.node)) + c.node = append(c.node, sn) + } + sn.generation = c.nodeGen + c.nodeRef[node] = id + return id, sn.generation +} + +func (c *Server) saveHandle(handle Handle, nodeID fuse.NodeID) (id fuse.HandleID) { + c.meta.Lock() + shandle := &serveHandle{handle: handle, nodeID: nodeID} + if n := len(c.freeHandle); n > 0 { + id = c.freeHandle[n-1] + c.freeHandle = c.freeHandle[:n-1] + c.handle[id] = shandle + } else { + id = fuse.HandleID(len(c.handle)) + c.handle = append(c.handle, shandle) + } + c.meta.Unlock() + return +} + +type nodeRefcountDropBug struct { + N uint64 + Refs uint64 + Node fuse.NodeID +} + +func (n *nodeRefcountDropBug) String() string { + return fmt.Sprintf("bug: trying to drop %d of %d references to %v", n.N, n.Refs, n.Node) +} + +func (c *Server) dropNode(id fuse.NodeID, n uint64) (forget bool) { + c.meta.Lock() + defer c.meta.Unlock() + snode := c.node[id] + + if snode == nil { + // this should only happen if refcounts kernel<->us disagree + // *and* two ForgetRequests for the same node race each other; + // this indicates a bug somewhere + c.debug(nodeRefcountDropBug{N: n, Node: id}) + + // we may end up triggering Forget twice, but that's better + // than not even once, and that's the best we can do + return true + } + + if n > snode.refs { + c.debug(nodeRefcountDropBug{N: n, Refs: snode.refs, Node: id}) + n = snode.refs + } + + snode.refs -= n + if snode.refs == 0 { + snode.wg.Wait() + c.node[id] = nil + delete(c.nodeRef, snode.node) + c.freeNode = append(c.freeNode, id) + return true + } + return false +} + +func (c *Server) dropHandle(id fuse.HandleID) { + c.meta.Lock() + c.handle[id] = nil + c.freeHandle = append(c.freeHandle, id) + c.meta.Unlock() +} + +type missingHandle struct { + Handle fuse.HandleID + MaxHandle fuse.HandleID +} + +func (m missingHandle) String() string { + return fmt.Sprint("missing handle: ", m.Handle, m.MaxHandle) +} + +// Returns nil for invalid handles. +func (c *Server) getHandle(id fuse.HandleID) (shandle *serveHandle) { + c.meta.Lock() + defer c.meta.Unlock() + if id < fuse.HandleID(len(c.handle)) { + shandle = c.handle[uint(id)] + } + if shandle == nil { + c.debug(missingHandle{ + Handle: id, + MaxHandle: fuse.HandleID(len(c.handle)), + }) + } + return +} + +type request struct { + Op string + Request *fuse.Header + In interface{} `json:",omitempty"` +} + +func (r request) String() string { + return fmt.Sprintf("<- %s", r.In) +} + +type logResponseHeader struct { + ID fuse.RequestID +} + +func (m logResponseHeader) String() string { + return fmt.Sprintf("ID=%v", m.ID) +} + +type response struct { + Op string + Request logResponseHeader + Out interface{} `json:",omitempty"` + // Errno contains the errno value as a string, for example "EPERM". + Errno string `json:",omitempty"` + // Error may contain a free form error message. 
+ Error string `json:",omitempty"` +} + +func (r response) errstr() string { + s := r.Errno + if r.Error != "" { + // prefix the errno constant to the long form message + s = s + ": " + r.Error + } + return s +} + +func (r response) String() string { + switch { + case r.Errno != "" && r.Out != nil: + return fmt.Sprintf("-> [%v] %v error=%s", r.Request, r.Out, r.errstr()) + case r.Errno != "": + return fmt.Sprintf("-> [%v] %s error=%s", r.Request, r.Op, r.errstr()) + case r.Out != nil: + // make sure (seemingly) empty values are readable + switch r.Out.(type) { + case string: + return fmt.Sprintf("-> [%v] %s %q", r.Request, r.Op, r.Out) + case []byte: + return fmt.Sprintf("-> [%v] %s [% x]", r.Request, r.Op, r.Out) + default: + return fmt.Sprintf("-> [%v] %v", r.Request, r.Out) + } + default: + return fmt.Sprintf("-> [%v] %s", r.Request, r.Op) + } +} + +type notification struct { + Op string + Node fuse.NodeID + Out interface{} `json:",omitempty"` + Err string `json:",omitempty"` +} + +func (n notification) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "=> %s %v", n.Op, n.Node) + if n.Out != nil { + // make sure (seemingly) empty values are readable + switch n.Out.(type) { + case string: + fmt.Fprintf(&buf, " %q", n.Out) + case []byte: + fmt.Fprintf(&buf, " [% x]", n.Out) + default: + fmt.Fprintf(&buf, " %s", n.Out) + } + } + if n.Err != "" { + fmt.Fprintf(&buf, " Err:%v", n.Err) + } + return buf.String() +} + +type logMissingNode struct { + MaxNode fuse.NodeID +} + +func opName(req fuse.Request) string { + t := reflect.Indirect(reflect.ValueOf(req)).Type() + s := t.Name() + s = strings.TrimSuffix(s, "Request") + return s +} + +type logLinkRequestOldNodeNotFound struct { + Request *fuse.Header + In *fuse.LinkRequest +} + +func (m *logLinkRequestOldNodeNotFound) String() string { + return fmt.Sprintf("In LinkRequest (request %v), node %d not found", m.Request.Hdr().ID, m.In.OldNode) +} + +type renameNewDirNodeNotFound struct { + Request *fuse.Header + In *fuse.RenameRequest +} + +func (m *renameNewDirNodeNotFound) String() string { + return fmt.Sprintf("In RenameRequest (request %v), node %d not found", m.Request.Hdr().ID, m.In.NewDir) +} + +type handlerPanickedError struct { + Request interface{} + Err interface{} +} + +var _ error = handlerPanickedError{} + +func (h handlerPanickedError) Error() string { + return fmt.Sprintf("handler panicked: %v", h.Err) +} + +var _ fuse.ErrorNumber = handlerPanickedError{} + +func (h handlerPanickedError) Errno() fuse.Errno { + if err, ok := h.Err.(fuse.ErrorNumber); ok { + return err.Errno() + } + return fuse.DefaultErrno +} + +// handlerTerminatedError happens when a handler terminates itself +// with runtime.Goexit. This is most commonly because of incorrect use +// of testing.TB.FailNow, typically via t.Fatal. 
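The practical consequence for filesystem tests: inside a handler, report failures without terminating the goroutine (sketch; the testFile fixture is an assumption):

// testFile is a hypothetical read-only fixture used inside a test.
type testFile struct {
	t    testing.TB
	data []byte
}

// Calling f.t.Fatal here would run runtime.Goexit, the server would
// answer with handlerTerminatedError, and the client would see an I/O error.
func (f testFile) ReadAll(ctx context.Context) ([]byte, error) {
	if f.data == nil {
		f.t.Error("unexpected read") // Error, not Fatal: no Goexit
		return nil, fuse.EIO
	}
	return f.data, nil
}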
+type handlerTerminatedError struct { + Request interface{} +} + +var _ error = handlerTerminatedError{} + +func (h handlerTerminatedError) Error() string { + return fmt.Sprintf("handler terminated (called runtime.Goexit)") +} + +var _ fuse.ErrorNumber = handlerTerminatedError{} + +func (h handlerTerminatedError) Errno() fuse.Errno { + return fuse.DefaultErrno +} + +type handleNotReaderError struct { + handle Handle +} + +var _ error = handleNotReaderError{} + +func (e handleNotReaderError) Error() string { + return fmt.Sprintf("handle has no Read: %T", e.handle) +} + +var _ fuse.ErrorNumber = handleNotReaderError{} + +func (e handleNotReaderError) Errno() fuse.Errno { + return fuse.ENOTSUP +} + +func initLookupResponse(s *fuse.LookupResponse) { + s.EntryValid = entryValidTime +} + +func (c *Server) serve(r fuse.Request) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + parentCtx := ctx + if c.context != nil { + ctx = c.context(ctx, r) + } + + req := &serveRequest{Request: r, cancel: cancel} + + c.debug(request{ + Op: opName(r), + Request: r.Hdr(), + In: r, + }) + var node Node + var snode *serveNode + c.meta.Lock() + hdr := r.Hdr() + if id := hdr.Node; id != 0 { + if id < fuse.NodeID(len(c.node)) { + snode = c.node[uint(id)] + } + if snode == nil { + c.meta.Unlock() + c.debug(response{ + Op: opName(r), + Request: logResponseHeader{ID: hdr.ID}, + Error: fuse.ESTALE.ErrnoName(), + // this is the only place that sets both Error and + // Out; not sure if i want to do that; might get rid + // of len(c.node) things altogether + Out: logMissingNode{ + MaxNode: fuse.NodeID(len(c.node)), + }, + }) + r.RespondError(fuse.ESTALE) + return + } + node = snode.node + } + if c.req[hdr.ID] != nil { + // This happens with OSXFUSE. Assume it's okay and + // that we'll never see an interrupt for this one. + // Otherwise everything wedges. TODO: Report to OSXFUSE? + // + // TODO this might have been because of missing done() calls + } else { + c.req[hdr.ID] = req + } + c.meta.Unlock() + + // Call this before responding. + // After responding is too late: we might get another request + // with the same ID and be very confused. + done := func(resp interface{}) { + msg := response{ + Op: opName(r), + Request: logResponseHeader{ID: hdr.ID}, + } + if err, ok := resp.(error); ok { + msg.Error = err.Error() + if ferr, ok := err.(fuse.ErrorNumber); ok { + errno := ferr.Errno() + msg.Errno = errno.ErrnoName() + if errno == err { + // it's just a fuse.Errno with no extra detail; + // skip the textual message for log readability + msg.Error = "" + } + } else { + msg.Errno = fuse.DefaultErrno.ErrnoName() + } + } else { + msg.Out = resp + } + c.debug(msg) + + c.meta.Lock() + delete(c.req, hdr.ID) + c.meta.Unlock() + } + + var responded bool + defer func() { + if rec := recover(); rec != nil { + const size = 1 << 16 + buf := make([]byte, size) + n := runtime.Stack(buf, false) + buf = buf[:n] + log.Printf("fuse: panic in handler for %v: %v\n%s", r, rec, buf) + err := handlerPanickedError{ + Request: r, + Err: rec, + } + done(err) + r.RespondError(err) + return + } + + if !responded { + err := handlerTerminatedError{ + Request: r, + } + done(err) + r.RespondError(err) + } + }() + + if err := c.handleRequest(ctx, node, snode, r, done); err != nil { + if err == context.Canceled { + select { + case <-parentCtx.Done(): + // We canceled the parent context because of an + // incoming interrupt request, so return EINTR + // to trigger the right behavior in the client app. 
+ // + // Only do this when it's the parent context that was + // canceled, not a context controlled by the program + // using this library, so we don't return EINTR too + // eagerly -- it might cause busy loops. + // + // Decent write-up on role of EINTR: + // http://250bpm.com/blog:12 + err = fuse.EINTR + default: + // nothing + } + } + done(err) + r.RespondError(err) + } + + // disarm runtime.Goexit protection + responded = true +} + +// handleRequest will either a) call done(s) and r.Respond(s) OR b) return an error. +func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, r fuse.Request, done func(resp interface{})) error { + switch r := r.(type) { + default: + // Note: To FUSE, ENOSYS means "this server never implements this request." + // It would be inappropriate to return ENOSYS for other operations in this + // switch that might only be unavailable in some contexts, not all. + return fuse.ENOSYS + + case *fuse.StatfsRequest: + s := &fuse.StatfsResponse{} + if fs, ok := c.fs.(FSStatfser); ok { + if err := fs.Statfs(ctx, r, s); err != nil { + return err + } + } + done(s) + r.Respond(s) + return nil + + // Node operations. + case *fuse.GetattrRequest: + s := &fuse.GetattrResponse{} + if n, ok := node.(NodeGetattrer); ok { + if err := n.Getattr(ctx, r, s); err != nil { + return err + } + } else { + if err := snode.attr(ctx, &s.Attr); err != nil { + return err + } + } + done(s) + r.Respond(s) + return nil + + case *fuse.SetattrRequest: + s := &fuse.SetattrResponse{} + if n, ok := node.(NodeSetattrer); ok { + if err := n.Setattr(ctx, r, s); err != nil { + return err + } + } + + if err := snode.attr(ctx, &s.Attr); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.SymlinkRequest: + s := &fuse.SymlinkResponse{} + initLookupResponse(&s.LookupResponse) + n, ok := node.(NodeSymlinker) + if !ok { + return fuse.EIO // XXX or EPERM like Mkdir? + } + n2, err := n.Symlink(ctx, r) + if err != nil { + return err + } + if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.NewName, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.ReadlinkRequest: + n, ok := node.(NodeReadlinker) + if !ok { + return fuse.EIO /// XXX or EPERM? + } + target, err := n.Readlink(ctx, r) + if err != nil { + return err + } + done(target) + r.Respond(target) + return nil + + case *fuse.LinkRequest: + n, ok := node.(NodeLinker) + if !ok { + return fuse.EIO /// XXX or EPERM? + } + c.meta.Lock() + var oldNode *serveNode + if int(r.OldNode) < len(c.node) { + oldNode = c.node[r.OldNode] + } + c.meta.Unlock() + if oldNode == nil { + c.debug(logLinkRequestOldNodeNotFound{ + Request: r.Hdr(), + In: r, + }) + return fuse.EIO + } + n2, err := n.Link(ctx, r, oldNode.node) + if err != nil { + return err + } + s := &fuse.LookupResponse{} + initLookupResponse(s) + if err := c.saveLookup(ctx, s, snode, r.NewName, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.RemoveRequest: + n, ok := node.(NodeRemover) + if !ok { + return fuse.EIO /// XXX or EPERM? 
+ } + err := n.Remove(ctx, r) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.AccessRequest: + if n, ok := node.(NodeAccesser); ok { + if err := n.Access(ctx, r); err != nil { + return err + } + } + done(nil) + r.Respond() + return nil + + case *fuse.LookupRequest: + var n2 Node + var err error + s := &fuse.LookupResponse{} + initLookupResponse(s) + if n, ok := node.(NodeStringLookuper); ok { + n2, err = n.Lookup(ctx, r.Name) + } else if n, ok := node.(NodeRequestLookuper); ok { + n2, err = n.Lookup(ctx, r, s) + } else { + return fuse.ENOENT + } + if err != nil { + return err + } + if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.MkdirRequest: + s := &fuse.MkdirResponse{} + initLookupResponse(&s.LookupResponse) + n, ok := node.(NodeMkdirer) + if !ok { + return fuse.EPERM + } + n2, err := n.Mkdir(ctx, r) + if err != nil { + return err + } + if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.OpenRequest: + s := &fuse.OpenResponse{} + var h2 Handle + if n, ok := node.(NodeOpener); ok { + hh, err := n.Open(ctx, r, s) + if err != nil { + return err + } + h2 = hh + } else { + h2 = node + } + s.Handle = c.saveHandle(h2, r.Hdr().Node) + done(s) + r.Respond(s) + return nil + + case *fuse.CreateRequest: + n, ok := node.(NodeCreater) + if !ok { + // If we send back ENOSYS, FUSE will try mknod+open. + return fuse.EPERM + } + s := &fuse.CreateResponse{OpenResponse: fuse.OpenResponse{}} + initLookupResponse(&s.LookupResponse) + n2, h2, err := n.Create(ctx, r, s) + if err != nil { + return err + } + if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { + return err + } + s.Handle = c.saveHandle(h2, r.Hdr().Node) + done(s) + r.Respond(s) + return nil + + case *fuse.GetxattrRequest: + n, ok := node.(NodeGetxattrer) + if !ok { + return fuse.ENOTSUP + } + s := &fuse.GetxattrResponse{} + err := n.Getxattr(ctx, r, s) + if err != nil { + return err + } + if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { + return fuse.ERANGE + } + done(s) + r.Respond(s) + return nil + + case *fuse.ListxattrRequest: + n, ok := node.(NodeListxattrer) + if !ok { + return fuse.ENOTSUP + } + s := &fuse.ListxattrResponse{} + err := n.Listxattr(ctx, r, s) + if err != nil { + return err + } + if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { + return fuse.ERANGE + } + done(s) + r.Respond(s) + return nil + + case *fuse.SetxattrRequest: + n, ok := node.(NodeSetxattrer) + if !ok { + return fuse.ENOTSUP + } + err := n.Setxattr(ctx, r) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.RemovexattrRequest: + n, ok := node.(NodeRemovexattrer) + if !ok { + return fuse.ENOTSUP + } + err := n.Removexattr(ctx, r) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.ForgetRequest: + forget := c.dropNode(r.Hdr().Node, r.N) + if forget { + n, ok := node.(NodeForgetter) + if ok { + n.Forget() + } + } + done(nil) + r.Respond() + return nil + + // Handle operations. 
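+	//
+	// The cases below consult the Handle that Open (or Create)
+	// returned and that saveHandle recorded. A minimal read-only
+	// handle might look like this (sketch only; echoHandle is a
+	// hypothetical type, not part of this package):
+	//
+	//	type echoHandle struct{ data []byte }
+	//
+	//	func (h echoHandle) ReadAll(ctx context.Context) ([]byte, error) {
+	//		return h.data, nil
+	//	}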
+ case *fuse.ReadRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + return fuse.ESTALE + } + handle := shandle.handle + + s := &fuse.ReadResponse{Data: make([]byte, 0, r.Size)} + if r.Dir { + if h, ok := handle.(HandleReadDirAller); ok { + // detect rewinddir(3) or similar seek and refresh + // contents + if r.Offset == 0 { + shandle.readData = nil + } + + if shandle.readData == nil { + dirs, err := h.ReadDirAll(ctx) + if err != nil { + return err + } + var data []byte + for _, dir := range dirs { + if dir.Inode == 0 { + dir.Inode = c.dynamicInode(snode.inode, dir.Name) + } + data = fuse.AppendDirent(data, dir) + } + shandle.readData = data + } + fuseutil.HandleRead(r, s, shandle.readData) + done(s) + r.Respond(s) + return nil + } + } else { + if h, ok := handle.(HandleReadAller); ok { + if shandle.readData == nil { + data, err := h.ReadAll(ctx) + if err != nil { + return err + } + if data == nil { + data = []byte{} + } + shandle.readData = data + } + fuseutil.HandleRead(r, s, shandle.readData) + done(s) + r.Respond(s) + return nil + } + h, ok := handle.(HandleReader) + if !ok { + err := handleNotReaderError{handle: handle} + return err + } + if err := h.Read(ctx, r, s); err != nil { + return err + } + } + done(s) + r.Respond(s) + return nil + + case *fuse.WriteRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + return fuse.ESTALE + } + + s := &fuse.WriteResponse{} + if h, ok := shandle.handle.(HandleWriter); ok { + if err := h.Write(ctx, r, s); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + } + return fuse.EIO + + case *fuse.FlushRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + return fuse.ESTALE + } + handle := shandle.handle + + if h, ok := handle.(HandleFlusher); ok { + if err := h.Flush(ctx, r); err != nil { + return err + } + } + done(nil) + r.Respond() + return nil + + case *fuse.ReleaseRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + return fuse.ESTALE + } + handle := shandle.handle + + // No matter what, release the handle. + c.dropHandle(r.Handle) + + if h, ok := handle.(HandleReleaser); ok { + if err := h.Release(ctx, r); err != nil { + return err + } + } + done(nil) + r.Respond() + return nil + + case *fuse.DestroyRequest: + if fs, ok := c.fs.(FSDestroyer); ok { + fs.Destroy() + } + done(nil) + r.Respond() + return nil + + case *fuse.RenameRequest: + c.meta.Lock() + var newDirNode *serveNode + if int(r.NewDir) < len(c.node) { + newDirNode = c.node[r.NewDir] + } + c.meta.Unlock() + if newDirNode == nil { + c.debug(renameNewDirNodeNotFound{ + Request: r.Hdr(), + In: r, + }) + return fuse.EIO + } + n, ok := node.(NodeRenamer) + if !ok { + return fuse.EIO // XXX or EPERM like Mkdir? 
+ } + err := n.Rename(ctx, r, newDirNode.node) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.MknodRequest: + n, ok := node.(NodeMknoder) + if !ok { + return fuse.EIO + } + n2, err := n.Mknod(ctx, r) + if err != nil { + return err + } + s := &fuse.LookupResponse{} + initLookupResponse(s) + if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.FsyncRequest: + n, ok := node.(NodeFsyncer) + if !ok { + return fuse.EIO + } + err := n.Fsync(ctx, r) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.InterruptRequest: + c.meta.Lock() + ireq := c.req[r.IntrID] + if ireq != nil && ireq.cancel != nil { + ireq.cancel() + ireq.cancel = nil + } + c.meta.Unlock() + done(nil) + r.Respond() + return nil + + /* case *FsyncdirRequest: + return ENOSYS + + case *GetlkRequest, *SetlkRequest, *SetlkwRequest: + return ENOSYS + + case *BmapRequest: + return ENOSYS + + case *SetvolnameRequest, *GetxtimesRequest, *ExchangeRequest: + return ENOSYS + */ + } + + panic("not reached") +} + +func (c *Server) saveLookup(ctx context.Context, s *fuse.LookupResponse, snode *serveNode, elem string, n2 Node) error { + if err := nodeAttr(ctx, n2, &s.Attr); err != nil { + return err + } + if s.Attr.Inode == 0 { + s.Attr.Inode = c.dynamicInode(snode.inode, elem) + } + + s.Node, s.Generation = c.saveNode(s.Attr.Inode, n2) + return nil +} + +type invalidateNodeDetail struct { + Off int64 + Size int64 +} + +func (i invalidateNodeDetail) String() string { + return fmt.Sprintf("Off:%d Size:%d", i.Off, i.Size) +} + +func errstr(err error) string { + if err == nil { + return "" + } + return err.Error() +} + +func (s *Server) invalidateNode(node Node, off int64, size int64) error { + s.meta.Lock() + id, ok := s.nodeRef[node] + if ok { + snode := s.node[id] + snode.wg.Add(1) + defer snode.wg.Done() + } + s.meta.Unlock() + if !ok { + // This is what the kernel would have said, if we had been + // able to send this message; it's not cached. + return fuse.ErrNotCached + } + // Delay logging until after we can record the error too. We + // consider a /dev/fuse write to be instantaneous enough to not + // need separate before and after messages. + err := s.conn.InvalidateNode(id, off, size) + s.debug(notification{ + Op: "InvalidateNode", + Node: id, + Out: invalidateNodeDetail{ + Off: off, + Size: size, + }, + Err: errstr(err), + }) + return err +} + +// InvalidateNodeAttr invalidates the kernel cache of the attributes +// of node. +// +// Returns fuse.ErrNotCached if the kernel is not currently caching +// the node. +func (s *Server) InvalidateNodeAttr(node Node) error { + return s.invalidateNode(node, 0, 0) +} + +// InvalidateNodeData invalidates the kernel cache of the attributes +// and data of node. +// +// Returns fuse.ErrNotCached if the kernel is not currently caching +// the node. +func (s *Server) InvalidateNodeData(node Node) error { + return s.invalidateNode(node, 0, -1) +} + +// InvalidateNodeDataRange invalidates the kernel cache of the +// attributes and a range of the data of node. +// +// Returns fuse.ErrNotCached if the kernel is not currently caching +// the node. 
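+//
+// For example, a server that just rewrote bytes [off, off+n) of a
+// node's backing content might call (sketch; srv and node are an
+// existing *Server and Node held by the caller):
+//
+//	err := srv.InvalidateNodeDataRange(node, off, int64(n))
+//	if err != nil && err != fuse.ErrNotCached {
+//		log.Println("invalidate:", err)
+//	}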
+func (s *Server) InvalidateNodeDataRange(node Node, off int64, size int64) error { + return s.invalidateNode(node, off, size) +} + +type invalidateEntryDetail struct { + Name string +} + +func (i invalidateEntryDetail) String() string { + return fmt.Sprintf("%q", i.Name) +} + +// InvalidateEntry invalidates the kernel cache of the directory entry +// identified by parent node and entry basename. +// +// Kernel may or may not cache directory listings. To invalidate +// those, use InvalidateNode to invalidate all of the data for a +// directory. (As of 2015-06, Linux FUSE does not cache directory +// listings.) +// +// Returns ErrNotCached if the kernel is not currently caching the +// node. +func (s *Server) InvalidateEntry(parent Node, name string) error { + s.meta.Lock() + id, ok := s.nodeRef[parent] + if ok { + snode := s.node[id] + snode.wg.Add(1) + defer snode.wg.Done() + } + s.meta.Unlock() + if !ok { + // This is what the kernel would have said, if we had been + // able to send this message; it's not cached. + return fuse.ErrNotCached + } + err := s.conn.InvalidateEntry(id, name) + s.debug(notification{ + Op: "InvalidateEntry", + Node: id, + Out: invalidateEntryDetail{ + Name: name, + }, + Err: errstr(err), + }) + return err +} + +// DataHandle returns a read-only Handle that satisfies reads +// using the given data. +func DataHandle(data []byte) Handle { + return &dataHandle{data} +} + +type dataHandle struct { + data []byte +} + +func (d *dataHandle) ReadAll(ctx context.Context) ([]byte, error) { + return d.data, nil +} + +// GenerateDynamicInode returns a dynamic inode. +// +// The parent inode and current entry name are used as the criteria +// for choosing a pseudorandom inode. This makes it likely the same +// entry will get the same inode on multiple runs. +func GenerateDynamicInode(parent uint64, name string) uint64 { + h := fnv.New64a() + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], parent) + _, _ = h.Write(buf[:]) + _, _ = h.Write([]byte(name)) + var inode uint64 + for { + inode = h.Sum64() + if inode != 0 { + break + } + // there's a tiny probability that result is zero; change the + // input a little and try again + _, _ = h.Write([]byte{'x'}) + } + return inode +} diff --git a/vendor/bazil.org/fuse/fs/tree.go b/vendor/bazil.org/fuse/fs/tree.go new file mode 100644 index 000000000..7e078045a --- /dev/null +++ b/vendor/bazil.org/fuse/fs/tree.go @@ -0,0 +1,99 @@ +// FUSE directory tree, for servers that wish to use it with the service loop. + +package fs + +import ( + "os" + pathpkg "path" + "strings" + + "golang.org/x/net/context" +) + +import ( + "bazil.org/fuse" +) + +// A Tree implements a basic read-only directory tree for FUSE. +// The Nodes contained in it may still be writable. +type Tree struct { + tree +} + +func (t *Tree) Root() (Node, error) { + return &t.tree, nil +} + +// Add adds the path to the tree, resolving to the given node. +// If path or a prefix of path has already been added to the tree, +// Add panics. +// +// Add is only safe to call before starting to serve requests. 
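+//
+// For example (sketch; settingsNode and logNode are hypothetical
+// Nodes supplied by the caller):
+//
+//	var t Tree
+//	t.Add("config/settings", settingsNode)
+//	t.Add("logs/current", logNode)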
+func (t *Tree) Add(path string, node Node) { + path = pathpkg.Clean("/" + path)[1:] + elems := strings.Split(path, "/") + dir := Node(&t.tree) + for i, elem := range elems { + dt, ok := dir.(*tree) + if !ok { + panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path) + } + n := dt.lookup(elem) + if n != nil { + if i+1 == len(elems) { + panic("fuse: Tree.Add for " + path + " conflicts with " + elem) + } + dir = n + } else { + if i+1 == len(elems) { + dt.add(elem, node) + } else { + dir = &tree{} + dt.add(elem, dir) + } + } + } +} + +type treeDir struct { + name string + node Node +} + +type tree struct { + dir []treeDir +} + +func (t *tree) lookup(name string) Node { + for _, d := range t.dir { + if d.name == name { + return d.node + } + } + return nil +} + +func (t *tree) add(name string, n Node) { + t.dir = append(t.dir, treeDir{name, n}) +} + +func (t *tree) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = os.ModeDir | 0555 + return nil +} + +func (t *tree) Lookup(ctx context.Context, name string) (Node, error) { + n := t.lookup(name) + if n != nil { + return n, nil + } + return nil, fuse.ENOENT +} + +func (t *tree) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + var out []fuse.Dirent + for _, d := range t.dir { + out = append(out, fuse.Dirent{Name: d.name}) + } + return out, nil +} diff --git a/vendor/bazil.org/fuse/fuse.go b/vendor/bazil.org/fuse/fuse.go new file mode 100644 index 000000000..7dc70f9e1 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse.go @@ -0,0 +1,2304 @@ +// See the file LICENSE for copyright and licensing information. + +// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, +// which carries this notice: +// +// The files in this directory are subject to the following license. +// +// The author of this software is Russ Cox. +// +// Copyright (c) 2006 Russ Cox +// +// Permission to use, copy, modify, and distribute this software for any +// purpose without fee is hereby granted, provided that this entire notice +// is included in all copies of any software which is or includes a copy +// or modification of this software and in all copies of the supporting +// documentation for such software. +// +// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY +// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS +// FITNESS FOR ANY PARTICULAR PURPOSE. + +// Package fuse enables writing FUSE file systems on Linux, OS X, and FreeBSD. +// +// On OS X, it requires OSXFUSE (http://osxfuse.github.com/). +// +// There are two approaches to writing a FUSE file system. The first is to speak +// the low-level message protocol, reading from a Conn using ReadRequest and +// writing using the various Respond methods. This approach is closest to +// the actual interaction with the kernel and can be the simplest one in contexts +// such as protocol translators. +// +// Servers of synthesized file systems tend to share common +// bookkeeping abstracted away by the second approach, which is to +// call fs.Serve to serve the FUSE protocol using an implementation of +// the service methods in the interfaces FS* (file system), Node* (file +// or directory), and Handle* (opened file or directory). +// There are a daunting number of such methods that can be written, +// but few are required. +// The specific methods are described in the documentation for those interfaces. 
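+//
+// As a taste of the fs.Serve approach, a minimal read-only file
+// system serving one static file might look roughly like this
+// (sketch only; FS, Dir, and File are names invented for this
+// example, and error handling is omitted):
+//
+//	type FS struct{}
+//
+//	func (FS) Root() (fs.Node, error) { return Dir{}, nil }
+//
+//	type Dir struct{}
+//
+//	func (Dir) Attr(ctx context.Context, a *fuse.Attr) error {
+//		a.Mode = os.ModeDir | 0555
+//		return nil
+//	}
+//
+//	func (Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
+//		if name == "hello" {
+//			return File{}, nil
+//		}
+//		return nil, fuse.ENOENT
+//	}
+//
+//	type File struct{}
+//
+//	func (File) Attr(ctx context.Context, a *fuse.Attr) error {
+//		a.Mode = 0444
+//		a.Size = uint64(len("hello, world\n"))
+//		return nil
+//	}
+//
+//	func (File) ReadAll(ctx context.Context) ([]byte, error) {
+//		return []byte("hello, world\n"), nil
+//	}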
+//
+// The hellofs subdirectory contains a simple illustration of the fs.Serve approach.
+//
+// Service Methods
+//
+// The required and optional methods for the FS, Node, and Handle interfaces
+// have the general form
+//
+//	Op(ctx context.Context, req *OpRequest, resp *OpResponse) error
+//
+// where Op is the name of a FUSE operation. Op reads request
+// parameters from req and writes results to resp. An operation whose
+// only result is the error result omits the resp parameter.
+//
+// Multiple goroutines may call service methods simultaneously; the
+// methods being called are responsible for appropriate
+// synchronization.
+//
+// The operation must not hold on to the request or response,
+// including any []byte fields such as WriteRequest.Data or
+// SetxattrRequest.Xattr.
+//
+// Errors
+//
+// Operations can return errors. The FUSE interface can only
+// communicate POSIX errno error numbers to file system clients; the
+// error message itself is not visible to file system clients. The
+// returned error can implement ErrorNumber to control the errno
+// returned. Without ErrorNumber, a generic errno (EIO) is returned.
+//
+// Error messages will be visible in the debug log as part of the
+// response.
+//
+// Interrupted Operations
+//
+// In some file systems, some operations
+// may take an undetermined amount of time. For example, a Read waiting for
+// a network message or a matching Write might wait indefinitely. If the request
+// is cancelled and no longer needed, the context will be cancelled.
+// Blocking operations should select on a receive from ctx.Done() and attempt to
+// abort the operation early if the receive succeeds (meaning the channel is closed).
+// To indicate that the operation failed because it was aborted, return fuse.EINTR.
+//
+// If an operation does not block for an indefinite amount of time, supporting
+// cancellation is not necessary.
+//
+// Authentication
+//
+// All request types embed a Header, meaning that the method can
+// inspect req.Pid, req.Uid, and req.Gid as necessary to implement
+// permission checking. The kernel FUSE layer normally prevents other
+// users from accessing the FUSE file system (to change this, see
+// AllowOther, AllowRoot), but does not enforce access modes (to
+// change this, see DefaultPermissions).
+//
+// Mount Options
+//
+// Behavior and metadata of the mounted file system can be changed by
+// passing MountOption values to Mount.
+//
+package fuse // import "bazil.org/fuse"
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"sync"
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+// A Conn represents a connection to a mounted FUSE file system.
+type Conn struct {
+	// Ready is closed when the mount is complete or has failed.
+	Ready <-chan struct{}
+
+	// MountError stores any error from the mount process. Only valid
+	// after Ready is closed.
+	MountError error
+
+	// File handle for kernel communication. Only safe to access if
+	// rio or wio is held.
+	dev *os.File
+	wio sync.RWMutex
+	rio sync.RWMutex
+
+	// Protocol version negotiated with InitRequest/InitResponse.
+	proto Protocol
+}
+
+// MountpointDoesNotExistError is an error returned when the
+// mountpoint does not exist.
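+//
+// Callers of Mount can detect this case with a type assertion
+// (sketch):
+//
+//	if _, ok := err.(*MountpointDoesNotExistError); ok {
+//		// the directory is missing; create it and retry, or report it
+//	}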
+type MountpointDoesNotExistError struct { + Path string +} + +var _ error = (*MountpointDoesNotExistError)(nil) + +func (e *MountpointDoesNotExistError) Error() string { + return fmt.Sprintf("mountpoint does not exist: %v", e.Path) +} + +// Mount mounts a new FUSE connection on the named directory +// and returns a connection for reading and writing FUSE messages. +// +// After a successful return, caller must call Close to free +// resources. +// +// Even on successful return, the new mount is not guaranteed to be +// visible until after Conn.Ready is closed. See Conn.MountError for +// possible errors. Incoming requests on Conn must be served to make +// progress. +func Mount(dir string, options ...MountOption) (*Conn, error) { + conf := mountConfig{ + options: make(map[string]string), + } + for _, option := range options { + if err := option(&conf); err != nil { + return nil, err + } + } + + ready := make(chan struct{}, 1) + c := &Conn{ + Ready: ready, + } + f, err := mount(dir, &conf, ready, &c.MountError) + if err != nil { + return nil, err + } + c.dev = f + + if err := initMount(c, &conf); err != nil { + c.Close() + if err == ErrClosedWithoutInit { + // see if we can provide a better error + <-c.Ready + if err := c.MountError; err != nil { + return nil, err + } + } + return nil, err + } + + return c, nil +} + +type OldVersionError struct { + Kernel Protocol + LibraryMin Protocol +} + +func (e *OldVersionError) Error() string { + return fmt.Sprintf("kernel FUSE version is too old: %v < %v", e.Kernel, e.LibraryMin) +} + +var ( + ErrClosedWithoutInit = errors.New("fuse connection closed without init") +) + +func initMount(c *Conn, conf *mountConfig) error { + req, err := c.ReadRequest() + if err != nil { + if err == io.EOF { + return ErrClosedWithoutInit + } + return err + } + r, ok := req.(*InitRequest) + if !ok { + return fmt.Errorf("missing init, got: %T", req) + } + + min := Protocol{protoVersionMinMajor, protoVersionMinMinor} + if r.Kernel.LT(min) { + req.RespondError(Errno(syscall.EPROTO)) + c.Close() + return &OldVersionError{ + Kernel: r.Kernel, + LibraryMin: min, + } + } + + proto := Protocol{protoVersionMaxMajor, protoVersionMaxMinor} + if r.Kernel.LT(proto) { + // Kernel doesn't support the latest version we have. + proto = r.Kernel + } + c.proto = proto + + s := &InitResponse{ + Library: proto, + MaxReadahead: conf.maxReadahead, + MaxWrite: maxWrite, + Flags: InitBigWrites | conf.initFlags, + } + r.Respond(s) + return nil +} + +// A Request represents a single FUSE request received from the kernel. +// Use a type switch to determine the specific kind. +// A request of unrecognized type will have concrete type *Header. +type Request interface { + // Hdr returns the Header associated with this request. + Hdr() *Header + + // RespondError responds to the request with the given error. + RespondError(error) + + String() string +} + +// A RequestID identifies an active FUSE request. +type RequestID uint64 + +func (r RequestID) String() string { + return fmt.Sprintf("%#x", uint64(r)) +} + +// A NodeID is a number identifying a directory or file. +// It must be unique among IDs returned in LookupResponses +// that have not yet been forgotten by ForgetRequests. +type NodeID uint64 + +func (n NodeID) String() string { + return fmt.Sprintf("%#x", uint64(n)) +} + +// A HandleID is a number identifying an open directory or file. +// It only needs to be unique while the directory or file is open. 
+type HandleID uint64
+
+func (h HandleID) String() string {
+	return fmt.Sprintf("%#x", uint64(h))
+}
+
+// The RootID identifies the root directory of a FUSE file system.
+const RootID NodeID = rootID
+
+// A Header describes the basic information sent in every request.
+type Header struct {
+	Conn *Conn     `json:"-"` // connection this request was received on
+	ID   RequestID // unique ID for request
+	Node NodeID    // file or directory the request is about
+	Uid  uint32    // user ID of process making request
+	Gid  uint32    // group ID of process making request
+	Pid  uint32    // process ID of process making request
+
+	// for returning to reqPool
+	msg *message
+}
+
+func (h *Header) String() string {
+	return fmt.Sprintf("ID=%v Node=%v Uid=%d Gid=%d Pid=%d", h.ID, h.Node, h.Uid, h.Gid, h.Pid)
+}
+
+func (h *Header) Hdr() *Header {
+	return h
+}
+
+func (h *Header) noResponse() {
+	putMessage(h.msg)
+}
+
+func (h *Header) respond(msg []byte) {
+	out := (*outHeader)(unsafe.Pointer(&msg[0]))
+	out.Unique = uint64(h.ID)
+	h.Conn.respond(msg)
+	putMessage(h.msg)
+}
+
+// An ErrorNumber is an error with a specific error number.
+//
+// Operations may return an error value that implements ErrorNumber to
+// control what specific error number (errno) to return.
+type ErrorNumber interface {
+	// Errno returns the error number (errno) for this error.
+	Errno() Errno
+}
+
+const (
+	// ENOSYS indicates that the call is not supported.
+	ENOSYS = Errno(syscall.ENOSYS)
+
+	// ESTALE is used by Serve to respond to violations of the FUSE protocol.
+	ESTALE = Errno(syscall.ESTALE)
+
+	ENOENT = Errno(syscall.ENOENT)
+	EIO    = Errno(syscall.EIO)
+	EPERM  = Errno(syscall.EPERM)
+
+	// EINTR indicates the request was interrupted by an InterruptRequest.
+	// See also fs.Intr.
+	EINTR = Errno(syscall.EINTR)
+
+	ERANGE  = Errno(syscall.ERANGE)
+	ENOTSUP = Errno(syscall.ENOTSUP)
+	EEXIST  = Errno(syscall.EEXIST)
+)
+
+// DefaultErrno is the errno used when the returned error does not
+// implement ErrorNumber.
+const DefaultErrno = EIO
+
+var errnoNames = map[Errno]string{
+	ENOSYS: "ENOSYS",
+	ESTALE: "ESTALE",
+	ENOENT: "ENOENT",
+	EIO:    "EIO",
+	EPERM:  "EPERM",
+	EINTR:  "EINTR",
+	EEXIST: "EEXIST",
+}
+
+// Errno implements Error and ErrorNumber using a syscall.Errno.
+type Errno syscall.Errno
+
+var _ = ErrorNumber(Errno(0))
+var _ = error(Errno(0))
+
+func (e Errno) Errno() Errno {
+	return e
+}
+
+func (e Errno) String() string {
+	return syscall.Errno(e).Error()
+}
+
+func (e Errno) Error() string {
+	return syscall.Errno(e).Error()
+}
+
+// ErrnoName returns the short non-numeric identifier for this errno.
+// For example, "EIO".
+func (e Errno) ErrnoName() string {
+	s := errnoNames[e]
+	if s == "" {
+		s = fmt.Sprint(e.Errno())
+	}
+	return s
+}
+
+func (e Errno) MarshalText() ([]byte, error) {
+	s := e.ErrnoName()
+	return []byte(s), nil
+}
+
+func (h *Header) RespondError(err error) {
+	errno := DefaultErrno
+	if ferr, ok := err.(ErrorNumber); ok {
+		errno = ferr.Errno()
+	}
+	// FUSE uses negative errors!
+	// TODO: File bug report against OSXFUSE: positive error causes kernel panic.
+	buf := newBuffer(0)
+	hOut := (*outHeader)(unsafe.Pointer(&buf[0]))
+	hOut.Error = -int32(errno)
+	h.respond(buf)
+}
+
+// All requests read from the kernel, without data, are shorter than
+// this.
+var maxRequestSize = syscall.Getpagesize()
+var bufSize = maxRequestSize + maxWrite
+
+// reqPool is a pool of messages.
+//
+// Lifetime of a logical message is from getMessage to putMessage.
+// getMessage is called by ReadRequest. 
putMessage is called by +// Conn.ReadRequest, Request.Respond, or Request.RespondError. +// +// Messages in the pool are guaranteed to have conn and off zeroed, +// buf allocated and len==bufSize, and hdr set. +var reqPool = sync.Pool{ + New: allocMessage, +} + +func allocMessage() interface{} { + m := &message{buf: make([]byte, bufSize)} + m.hdr = (*inHeader)(unsafe.Pointer(&m.buf[0])) + return m +} + +func getMessage(c *Conn) *message { + m := reqPool.Get().(*message) + m.conn = c + return m +} + +func putMessage(m *message) { + m.buf = m.buf[:bufSize] + m.conn = nil + m.off = 0 + reqPool.Put(m) +} + +// a message represents the bytes of a single FUSE message +type message struct { + conn *Conn + buf []byte // all bytes + hdr *inHeader // header + off int // offset for reading additional fields +} + +func (m *message) len() uintptr { + return uintptr(len(m.buf) - m.off) +} + +func (m *message) data() unsafe.Pointer { + var p unsafe.Pointer + if m.off < len(m.buf) { + p = unsafe.Pointer(&m.buf[m.off]) + } + return p +} + +func (m *message) bytes() []byte { + return m.buf[m.off:] +} + +func (m *message) Header() Header { + h := m.hdr + return Header{ + Conn: m.conn, + ID: RequestID(h.Unique), + Node: NodeID(h.Nodeid), + Uid: h.Uid, + Gid: h.Gid, + Pid: h.Pid, + + msg: m, + } +} + +// fileMode returns a Go os.FileMode from a Unix mode. +func fileMode(unixMode uint32) os.FileMode { + mode := os.FileMode(unixMode & 0777) + switch unixMode & syscall.S_IFMT { + case syscall.S_IFREG: + // nothing + case syscall.S_IFDIR: + mode |= os.ModeDir + case syscall.S_IFCHR: + mode |= os.ModeCharDevice | os.ModeDevice + case syscall.S_IFBLK: + mode |= os.ModeDevice + case syscall.S_IFIFO: + mode |= os.ModeNamedPipe + case syscall.S_IFLNK: + mode |= os.ModeSymlink + case syscall.S_IFSOCK: + mode |= os.ModeSocket + default: + // no idea + mode |= os.ModeDevice + } + if unixMode&syscall.S_ISUID != 0 { + mode |= os.ModeSetuid + } + if unixMode&syscall.S_ISGID != 0 { + mode |= os.ModeSetgid + } + return mode +} + +type noOpcode struct { + Opcode uint32 +} + +func (m noOpcode) String() string { + return fmt.Sprintf("No opcode %v", m.Opcode) +} + +type malformedMessage struct { +} + +func (malformedMessage) String() string { + return "malformed message" +} + +// Close closes the FUSE connection. +func (c *Conn) Close() error { + c.wio.Lock() + defer c.wio.Unlock() + c.rio.Lock() + defer c.rio.Unlock() + return c.dev.Close() +} + +// caller must hold wio or rio +func (c *Conn) fd() int { + return int(c.dev.Fd()) +} + +func (c *Conn) Protocol() Protocol { + return c.proto +} + +// ReadRequest returns the next FUSE request from the kernel. +// +// Caller must call either Request.Respond or Request.RespondError in +// a reasonable time. Caller must not retain Request after that call. +func (c *Conn) ReadRequest() (Request, error) { + m := getMessage(c) +loop: + c.rio.RLock() + n, err := syscall.Read(c.fd(), m.buf) + c.rio.RUnlock() + if err == syscall.EINTR { + // OSXFUSE sends EINTR to userspace when a request interrupt + // completed before it got sent to userspace? + goto loop + } + if err != nil && err != syscall.ENODEV { + putMessage(m) + return nil, err + } + if n <= 0 { + putMessage(m) + return nil, io.EOF + } + m.buf = m.buf[:n] + + if n < inHeaderSize { + putMessage(m) + return nil, errors.New("fuse: message too short") + } + + // FreeBSD FUSE sends a short length in the header + // for FUSE_INIT even though the actual read length is correct. 
+ if n == inHeaderSize+initInSize && m.hdr.Opcode == opInit && m.hdr.Len < uint32(n) { + m.hdr.Len = uint32(n) + } + + // OSXFUSE sometimes sends the wrong m.hdr.Len in a FUSE_WRITE message. + if m.hdr.Len < uint32(n) && m.hdr.Len >= uint32(unsafe.Sizeof(writeIn{})) && m.hdr.Opcode == opWrite { + m.hdr.Len = uint32(n) + } + + if m.hdr.Len != uint32(n) { + // prepare error message before returning m to pool + err := fmt.Errorf("fuse: read %d opcode %d but expected %d", n, m.hdr.Opcode, m.hdr.Len) + putMessage(m) + return nil, err + } + + m.off = inHeaderSize + + // Convert to data structures. + // Do not trust kernel to hand us well-formed data. + var req Request + switch m.hdr.Opcode { + default: + Debug(noOpcode{Opcode: m.hdr.Opcode}) + goto unrecognized + + case opLookup: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &LookupRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + } + + case opForget: + in := (*forgetIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ForgetRequest{ + Header: m.Header(), + N: in.Nlookup, + } + + case opGetattr: + switch { + case c.proto.LT(Protocol{7, 9}): + req = &GetattrRequest{ + Header: m.Header(), + } + + default: + in := (*getattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &GetattrRequest{ + Header: m.Header(), + Flags: GetattrFlags(in.GetattrFlags), + Handle: HandleID(in.Fh), + } + } + + case opSetattr: + in := (*setattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &SetattrRequest{ + Header: m.Header(), + Valid: SetattrValid(in.Valid), + Handle: HandleID(in.Fh), + Size: in.Size, + Atime: time.Unix(int64(in.Atime), int64(in.AtimeNsec)), + Mtime: time.Unix(int64(in.Mtime), int64(in.MtimeNsec)), + Mode: fileMode(in.Mode), + Uid: in.Uid, + Gid: in.Gid, + Bkuptime: in.BkupTime(), + Chgtime: in.Chgtime(), + Flags: in.Flags(), + } + + case opReadlink: + if len(m.bytes()) > 0 { + goto corrupt + } + req = &ReadlinkRequest{ + Header: m.Header(), + } + + case opSymlink: + // m.bytes() is "newName\0target\0" + names := m.bytes() + if len(names) == 0 || names[len(names)-1] != 0 { + goto corrupt + } + i := bytes.IndexByte(names, '\x00') + if i < 0 { + goto corrupt + } + newName, target := names[0:i], names[i+1:len(names)-1] + req = &SymlinkRequest{ + Header: m.Header(), + NewName: string(newName), + Target: string(target), + } + + case opLink: + in := (*linkIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + newName := m.bytes()[unsafe.Sizeof(*in):] + if len(newName) < 2 || newName[len(newName)-1] != 0 { + goto corrupt + } + newName = newName[:len(newName)-1] + req = &LinkRequest{ + Header: m.Header(), + OldNode: NodeID(in.Oldnodeid), + NewName: string(newName), + } + + case opMknod: + size := mknodInSize(c.proto) + if m.len() < size { + goto corrupt + } + in := (*mknodIn)(m.data()) + name := m.bytes()[size:] + if len(name) < 2 || name[len(name)-1] != '\x00' { + goto corrupt + } + name = name[:len(name)-1] + r := &MknodRequest{ + Header: m.Header(), + Mode: fileMode(in.Mode), + Rdev: in.Rdev, + Name: string(name), + } + if c.proto.GE(Protocol{7, 12}) { + r.Umask = fileMode(in.Umask) & os.ModePerm + } + req = r + + case opMkdir: + size := mkdirInSize(c.proto) + if m.len() < size { + goto corrupt + } + in := (*mkdirIn)(m.data()) + name := m.bytes()[size:] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + r := &MkdirRequest{ + Header: m.Header(), + Name: string(name[:i]), + // 
observed on Linux: mkdirIn.Mode & syscall.S_IFMT == 0,
+			// and this causes fileMode to go into its "no idea"
+			// code branch; enforce type to directory
+			Mode: fileMode((in.Mode &^ syscall.S_IFMT) | syscall.S_IFDIR),
+		}
+		if c.proto.GE(Protocol{7, 12}) {
+			r.Umask = fileMode(in.Umask) & os.ModePerm
+		}
+		req = r
+
+	case opUnlink, opRmdir:
+		buf := m.bytes()
+		n := len(buf)
+		if n == 0 || buf[n-1] != '\x00' {
+			goto corrupt
+		}
+		req = &RemoveRequest{
+			Header: m.Header(),
+			Name:   string(buf[:n-1]),
+			Dir:    m.hdr.Opcode == opRmdir,
+		}
+
+	case opRename:
+		in := (*renameIn)(m.data())
+		if m.len() < unsafe.Sizeof(*in) {
+			goto corrupt
+		}
+		newDirNodeID := NodeID(in.Newdir)
+		oldNew := m.bytes()[unsafe.Sizeof(*in):]
+		// oldNew should be "old\x00new\x00"
+		if len(oldNew) < 4 {
+			goto corrupt
+		}
+		if oldNew[len(oldNew)-1] != '\x00' {
+			goto corrupt
+		}
+		i := bytes.IndexByte(oldNew, '\x00')
+		if i < 0 {
+			goto corrupt
+		}
+		oldName, newName := string(oldNew[:i]), string(oldNew[i+1:len(oldNew)-1])
+		req = &RenameRequest{
+			Header:  m.Header(),
+			NewDir:  newDirNodeID,
+			OldName: oldName,
+			NewName: newName,
+		}
+
+	case opOpendir, opOpen:
+		in := (*openIn)(m.data())
+		if m.len() < unsafe.Sizeof(*in) {
+			goto corrupt
+		}
+		req = &OpenRequest{
+			Header: m.Header(),
+			Dir:    m.hdr.Opcode == opOpendir,
+			Flags:  openFlags(in.Flags),
+		}
+
+	case opRead, opReaddir:
+		in := (*readIn)(m.data())
+		if m.len() < readInSize(c.proto) {
+			goto corrupt
+		}
+		r := &ReadRequest{
+			Header: m.Header(),
+			Dir:    m.hdr.Opcode == opReaddir,
+			Handle: HandleID(in.Fh),
+			Offset: int64(in.Offset),
+			Size:   int(in.Size),
+		}
+		if c.proto.GE(Protocol{7, 9}) {
+			r.Flags = ReadFlags(in.ReadFlags)
+			r.LockOwner = in.LockOwner
+			r.FileFlags = openFlags(in.Flags)
+		}
+		req = r
+
+	case opWrite:
+		in := (*writeIn)(m.data())
+		if m.len() < writeInSize(c.proto) {
+			goto corrupt
+		}
+		r := &WriteRequest{
+			Header: m.Header(),
+			Handle: HandleID(in.Fh),
+			Offset: int64(in.Offset),
+			Flags:  WriteFlags(in.WriteFlags),
+		}
+		if c.proto.GE(Protocol{7, 9}) {
+			r.LockOwner = in.LockOwner
+			r.FileFlags = openFlags(in.Flags)
+		}
+		buf := m.bytes()[writeInSize(c.proto):]
+		if uint32(len(buf)) < in.Size {
+			goto corrupt
+		}
+		r.Data = buf
+		req = r
+
+	case opStatfs:
+		req = &StatfsRequest{
+			Header: m.Header(),
+		}
+
+	case opRelease, opReleasedir:
+		in := (*releaseIn)(m.data())
+		if m.len() < unsafe.Sizeof(*in) {
+			goto corrupt
+		}
+		req = &ReleaseRequest{
+			Header:       m.Header(),
+			Dir:          m.hdr.Opcode == opReleasedir,
+			Handle:       HandleID(in.Fh),
+			Flags:        openFlags(in.Flags),
+			ReleaseFlags: ReleaseFlags(in.ReleaseFlags),
+			LockOwner:    in.LockOwner,
+		}
+
+	case opFsync, opFsyncdir:
+		in := (*fsyncIn)(m.data())
+		if m.len() < unsafe.Sizeof(*in) {
+			goto corrupt
+		}
+		req = &FsyncRequest{
+			Dir:    m.hdr.Opcode == opFsyncdir,
+			Header: m.Header(),
+			Handle: HandleID(in.Fh),
+			Flags:  in.FsyncFlags,
+		}
+
+	case opSetxattr:
+		in := (*setxattrIn)(m.data())
+		if m.len() < unsafe.Sizeof(*in) {
+			goto corrupt
+		}
+		m.off += int(unsafe.Sizeof(*in))
+		name := m.bytes()
+		i := bytes.IndexByte(name, '\x00')
+		if i < 0 {
+			goto corrupt
+		}
+		xattr := name[i+1:]
+		if uint32(len(xattr)) < in.Size {
+			goto corrupt
+		}
+		xattr = xattr[:in.Size]
+		req = &SetxattrRequest{
+			Header:   m.Header(),
+			Flags:    in.Flags,
+			Position: in.position(),
+			Name:     string(name[:i]),
+			Xattr:    xattr,
+		}
+
+	case opGetxattr:
+		in := (*getxattrIn)(m.data())
+		if m.len() < unsafe.Sizeof(*in) {
+			goto corrupt
+		}
+		name := m.bytes()[unsafe.Sizeof(*in):]
+		i := bytes.IndexByte(name, '\x00')
+ if i < 0 { + goto corrupt + } + req = &GetxattrRequest{ + Header: m.Header(), + Name: string(name[:i]), + Size: in.Size, + Position: in.position(), + } + + case opListxattr: + in := (*getxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ListxattrRequest{ + Header: m.Header(), + Size: in.Size, + Position: in.position(), + } + + case opRemovexattr: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &RemovexattrRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + } + + case opFlush: + in := (*flushIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &FlushRequest{ + Header: m.Header(), + Handle: HandleID(in.Fh), + Flags: in.FlushFlags, + LockOwner: in.LockOwner, + } + + case opInit: + in := (*initIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &InitRequest{ + Header: m.Header(), + Kernel: Protocol{in.Major, in.Minor}, + MaxReadahead: in.MaxReadahead, + Flags: InitFlags(in.Flags), + } + + case opGetlk: + panic("opGetlk") + case opSetlk: + panic("opSetlk") + case opSetlkw: + panic("opSetlkw") + + case opAccess: + in := (*accessIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &AccessRequest{ + Header: m.Header(), + Mask: in.Mask, + } + + case opCreate: + size := createInSize(c.proto) + if m.len() < size { + goto corrupt + } + in := (*createIn)(m.data()) + name := m.bytes()[size:] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + r := &CreateRequest{ + Header: m.Header(), + Flags: openFlags(in.Flags), + Mode: fileMode(in.Mode), + Name: string(name[:i]), + } + if c.proto.GE(Protocol{7, 12}) { + r.Umask = fileMode(in.Umask) & os.ModePerm + } + req = r + + case opInterrupt: + in := (*interruptIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &InterruptRequest{ + Header: m.Header(), + IntrID: RequestID(in.Unique), + } + + case opBmap: + panic("opBmap") + + case opDestroy: + req = &DestroyRequest{ + Header: m.Header(), + } + + // OS X + case opSetvolname: + panic("opSetvolname") + case opGetxtimes: + panic("opGetxtimes") + case opExchange: + in := (*exchangeIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + oldDirNodeID := NodeID(in.Olddir) + newDirNodeID := NodeID(in.Newdir) + oldNew := m.bytes()[unsafe.Sizeof(*in):] + // oldNew should be "oldname\x00newname\x00" + if len(oldNew) < 4 { + goto corrupt + } + if oldNew[len(oldNew)-1] != '\x00' { + goto corrupt + } + i := bytes.IndexByte(oldNew, '\x00') + if i < 0 { + goto corrupt + } + oldName, newName := string(oldNew[:i]), string(oldNew[i+1:len(oldNew)-1]) + req = &ExchangeDataRequest{ + Header: m.Header(), + OldDir: oldDirNodeID, + NewDir: newDirNodeID, + OldName: oldName, + NewName: newName, + // TODO options + } + } + + return req, nil + +corrupt: + Debug(malformedMessage{}) + putMessage(m) + return nil, fmt.Errorf("fuse: malformed message") + +unrecognized: + // Unrecognized message. + // Assume higher-level code will send a "no idea what you mean" error. 
+ h := m.Header() + return &h, nil +} + +type bugShortKernelWrite struct { + Written int64 + Length int64 + Error string + Stack string +} + +func (b bugShortKernelWrite) String() string { + return fmt.Sprintf("short kernel write: written=%d/%d error=%q stack=\n%s", b.Written, b.Length, b.Error, b.Stack) +} + +type bugKernelWriteError struct { + Error string + Stack string +} + +func (b bugKernelWriteError) String() string { + return fmt.Sprintf("kernel write error: error=%q stack=\n%s", b.Error, b.Stack) +} + +// safe to call even with nil error +func errorString(err error) string { + if err == nil { + return "" + } + return err.Error() +} + +func (c *Conn) writeToKernel(msg []byte) error { + out := (*outHeader)(unsafe.Pointer(&msg[0])) + out.Len = uint32(len(msg)) + + c.wio.RLock() + defer c.wio.RUnlock() + nn, err := syscall.Write(c.fd(), msg) + if err == nil && nn != len(msg) { + Debug(bugShortKernelWrite{ + Written: int64(nn), + Length: int64(len(msg)), + Error: errorString(err), + Stack: stack(), + }) + } + return err +} + +func (c *Conn) respond(msg []byte) { + if err := c.writeToKernel(msg); err != nil { + Debug(bugKernelWriteError{ + Error: errorString(err), + Stack: stack(), + }) + } +} + +type notCachedError struct{} + +func (notCachedError) Error() string { + return "node not cached" +} + +var _ ErrorNumber = notCachedError{} + +func (notCachedError) Errno() Errno { + // Behave just like if the original syscall.ENOENT had been passed + // straight through. + return ENOENT +} + +var ( + ErrNotCached = notCachedError{} +) + +// sendInvalidate sends an invalidate notification to kernel. +// +// A returned ENOENT is translated to a friendlier error. +func (c *Conn) sendInvalidate(msg []byte) error { + switch err := c.writeToKernel(msg); err { + case syscall.ENOENT: + return ErrNotCached + default: + return err + } +} + +// InvalidateNode invalidates the kernel cache of the attributes and a +// range of the data of a node. +// +// Giving offset 0 and size -1 means all data. To invalidate just the +// attributes, give offset 0 and size 0. +// +// Returns ErrNotCached if the kernel is not currently caching the +// node. +func (c *Conn) InvalidateNode(nodeID NodeID, off int64, size int64) error { + buf := newBuffer(unsafe.Sizeof(notifyInvalInodeOut{})) + h := (*outHeader)(unsafe.Pointer(&buf[0])) + // h.Unique is 0 + h.Error = notifyCodeInvalInode + out := (*notifyInvalInodeOut)(buf.alloc(unsafe.Sizeof(notifyInvalInodeOut{}))) + out.Ino = uint64(nodeID) + out.Off = off + out.Len = size + return c.sendInvalidate(buf) +} + +// InvalidateEntry invalidates the kernel cache of the directory entry +// identified by parent directory node ID and entry basename. +// +// Kernel may or may not cache directory listings. To invalidate +// those, use InvalidateNode to invalidate all of the data for a +// directory. (As of 2015-06, Linux FUSE does not cache directory +// listings.) +// +// Returns ErrNotCached if the kernel is not currently caching the +// node. 
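+//
+// For example, after the server learns that entry "old" was removed
+// from a directory out of band (sketch; parent is an assumed NodeID
+// held by the caller):
+//
+//	if err := c.InvalidateEntry(parent, "old"); err != nil && err != ErrNotCached {
+//		log.Println("invalidate:", err)
+//	}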
+func (c *Conn) InvalidateEntry(parent NodeID, name string) error { + const maxUint32 = ^uint32(0) + if uint64(len(name)) > uint64(maxUint32) { + // very unlikely, but we don't want to silently truncate + return syscall.ENAMETOOLONG + } + buf := newBuffer(unsafe.Sizeof(notifyInvalEntryOut{}) + uintptr(len(name)) + 1) + h := (*outHeader)(unsafe.Pointer(&buf[0])) + // h.Unique is 0 + h.Error = notifyCodeInvalEntry + out := (*notifyInvalEntryOut)(buf.alloc(unsafe.Sizeof(notifyInvalEntryOut{}))) + out.Parent = uint64(parent) + out.Namelen = uint32(len(name)) + buf = append(buf, name...) + buf = append(buf, '\x00') + return c.sendInvalidate(buf) +} + +// An InitRequest is the first request sent on a FUSE file system. +type InitRequest struct { + Header `json:"-"` + Kernel Protocol + // Maximum readahead in bytes that the kernel plans to use. + MaxReadahead uint32 + Flags InitFlags +} + +var _ = Request(&InitRequest{}) + +func (r *InitRequest) String() string { + return fmt.Sprintf("Init [%v] %v ra=%d fl=%v", &r.Header, r.Kernel, r.MaxReadahead, r.Flags) +} + +// An InitResponse is the response to an InitRequest. +type InitResponse struct { + Library Protocol + // Maximum readahead in bytes that the kernel can use. Ignored if + // greater than InitRequest.MaxReadahead. + MaxReadahead uint32 + Flags InitFlags + // Maximum size of a single write operation. + // Linux enforces a minimum of 4 KiB. + MaxWrite uint32 +} + +func (r *InitResponse) String() string { + return fmt.Sprintf("Init %v ra=%d fl=%v w=%d", r.Library, r.MaxReadahead, r.Flags, r.MaxWrite) +} + +// Respond replies to the request with the given response. +func (r *InitRequest) Respond(resp *InitResponse) { + buf := newBuffer(unsafe.Sizeof(initOut{})) + out := (*initOut)(buf.alloc(unsafe.Sizeof(initOut{}))) + out.Major = resp.Library.Major + out.Minor = resp.Library.Minor + out.MaxReadahead = resp.MaxReadahead + out.Flags = uint32(resp.Flags) + out.MaxWrite = resp.MaxWrite + + // MaxWrite larger than our receive buffer would just lead to + // errors on large writes. + if out.MaxWrite > maxWrite { + out.MaxWrite = maxWrite + } + r.respond(buf) +} + +// A StatfsRequest requests information about the mounted file system. +type StatfsRequest struct { + Header `json:"-"` +} + +var _ = Request(&StatfsRequest{}) + +func (r *StatfsRequest) String() string { + return fmt.Sprintf("Statfs [%s]", &r.Header) +} + +// Respond replies to the request with the given response. +func (r *StatfsRequest) Respond(resp *StatfsResponse) { + buf := newBuffer(unsafe.Sizeof(statfsOut{})) + out := (*statfsOut)(buf.alloc(unsafe.Sizeof(statfsOut{}))) + out.St = kstatfs{ + Blocks: resp.Blocks, + Bfree: resp.Bfree, + Bavail: resp.Bavail, + Files: resp.Files, + Ffree: resp.Ffree, + Bsize: resp.Bsize, + Namelen: resp.Namelen, + Frsize: resp.Frsize, + } + r.respond(buf) +} + +// A StatfsResponse is the response to a StatfsRequest. +type StatfsResponse struct { + Blocks uint64 // Total data blocks in file system. + Bfree uint64 // Free blocks in file system. + Bavail uint64 // Free blocks in file system if you're not root. + Files uint64 // Total files in file system. + Ffree uint64 // Free files in file system. + Bsize uint32 // Block size + Namelen uint32 // Maximum file name length? + Frsize uint32 // Fragment size, smallest addressable data size in the file system. 
+} + +func (r *StatfsResponse) String() string { + return fmt.Sprintf("Statfs blocks=%d/%d/%d files=%d/%d bsize=%d frsize=%d namelen=%d", + r.Bavail, r.Bfree, r.Blocks, + r.Ffree, r.Files, + r.Bsize, + r.Frsize, + r.Namelen, + ) +} + +// An AccessRequest asks whether the file can be accessed +// for the purpose specified by the mask. +type AccessRequest struct { + Header `json:"-"` + Mask uint32 +} + +var _ = Request(&AccessRequest{}) + +func (r *AccessRequest) String() string { + return fmt.Sprintf("Access [%s] mask=%#x", &r.Header, r.Mask) +} + +// Respond replies to the request indicating that access is allowed. +// To deny access, use RespondError. +func (r *AccessRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// An Attr is the metadata for a single file or directory. +type Attr struct { + Valid time.Duration // how long Attr can be cached + + Inode uint64 // inode number + Size uint64 // size in bytes + Blocks uint64 // size in 512-byte units + Atime time.Time // time of last access + Mtime time.Time // time of last modification + Ctime time.Time // time of last inode change + Crtime time.Time // time of creation (OS X only) + Mode os.FileMode // file mode + Nlink uint32 // number of links (usually 1) + Uid uint32 // owner uid + Gid uint32 // group gid + Rdev uint32 // device numbers + Flags uint32 // chflags(2) flags (OS X only) + BlockSize uint32 // preferred blocksize for filesystem I/O +} + +func (a Attr) String() string { + return fmt.Sprintf("valid=%v ino=%v size=%d mode=%v", a.Valid, a.Inode, a.Size, a.Mode) +} + +func unix(t time.Time) (sec uint64, nsec uint32) { + nano := t.UnixNano() + sec = uint64(nano / 1e9) + nsec = uint32(nano % 1e9) + return +} + +func (a *Attr) attr(out *attr, proto Protocol) { + out.Ino = a.Inode + out.Size = a.Size + out.Blocks = a.Blocks + out.Atime, out.AtimeNsec = unix(a.Atime) + out.Mtime, out.MtimeNsec = unix(a.Mtime) + out.Ctime, out.CtimeNsec = unix(a.Ctime) + out.SetCrtime(unix(a.Crtime)) + out.Mode = uint32(a.Mode) & 0777 + switch { + default: + out.Mode |= syscall.S_IFREG + case a.Mode&os.ModeDir != 0: + out.Mode |= syscall.S_IFDIR + case a.Mode&os.ModeDevice != 0: + if a.Mode&os.ModeCharDevice != 0 { + out.Mode |= syscall.S_IFCHR + } else { + out.Mode |= syscall.S_IFBLK + } + case a.Mode&os.ModeNamedPipe != 0: + out.Mode |= syscall.S_IFIFO + case a.Mode&os.ModeSymlink != 0: + out.Mode |= syscall.S_IFLNK + case a.Mode&os.ModeSocket != 0: + out.Mode |= syscall.S_IFSOCK + } + if a.Mode&os.ModeSetuid != 0 { + out.Mode |= syscall.S_ISUID + } + if a.Mode&os.ModeSetgid != 0 { + out.Mode |= syscall.S_ISGID + } + out.Nlink = a.Nlink + out.Uid = a.Uid + out.Gid = a.Gid + out.Rdev = a.Rdev + out.SetFlags(a.Flags) + if proto.GE(Protocol{7, 9}) { + out.Blksize = a.BlockSize + } + + return +} + +// A GetattrRequest asks for the metadata for the file denoted by r.Node. +type GetattrRequest struct { + Header `json:"-"` + Flags GetattrFlags + Handle HandleID +} + +var _ = Request(&GetattrRequest{}) + +func (r *GetattrRequest) String() string { + return fmt.Sprintf("Getattr [%s] %v fl=%v", &r.Header, r.Handle, r.Flags) +} + +// Respond replies to the request with the given response. 
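+//
+// A typical caller fills in the attributes and how long they may be
+// cached before responding (sketch; the values are illustrative):
+//
+//	resp := &GetattrResponse{}
+//	resp.Attr = Attr{
+//		Valid: time.Minute,
+//		Inode: 2,
+//		Mode:  0444,
+//		Size:  6,
+//	}
+//	r.Respond(resp)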
+func (r *GetattrRequest) Respond(resp *GetattrResponse) { + size := attrOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*attrOut)(buf.alloc(size)) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A GetattrResponse is the response to a GetattrRequest. +type GetattrResponse struct { + Attr Attr // file attributes +} + +func (r *GetattrResponse) String() string { + return fmt.Sprintf("Getattr %v", r.Attr) +} + +// A GetxattrRequest asks for the extended attributes associated with r.Node. +type GetxattrRequest struct { + Header `json:"-"` + + // Maximum size to return. + Size uint32 + + // Name of the attribute requested. + Name string + + // Offset within extended attributes. + // + // Only valid for OS X, and then only with the resource fork + // attribute. + Position uint32 +} + +var _ = Request(&GetxattrRequest{}) + +func (r *GetxattrRequest) String() string { + return fmt.Sprintf("Getxattr [%s] %q %d @%d", &r.Header, r.Name, r.Size, r.Position) +} + +// Respond replies to the request with the given response. +func (r *GetxattrRequest) Respond(resp *GetxattrResponse) { + if r.Size == 0 { + buf := newBuffer(unsafe.Sizeof(getxattrOut{})) + out := (*getxattrOut)(buf.alloc(unsafe.Sizeof(getxattrOut{}))) + out.Size = uint32(len(resp.Xattr)) + r.respond(buf) + } else { + buf := newBuffer(uintptr(len(resp.Xattr))) + buf = append(buf, resp.Xattr...) + r.respond(buf) + } +} + +// A GetxattrResponse is the response to a GetxattrRequest. +type GetxattrResponse struct { + Xattr []byte +} + +func (r *GetxattrResponse) String() string { + return fmt.Sprintf("Getxattr %x", r.Xattr) +} + +// A ListxattrRequest asks to list the extended attributes associated with r.Node. +type ListxattrRequest struct { + Header `json:"-"` + Size uint32 // maximum size to return + Position uint32 // offset within attribute list +} + +var _ = Request(&ListxattrRequest{}) + +func (r *ListxattrRequest) String() string { + return fmt.Sprintf("Listxattr [%s] %d @%d", &r.Header, r.Size, r.Position) +} + +// Respond replies to the request with the given response. +func (r *ListxattrRequest) Respond(resp *ListxattrResponse) { + if r.Size == 0 { + buf := newBuffer(unsafe.Sizeof(getxattrOut{})) + out := (*getxattrOut)(buf.alloc(unsafe.Sizeof(getxattrOut{}))) + out.Size = uint32(len(resp.Xattr)) + r.respond(buf) + } else { + buf := newBuffer(uintptr(len(resp.Xattr))) + buf = append(buf, resp.Xattr...) + r.respond(buf) + } +} + +// A ListxattrResponse is the response to a ListxattrRequest. +type ListxattrResponse struct { + Xattr []byte +} + +func (r *ListxattrResponse) String() string { + return fmt.Sprintf("Listxattr %x", r.Xattr) +} + +// Append adds an extended attribute name to the response. +func (r *ListxattrResponse) Append(names ...string) { + for _, name := range names { + r.Xattr = append(r.Xattr, name...) + r.Xattr = append(r.Xattr, '\x00') + } +} + +// A RemovexattrRequest asks to remove an extended attribute associated with r.Node. +type RemovexattrRequest struct { + Header `json:"-"` + Name string // name of extended attribute +} + +var _ = Request(&RemovexattrRequest{}) + +func (r *RemovexattrRequest) String() string { + return fmt.Sprintf("Removexattr [%s] %q", &r.Header, r.Name) +} + +// Respond replies to the request, indicating that the attribute was removed. 
+func (r *RemovexattrRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A SetxattrRequest asks to set an extended attribute associated with a file. +type SetxattrRequest struct { + Header `json:"-"` + + // Flags can make the request fail if attribute does/not already + // exist. Unfortunately, the constants are platform-specific and + // not exposed by Go1.2. Look for XATTR_CREATE, XATTR_REPLACE. + // + // TODO improve this later + // + // TODO XATTR_CREATE and exist -> EEXIST + // + // TODO XATTR_REPLACE and not exist -> ENODATA + Flags uint32 + + // Offset within extended attributes. + // + // Only valid for OS X, and then only with the resource fork + // attribute. + Position uint32 + + Name string + Xattr []byte +} + +var _ = Request(&SetxattrRequest{}) + +func trunc(b []byte, max int) ([]byte, string) { + if len(b) > max { + return b[:max], "..." + } + return b, "" +} + +func (r *SetxattrRequest) String() string { + xattr, tail := trunc(r.Xattr, 16) + return fmt.Sprintf("Setxattr [%s] %q %x%s fl=%v @%#x", &r.Header, r.Name, xattr, tail, r.Flags, r.Position) +} + +// Respond replies to the request, indicating that the extended attribute was set. +func (r *SetxattrRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A LookupRequest asks to look up the given name in the directory named by r.Node. +type LookupRequest struct { + Header `json:"-"` + Name string +} + +var _ = Request(&LookupRequest{}) + +func (r *LookupRequest) String() string { + return fmt.Sprintf("Lookup [%s] %q", &r.Header, r.Name) +} + +// Respond replies to the request with the given response. +func (r *LookupRequest) Respond(resp *LookupResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A LookupResponse is the response to a LookupRequest. +type LookupResponse struct { + Node NodeID + Generation uint64 + EntryValid time.Duration + Attr Attr +} + +func (r *LookupResponse) string() string { + return fmt.Sprintf("%v gen=%d valid=%v attr={%v}", r.Node, r.Generation, r.EntryValid, r.Attr) +} + +func (r *LookupResponse) String() string { + return fmt.Sprintf("Lookup %s", r.string()) +} + +// An OpenRequest asks to open a file or directory +type OpenRequest struct { + Header `json:"-"` + Dir bool // is this Opendir? + Flags OpenFlags +} + +var _ = Request(&OpenRequest{}) + +func (r *OpenRequest) String() string { + return fmt.Sprintf("Open [%s] dir=%v fl=%v", &r.Header, r.Dir, r.Flags) +} + +// Respond replies to the request with the given response. +func (r *OpenRequest) Respond(resp *OpenResponse) { + buf := newBuffer(unsafe.Sizeof(openOut{})) + out := (*openOut)(buf.alloc(unsafe.Sizeof(openOut{}))) + out.Fh = uint64(resp.Handle) + out.OpenFlags = uint32(resp.Flags) + r.respond(buf) +} + +// A OpenResponse is the response to a OpenRequest. 
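[editor's note] Lookup, like Getattr before it, sends each validity TTL over the wire as a split seconds/nanoseconds pair. A quick standalone check of that arithmetic, outside the fuse package:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Same arithmetic as the Respond methods: a time.Duration TTL
	// becomes whole seconds plus a nanosecond remainder.
	valid := 1500 * time.Millisecond
	sec := uint64(valid / time.Second)
	nsec := uint32(valid % time.Second / time.Nanosecond)
	fmt.Println(sec, nsec) // 1 500000000
}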
+type OpenResponse struct { + Handle HandleID + Flags OpenResponseFlags +} + +func (r *OpenResponse) string() string { + return fmt.Sprintf("%v fl=%v", r.Handle, r.Flags) +} + +func (r *OpenResponse) String() string { + return fmt.Sprintf("Open %s", r.string()) +} + +// A CreateRequest asks to create and open a file (not a directory). +type CreateRequest struct { + Header `json:"-"` + Name string + Flags OpenFlags + Mode os.FileMode + // Umask of the request. Not supported on OS X. + Umask os.FileMode +} + +var _ = Request(&CreateRequest{}) + +func (r *CreateRequest) String() string { + return fmt.Sprintf("Create [%s] %q fl=%v mode=%v umask=%v", &r.Header, r.Name, r.Flags, r.Mode, r.Umask) +} + +// Respond replies to the request with the given response. +func (r *CreateRequest) Respond(resp *CreateResponse) { + eSize := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(eSize + unsafe.Sizeof(openOut{})) + + e := (*entryOut)(buf.alloc(eSize)) + e.Nodeid = uint64(resp.Node) + e.Generation = resp.Generation + e.EntryValid = uint64(resp.EntryValid / time.Second) + e.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + e.AttrValid = uint64(resp.Attr.Valid / time.Second) + e.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&e.Attr, r.Header.Conn.proto) + + o := (*openOut)(buf.alloc(unsafe.Sizeof(openOut{}))) + o.Fh = uint64(resp.Handle) + o.OpenFlags = uint32(resp.Flags) + + r.respond(buf) +} + +// A CreateResponse is the response to a CreateRequest. +// It describes the created node and opened handle. +type CreateResponse struct { + LookupResponse + OpenResponse +} + +func (r *CreateResponse) String() string { + return fmt.Sprintf("Create {%s} {%s}", r.LookupResponse.string(), r.OpenResponse.string()) +} + +// A MkdirRequest asks to create (but not open) a directory. +type MkdirRequest struct { + Header `json:"-"` + Name string + Mode os.FileMode + // Umask of the request. Not supported on OS X. + Umask os.FileMode +} + +var _ = Request(&MkdirRequest{}) + +func (r *MkdirRequest) String() string { + return fmt.Sprintf("Mkdir [%s] %q mode=%v umask=%v", &r.Header, r.Name, r.Mode, r.Umask) +} + +// Respond replies to the request with the given response. +func (r *MkdirRequest) Respond(resp *MkdirResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A MkdirResponse is the response to a MkdirRequest. +type MkdirResponse struct { + LookupResponse +} + +func (r *MkdirResponse) String() string { + return fmt.Sprintf("Mkdir %v", r.LookupResponse.string()) +} + +// A ReadRequest asks to read from an open file. +type ReadRequest struct { + Header `json:"-"` + Dir bool // is this Readdir? 
+ Handle HandleID + Offset int64 + Size int + Flags ReadFlags + LockOwner uint64 + FileFlags OpenFlags +} + +var _ = Request(&ReadRequest{}) + +func (r *ReadRequest) String() string { + return fmt.Sprintf("Read [%s] %v %d @%#x dir=%v fl=%v lock=%d ffl=%v", &r.Header, r.Handle, r.Size, r.Offset, r.Dir, r.Flags, r.LockOwner, r.FileFlags) +} + +// Respond replies to the request with the given response. +func (r *ReadRequest) Respond(resp *ReadResponse) { + buf := newBuffer(uintptr(len(resp.Data))) + buf = append(buf, resp.Data...) + r.respond(buf) +} + +// A ReadResponse is the response to a ReadRequest. +type ReadResponse struct { + Data []byte +} + +func (r *ReadResponse) String() string { + return fmt.Sprintf("Read %d", len(r.Data)) +} + +type jsonReadResponse struct { + Len uint64 +} + +func (r *ReadResponse) MarshalJSON() ([]byte, error) { + j := jsonReadResponse{ + Len: uint64(len(r.Data)), + } + return json.Marshal(j) +} + +// A ReleaseRequest asks to release (close) an open file handle. +type ReleaseRequest struct { + Header `json:"-"` + Dir bool // is this Releasedir? + Handle HandleID + Flags OpenFlags // flags from OpenRequest + ReleaseFlags ReleaseFlags + LockOwner uint32 +} + +var _ = Request(&ReleaseRequest{}) + +func (r *ReleaseRequest) String() string { + return fmt.Sprintf("Release [%s] %v fl=%v rfl=%v owner=%#x", &r.Header, r.Handle, r.Flags, r.ReleaseFlags, r.LockOwner) +} + +// Respond replies to the request, indicating that the handle has been released. +func (r *ReleaseRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A DestroyRequest is sent by the kernel when unmounting the file system. +// No more requests will be received after this one, but it should still be +// responded to. +type DestroyRequest struct { + Header `json:"-"` +} + +var _ = Request(&DestroyRequest{}) + +func (r *DestroyRequest) String() string { + return fmt.Sprintf("Destroy [%s]", &r.Header) +} + +// Respond replies to the request. +func (r *DestroyRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A ForgetRequest is sent by the kernel when forgetting about r.Node +// as returned by r.N lookup requests. +type ForgetRequest struct { + Header `json:"-"` + N uint64 +} + +var _ = Request(&ForgetRequest{}) + +func (r *ForgetRequest) String() string { + return fmt.Sprintf("Forget [%s] %d", &r.Header, r.N) +} + +// Respond replies to the request, indicating that the forgetfulness has been recorded. +func (r *ForgetRequest) Respond() { + // Don't reply to forget messages. + r.noResponse() +} + +// A Dirent represents a single directory entry. +type Dirent struct { + // Inode this entry names. + Inode uint64 + + // Type of the entry, for example DT_File. + // + // Setting this is optional. The zero value (DT_Unknown) means + // callers will just need to do a Getattr when the type is + // needed. Providing a type can speed up operations + // significantly. + Type DirentType + + // Name of the entry + Name string +} + +// Type of an entry in a directory listing. +type DirentType uint32 + +const ( + // These don't quite match os.FileMode; especially there's an + // explicit unknown, instead of zero value meaning file. They + // are also not quite syscall.DT_*; nothing says the FUSE + // protocol follows those, and even if they were, we don't + // want each fs to fiddle with syscall. + + // The shift by 12 is hardcoded in the FUSE userspace + // low-level C library, so it's safe here. 
+ + DT_Unknown DirentType = 0 + DT_Socket DirentType = syscall.S_IFSOCK >> 12 + DT_Link DirentType = syscall.S_IFLNK >> 12 + DT_File DirentType = syscall.S_IFREG >> 12 + DT_Block DirentType = syscall.S_IFBLK >> 12 + DT_Dir DirentType = syscall.S_IFDIR >> 12 + DT_Char DirentType = syscall.S_IFCHR >> 12 + DT_FIFO DirentType = syscall.S_IFIFO >> 12 +) + +func (t DirentType) String() string { + switch t { + case DT_Unknown: + return "unknown" + case DT_Socket: + return "socket" + case DT_Link: + return "link" + case DT_File: + return "file" + case DT_Block: + return "block" + case DT_Dir: + return "dir" + case DT_Char: + return "char" + case DT_FIFO: + return "fifo" + } + return "invalid" +} + +// AppendDirent appends the encoded form of a directory entry to data +// and returns the resulting slice. +func AppendDirent(data []byte, dir Dirent) []byte { + de := dirent{ + Ino: dir.Inode, + Namelen: uint32(len(dir.Name)), + Type: uint32(dir.Type), + } + de.Off = uint64(len(data) + direntSize + (len(dir.Name)+7)&^7) + data = append(data, (*[direntSize]byte)(unsafe.Pointer(&de))[:]...) + data = append(data, dir.Name...) + n := direntSize + uintptr(len(dir.Name)) + if n%8 != 0 { + var pad [8]byte + data = append(data, pad[:8-n%8]...) + } + return data +} + +// A WriteRequest asks to write to an open file. +type WriteRequest struct { + Header + Handle HandleID + Offset int64 + Data []byte + Flags WriteFlags + LockOwner uint64 + FileFlags OpenFlags +} + +var _ = Request(&WriteRequest{}) + +func (r *WriteRequest) String() string { + return fmt.Sprintf("Write [%s] %v %d @%d fl=%v lock=%d ffl=%v", &r.Header, r.Handle, len(r.Data), r.Offset, r.Flags, r.LockOwner, r.FileFlags) +} + +type jsonWriteRequest struct { + Handle HandleID + Offset int64 + Len uint64 + Flags WriteFlags +} + +func (r *WriteRequest) MarshalJSON() ([]byte, error) { + j := jsonWriteRequest{ + Handle: r.Handle, + Offset: r.Offset, + Len: uint64(len(r.Data)), + Flags: r.Flags, + } + return json.Marshal(j) +} + +// Respond replies to the request with the given response. +func (r *WriteRequest) Respond(resp *WriteResponse) { + buf := newBuffer(unsafe.Sizeof(writeOut{})) + out := (*writeOut)(buf.alloc(unsafe.Sizeof(writeOut{}))) + out.Size = uint32(resp.Size) + r.respond(buf) +} + +// A WriteResponse replies to a write indicating how many bytes were written. +type WriteResponse struct { + Size int +} + +func (r *WriteResponse) String() string { + return fmt.Sprintf("Write %d", r.Size) +} + +// A SetattrRequest asks to change one or more attributes associated with a file, +// as indicated by Valid. 
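[editor's note] AppendDirent above pads each record so the next one starts 8-byte aligned: a fixed 24-byte header plus the name length rounded up with (n+7)&^7. A quick check of that rounding; padded is a stand-in helper reproducing only the size math:

package main

import "fmt"

// padded: a fixed 24-byte dirent header plus the name length
// rounded up to the next multiple of 8.
func padded(nameLen int) int {
	const direntSize = 8 + 8 + 4 + 4 // Ino + Off + Namelen + Type
	return direntSize + (nameLen+7)&^7
}

func main() {
	for _, name := range []string{"a", "12345678", "covenantsql"} {
		fmt.Printf("%-11s -> %d bytes\n", name, padded(len(name)))
	}
	// a           -> 32 bytes
	// 12345678    -> 32 bytes
	// covenantsql -> 40 bytes
}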
+type SetattrRequest struct { + Header `json:"-"` + Valid SetattrValid + Handle HandleID + Size uint64 + Atime time.Time + Mtime time.Time + Mode os.FileMode + Uid uint32 + Gid uint32 + + // OS X only + Bkuptime time.Time + Chgtime time.Time + Crtime time.Time + Flags uint32 // see chflags(2) +} + +var _ = Request(&SetattrRequest{}) + +func (r *SetattrRequest) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "Setattr [%s]", &r.Header) + if r.Valid.Mode() { + fmt.Fprintf(&buf, " mode=%v", r.Mode) + } + if r.Valid.Uid() { + fmt.Fprintf(&buf, " uid=%d", r.Uid) + } + if r.Valid.Gid() { + fmt.Fprintf(&buf, " gid=%d", r.Gid) + } + if r.Valid.Size() { + fmt.Fprintf(&buf, " size=%d", r.Size) + } + if r.Valid.Atime() { + fmt.Fprintf(&buf, " atime=%v", r.Atime) + } + if r.Valid.AtimeNow() { + fmt.Fprintf(&buf, " atime=now") + } + if r.Valid.Mtime() { + fmt.Fprintf(&buf, " mtime=%v", r.Mtime) + } + if r.Valid.MtimeNow() { + fmt.Fprintf(&buf, " mtime=now") + } + if r.Valid.Handle() { + fmt.Fprintf(&buf, " handle=%v", r.Handle) + } else { + fmt.Fprintf(&buf, " handle=INVALID-%v", r.Handle) + } + if r.Valid.LockOwner() { + fmt.Fprintf(&buf, " lockowner") + } + if r.Valid.Crtime() { + fmt.Fprintf(&buf, " crtime=%v", r.Crtime) + } + if r.Valid.Chgtime() { + fmt.Fprintf(&buf, " chgtime=%v", r.Chgtime) + } + if r.Valid.Bkuptime() { + fmt.Fprintf(&buf, " bkuptime=%v", r.Bkuptime) + } + if r.Valid.Flags() { + fmt.Fprintf(&buf, " flags=%v", r.Flags) + } + return buf.String() +} + +// Respond replies to the request with the given response, +// giving the updated attributes. +func (r *SetattrRequest) Respond(resp *SetattrResponse) { + size := attrOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*attrOut)(buf.alloc(size)) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A SetattrResponse is the response to a SetattrRequest. +type SetattrResponse struct { + Attr Attr // file attributes +} + +func (r *SetattrResponse) String() string { + return fmt.Sprintf("Setattr %v", r.Attr) +} + +// A FlushRequest asks for the current state of an open file to be flushed +// to storage, as when a file descriptor is being closed. A single opened Handle +// may receive multiple FlushRequests over its lifetime. +type FlushRequest struct { + Header `json:"-"` + Handle HandleID + Flags uint32 + LockOwner uint64 +} + +var _ = Request(&FlushRequest{}) + +func (r *FlushRequest) String() string { + return fmt.Sprintf("Flush [%s] %v fl=%#x lk=%#x", &r.Header, r.Handle, r.Flags, r.LockOwner) +} + +// Respond replies to the request, indicating that the flush succeeded. +func (r *FlushRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A RemoveRequest asks to remove a file or directory from the +// directory r.Node. +type RemoveRequest struct { + Header `json:"-"` + Name string // name of the entry to remove + Dir bool // is this rmdir? +} + +var _ = Request(&RemoveRequest{}) + +func (r *RemoveRequest) String() string { + return fmt.Sprintf("Remove [%s] %q dir=%v", &r.Header, r.Name, r.Dir) +} + +// Respond replies to the request, indicating that the file was removed. +func (r *RemoveRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A SymlinkRequest is a request to create a symlink making NewName point to Target. 
+type SymlinkRequest struct { + Header `json:"-"` + NewName, Target string +} + +var _ = Request(&SymlinkRequest{}) + +func (r *SymlinkRequest) String() string { + return fmt.Sprintf("Symlink [%s] from %q to target %q", &r.Header, r.NewName, r.Target) +} + +// Respond replies to the request, indicating that the symlink was created. +func (r *SymlinkRequest) Respond(resp *SymlinkResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A SymlinkResponse is the response to a SymlinkRequest. +type SymlinkResponse struct { + LookupResponse +} + +func (r *SymlinkResponse) String() string { + return fmt.Sprintf("Symlink %v", r.LookupResponse.string()) +} + +// A ReadlinkRequest is a request to read a symlink's target. +type ReadlinkRequest struct { + Header `json:"-"` +} + +var _ = Request(&ReadlinkRequest{}) + +func (r *ReadlinkRequest) String() string { + return fmt.Sprintf("Readlink [%s]", &r.Header) +} + +func (r *ReadlinkRequest) Respond(target string) { + buf := newBuffer(uintptr(len(target))) + buf = append(buf, target...) + r.respond(buf) +} + +// A LinkRequest is a request to create a hard link. +type LinkRequest struct { + Header `json:"-"` + OldNode NodeID + NewName string +} + +var _ = Request(&LinkRequest{}) + +func (r *LinkRequest) String() string { + return fmt.Sprintf("Link [%s] node %d to %q", &r.Header, r.OldNode, r.NewName) +} + +func (r *LinkRequest) Respond(resp *LookupResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A RenameRequest is a request to rename a file. +type RenameRequest struct { + Header `json:"-"` + NewDir NodeID + OldName, NewName string +} + +var _ = Request(&RenameRequest{}) + +func (r *RenameRequest) String() string { + return fmt.Sprintf("Rename [%s] from %q to dirnode %v %q", &r.Header, r.OldName, r.NewDir, r.NewName) +} + +func (r *RenameRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +type MknodRequest struct { + Header `json:"-"` + Name string + Mode os.FileMode + Rdev uint32 + // Umask of the request. Not supported on OS X. 
+ Umask os.FileMode +} + +var _ = Request(&MknodRequest{}) + +func (r *MknodRequest) String() string { + return fmt.Sprintf("Mknod [%s] Name %q mode=%v umask=%v rdev=%d", &r.Header, r.Name, r.Mode, r.Umask, r.Rdev) +} + +func (r *MknodRequest) Respond(resp *LookupResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +type FsyncRequest struct { + Header `json:"-"` + Handle HandleID + // TODO bit 1 is datasync, not well documented upstream + Flags uint32 + Dir bool +} + +var _ = Request(&FsyncRequest{}) + +func (r *FsyncRequest) String() string { + return fmt.Sprintf("Fsync [%s] Handle %v Flags %v", &r.Header, r.Handle, r.Flags) +} + +func (r *FsyncRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// An InterruptRequest is a request to interrupt another pending request. The +// response to that request should return an error status of EINTR. +type InterruptRequest struct { + Header `json:"-"` + IntrID RequestID // ID of the request to be interrupt. +} + +var _ = Request(&InterruptRequest{}) + +func (r *InterruptRequest) Respond() { + // nothing to do here + r.noResponse() +} + +func (r *InterruptRequest) String() string { + return fmt.Sprintf("Interrupt [%s] ID %v", &r.Header, r.IntrID) +} + +// An ExchangeDataRequest is a request to exchange the contents of two +// files, while leaving most metadata untouched. +// +// This request comes from OS X exchangedata(2) and represents its +// specific semantics. Crucially, it is very different from Linux +// renameat(2) RENAME_EXCHANGE. +// +// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html +type ExchangeDataRequest struct { + Header `json:"-"` + OldDir, NewDir NodeID + OldName, NewName string + // TODO options +} + +var _ = Request(&ExchangeDataRequest{}) + +func (r *ExchangeDataRequest) String() string { + // TODO options + return fmt.Sprintf("ExchangeData [%s] %v %q and %v %q", &r.Header, r.OldDir, r.OldName, r.NewDir, r.NewName) +} + +func (r *ExchangeDataRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} diff --git a/vendor/bazil.org/fuse/fuse_darwin.go b/vendor/bazil.org/fuse/fuse_darwin.go new file mode 100644 index 000000000..b58dca97d --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_darwin.go @@ -0,0 +1,9 @@ +package fuse + +// Maximum file write size we are prepared to receive from the kernel. +// +// This value has to be >=16MB or OSXFUSE (3.4.0 observed) will +// forcibly close the /dev/fuse file descriptor on a Setxattr with a +// 16MB value. See TestSetxattr16MB and +// https://github.com/bazil/fuse/issues/42 +const maxWrite = 16 * 1024 * 1024 diff --git a/vendor/bazil.org/fuse/fuse_freebsd.go b/vendor/bazil.org/fuse/fuse_freebsd.go new file mode 100644 index 000000000..4aa83a0d4 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_freebsd.go @@ -0,0 +1,6 @@ +package fuse + +// Maximum file write size we are prepared to receive from the kernel. +// +// This number is just a guess. 
+const maxWrite = 128 * 1024 diff --git a/vendor/bazil.org/fuse/fuse_kernel.go b/vendor/bazil.org/fuse/fuse_kernel.go new file mode 100644 index 000000000..87c5ca1dc --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel.go @@ -0,0 +1,774 @@ +// See the file LICENSE for copyright and licensing information. + +// Derived from FUSE's fuse_kernel.h, which carries this notice: +/* + This file defines the kernel interface of FUSE + Copyright (C) 2001-2007 Miklos Szeredi + + + This -- and only this -- header file may also be distributed under + the terms of the BSD Licence as follows: + + Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. +*/ + +package fuse + +import ( + "fmt" + "syscall" + "unsafe" +) + +// The FUSE version implemented by the package. +const ( + protoVersionMinMajor = 7 + protoVersionMinMinor = 8 + protoVersionMaxMajor = 7 + protoVersionMaxMinor = 12 +) + +const ( + rootID = 1 +) + +type kstatfs struct { + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Bsize uint32 + Namelen uint32 + Frsize uint32 + _ uint32 + Spare [6]uint32 +} + +type fileLock struct { + Start uint64 + End uint64 + Type uint32 + Pid uint32 +} + +// GetattrFlags are bit flags that can be seen in GetattrRequest. +type GetattrFlags uint32 + +const ( + // Indicates the handle is valid. + GetattrFh GetattrFlags = 1 << 0 +) + +var getattrFlagsNames = []flagName{ + {uint32(GetattrFh), "GetattrFh"}, +} + +func (fl GetattrFlags) String() string { + return flagString(uint32(fl), getattrFlagsNames) +} + +// The SetattrValid are bit flags describing which fields in the SetattrRequest +// are included in the change. +type SetattrValid uint32 + +const ( + SetattrMode SetattrValid = 1 << 0 + SetattrUid SetattrValid = 1 << 1 + SetattrGid SetattrValid = 1 << 2 + SetattrSize SetattrValid = 1 << 3 + SetattrAtime SetattrValid = 1 << 4 + SetattrMtime SetattrValid = 1 << 5 + SetattrHandle SetattrValid = 1 << 6 + + // Linux only(?) 
+ SetattrAtimeNow SetattrValid = 1 << 7 + SetattrMtimeNow SetattrValid = 1 << 8 + SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html + + // OS X only + SetattrCrtime SetattrValid = 1 << 28 + SetattrChgtime SetattrValid = 1 << 29 + SetattrBkuptime SetattrValid = 1 << 30 + SetattrFlags SetattrValid = 1 << 31 +) + +func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 } +func (fl SetattrValid) Uid() bool { return fl&SetattrUid != 0 } +func (fl SetattrValid) Gid() bool { return fl&SetattrGid != 0 } +func (fl SetattrValid) Size() bool { return fl&SetattrSize != 0 } +func (fl SetattrValid) Atime() bool { return fl&SetattrAtime != 0 } +func (fl SetattrValid) Mtime() bool { return fl&SetattrMtime != 0 } +func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 } +func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 } +func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 } +func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 } +func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 } +func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 } +func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 } +func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 } + +func (fl SetattrValid) String() string { + return flagString(uint32(fl), setattrValidNames) +} + +var setattrValidNames = []flagName{ + {uint32(SetattrMode), "SetattrMode"}, + {uint32(SetattrUid), "SetattrUid"}, + {uint32(SetattrGid), "SetattrGid"}, + {uint32(SetattrSize), "SetattrSize"}, + {uint32(SetattrAtime), "SetattrAtime"}, + {uint32(SetattrMtime), "SetattrMtime"}, + {uint32(SetattrHandle), "SetattrHandle"}, + {uint32(SetattrAtimeNow), "SetattrAtimeNow"}, + {uint32(SetattrMtimeNow), "SetattrMtimeNow"}, + {uint32(SetattrLockOwner), "SetattrLockOwner"}, + {uint32(SetattrCrtime), "SetattrCrtime"}, + {uint32(SetattrChgtime), "SetattrChgtime"}, + {uint32(SetattrBkuptime), "SetattrBkuptime"}, + {uint32(SetattrFlags), "SetattrFlags"}, +} + +// Flags that can be seen in OpenRequest.Flags. +const ( + // Access modes. These are not 1-bit flags, but alternatives where + // only one can be chosen. See the IsReadOnly etc convenience + // methods. + OpenReadOnly OpenFlags = syscall.O_RDONLY + OpenWriteOnly OpenFlags = syscall.O_WRONLY + OpenReadWrite OpenFlags = syscall.O_RDWR + + // File was opened in append-only mode, all writes will go to end + // of file. OS X does not provide this information. + OpenAppend OpenFlags = syscall.O_APPEND + OpenCreate OpenFlags = syscall.O_CREAT + OpenDirectory OpenFlags = syscall.O_DIRECTORY + OpenExclusive OpenFlags = syscall.O_EXCL + OpenNonblock OpenFlags = syscall.O_NONBLOCK + OpenSync OpenFlags = syscall.O_SYNC + OpenTruncate OpenFlags = syscall.O_TRUNC +) + +// OpenAccessModeMask is a bitmask that separates the access mode +// from the other flags in OpenFlags. +const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE + +// OpenFlags are the O_FOO flags passed to open/create/etc calls. For +// example, os.O_WRONLY | os.O_APPEND. +type OpenFlags uint32 + +func (fl OpenFlags) String() string { + // O_RDONLY, O_RWONLY, O_RDWR are not flags + s := accModeName(fl & OpenAccessModeMask) + flags := uint32(fl &^ OpenAccessModeMask) + if flags != 0 { + s = s + "+" + flagString(flags, openFlagNames) + } + return s +} + +// Return true if OpenReadOnly is set. 
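[editor's note] As the comment above stresses, the open access modes are alternatives in the low bits, not independent flags; in particular O_RDONLY is zero and can never be tested as a bit. Masking with O_ACCMODE, as the IsReadOnly helpers that follow do, is the only safe test. A standalone demonstration using the syscall constants:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	fl := uint32(syscall.O_RDWR | syscall.O_APPEND)
	// Wrong: O_RDONLY == 0, so this bit test is always false.
	fmt.Println(fl&uint32(syscall.O_RDONLY) != 0) // false
	// Right: mask out the access-mode bits first, then compare.
	fmt.Println(fl&uint32(syscall.O_ACCMODE) == syscall.O_RDWR) // true
}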
+func (fl OpenFlags) IsReadOnly() bool { + return fl&OpenAccessModeMask == OpenReadOnly +} + +// Return true if OpenWriteOnly is set. +func (fl OpenFlags) IsWriteOnly() bool { + return fl&OpenAccessModeMask == OpenWriteOnly +} + +// Return true if OpenReadWrite is set. +func (fl OpenFlags) IsReadWrite() bool { + return fl&OpenAccessModeMask == OpenReadWrite +} + +func accModeName(flags OpenFlags) string { + switch flags { + case OpenReadOnly: + return "OpenReadOnly" + case OpenWriteOnly: + return "OpenWriteOnly" + case OpenReadWrite: + return "OpenReadWrite" + default: + return "" + } +} + +var openFlagNames = []flagName{ + {uint32(OpenAppend), "OpenAppend"}, + {uint32(OpenCreate), "OpenCreate"}, + {uint32(OpenDirectory), "OpenDirectory"}, + {uint32(OpenExclusive), "OpenExclusive"}, + {uint32(OpenNonblock), "OpenNonblock"}, + {uint32(OpenSync), "OpenSync"}, + {uint32(OpenTruncate), "OpenTruncate"}, +} + +// The OpenResponseFlags are returned in the OpenResponse. +type OpenResponseFlags uint32 + +const ( + OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file + OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open + OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on OS X) + + OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X + OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X +) + +func (fl OpenResponseFlags) String() string { + return flagString(uint32(fl), openResponseFlagNames) +} + +var openResponseFlagNames = []flagName{ + {uint32(OpenDirectIO), "OpenDirectIO"}, + {uint32(OpenKeepCache), "OpenKeepCache"}, + {uint32(OpenNonSeekable), "OpenNonSeekable"}, + {uint32(OpenPurgeAttr), "OpenPurgeAttr"}, + {uint32(OpenPurgeUBC), "OpenPurgeUBC"}, +} + +// The InitFlags are used in the Init exchange. +type InitFlags uint32 + +const ( + InitAsyncRead InitFlags = 1 << 0 + InitPosixLocks InitFlags = 1 << 1 + InitFileOps InitFlags = 1 << 2 + InitAtomicTrunc InitFlags = 1 << 3 + InitExportSupport InitFlags = 1 << 4 + InitBigWrites InitFlags = 1 << 5 + // Do not mask file access modes with umask. Not supported on OS X. 
+ InitDontMask InitFlags = 1 << 6 + InitSpliceWrite InitFlags = 1 << 7 + InitSpliceMove InitFlags = 1 << 8 + InitSpliceRead InitFlags = 1 << 9 + InitFlockLocks InitFlags = 1 << 10 + InitHasIoctlDir InitFlags = 1 << 11 + InitAutoInvalData InitFlags = 1 << 12 + InitDoReaddirplus InitFlags = 1 << 13 + InitReaddirplusAuto InitFlags = 1 << 14 + InitAsyncDIO InitFlags = 1 << 15 + InitWritebackCache InitFlags = 1 << 16 + InitNoOpenSupport InitFlags = 1 << 17 + + InitCaseSensitive InitFlags = 1 << 29 // OS X only + InitVolRename InitFlags = 1 << 30 // OS X only + InitXtimes InitFlags = 1 << 31 // OS X only +) + +type flagName struct { + bit uint32 + name string +} + +var initFlagNames = []flagName{ + {uint32(InitAsyncRead), "InitAsyncRead"}, + {uint32(InitPosixLocks), "InitPosixLocks"}, + {uint32(InitFileOps), "InitFileOps"}, + {uint32(InitAtomicTrunc), "InitAtomicTrunc"}, + {uint32(InitExportSupport), "InitExportSupport"}, + {uint32(InitBigWrites), "InitBigWrites"}, + {uint32(InitDontMask), "InitDontMask"}, + {uint32(InitSpliceWrite), "InitSpliceWrite"}, + {uint32(InitSpliceMove), "InitSpliceMove"}, + {uint32(InitSpliceRead), "InitSpliceRead"}, + {uint32(InitFlockLocks), "InitFlockLocks"}, + {uint32(InitHasIoctlDir), "InitHasIoctlDir"}, + {uint32(InitAutoInvalData), "InitAutoInvalData"}, + {uint32(InitDoReaddirplus), "InitDoReaddirplus"}, + {uint32(InitReaddirplusAuto), "InitReaddirplusAuto"}, + {uint32(InitAsyncDIO), "InitAsyncDIO"}, + {uint32(InitWritebackCache), "InitWritebackCache"}, + {uint32(InitNoOpenSupport), "InitNoOpenSupport"}, + + {uint32(InitCaseSensitive), "InitCaseSensitive"}, + {uint32(InitVolRename), "InitVolRename"}, + {uint32(InitXtimes), "InitXtimes"}, +} + +func (fl InitFlags) String() string { + return flagString(uint32(fl), initFlagNames) +} + +func flagString(f uint32, names []flagName) string { + var s string + + if f == 0 { + return "0" + } + + for _, n := range names { + if f&n.bit != 0 { + s += "+" + n.name + f &^= n.bit + } + } + if f != 0 { + s += fmt.Sprintf("%+#x", f) + } + return s[1:] +} + +// The ReleaseFlags are used in the Release exchange. +type ReleaseFlags uint32 + +const ( + ReleaseFlush ReleaseFlags = 1 << 0 +) + +func (fl ReleaseFlags) String() string { + return flagString(uint32(fl), releaseFlagNames) +} + +var releaseFlagNames = []flagName{ + {uint32(ReleaseFlush), "ReleaseFlush"}, +} + +// Opcodes +const ( + opLookup = 1 + opForget = 2 // no reply + opGetattr = 3 + opSetattr = 4 + opReadlink = 5 + opSymlink = 6 + opMknod = 8 + opMkdir = 9 + opUnlink = 10 + opRmdir = 11 + opRename = 12 + opLink = 13 + opOpen = 14 + opRead = 15 + opWrite = 16 + opStatfs = 17 + opRelease = 18 + opFsync = 20 + opSetxattr = 21 + opGetxattr = 22 + opListxattr = 23 + opRemovexattr = 24 + opFlush = 25 + opInit = 26 + opOpendir = 27 + opReaddir = 28 + opReleasedir = 29 + opFsyncdir = 30 + opGetlk = 31 + opSetlk = 32 + opSetlkw = 33 + opAccess = 34 + opCreate = 35 + opInterrupt = 36 + opBmap = 37 + opDestroy = 38 + opIoctl = 39 // Linux? + opPoll = 40 // Linux? 
+ + // OS X + opSetvolname = 61 + opGetxtimes = 62 + opExchange = 63 +) + +type entryOut struct { + Nodeid uint64 // Inode ID + Generation uint64 // Inode generation + EntryValid uint64 // Cache timeout for the name + AttrValid uint64 // Cache timeout for the attributes + EntryValidNsec uint32 + AttrValidNsec uint32 + Attr attr +} + +func entryOutSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(entryOut{}.Attr) + unsafe.Offsetof(entryOut{}.Attr.Blksize) + default: + return unsafe.Sizeof(entryOut{}) + } +} + +type forgetIn struct { + Nlookup uint64 +} + +type getattrIn struct { + GetattrFlags uint32 + _ uint32 + Fh uint64 +} + +type attrOut struct { + AttrValid uint64 // Cache timeout for the attributes + AttrValidNsec uint32 + _ uint32 + Attr attr +} + +func attrOutSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(attrOut{}.Attr) + unsafe.Offsetof(attrOut{}.Attr.Blksize) + default: + return unsafe.Sizeof(attrOut{}) + } +} + +// OS X +type getxtimesOut struct { + Bkuptime uint64 + Crtime uint64 + BkuptimeNsec uint32 + CrtimeNsec uint32 +} + +type mknodIn struct { + Mode uint32 + Rdev uint32 + Umask uint32 + _ uint32 + // "filename\x00" follows. +} + +func mknodInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 12}): + return unsafe.Offsetof(mknodIn{}.Umask) + default: + return unsafe.Sizeof(mknodIn{}) + } +} + +type mkdirIn struct { + Mode uint32 + Umask uint32 + // filename follows +} + +func mkdirInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 12}): + return unsafe.Offsetof(mkdirIn{}.Umask) + 4 + default: + return unsafe.Sizeof(mkdirIn{}) + } +} + +type renameIn struct { + Newdir uint64 + // "oldname\x00newname\x00" follows +} + +// OS X +type exchangeIn struct { + Olddir uint64 + Newdir uint64 + Options uint64 + // "oldname\x00newname\x00" follows +} + +type linkIn struct { + Oldnodeid uint64 +} + +type setattrInCommon struct { + Valid uint32 + _ uint32 + Fh uint64 + Size uint64 + LockOwner uint64 // unused on OS X? + Atime uint64 + Mtime uint64 + Unused2 uint64 + AtimeNsec uint32 + MtimeNsec uint32 + Unused3 uint32 + Mode uint32 + Unused4 uint32 + Uid uint32 + Gid uint32 + Unused5 uint32 +} + +type openIn struct { + Flags uint32 + Unused uint32 +} + +type openOut struct { + Fh uint64 + OpenFlags uint32 + _ uint32 +} + +type createIn struct { + Flags uint32 + Mode uint32 + Umask uint32 + _ uint32 +} + +func createInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 12}): + return unsafe.Offsetof(createIn{}.Umask) + default: + return unsafe.Sizeof(createIn{}) + } +} + +type releaseIn struct { + Fh uint64 + Flags uint32 + ReleaseFlags uint32 + LockOwner uint32 +} + +type flushIn struct { + Fh uint64 + FlushFlags uint32 + _ uint32 + LockOwner uint64 +} + +type readIn struct { + Fh uint64 + Offset uint64 + Size uint32 + ReadFlags uint32 + LockOwner uint64 + Flags uint32 + _ uint32 +} + +func readInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(readIn{}.ReadFlags) + 4 + default: + return unsafe.Sizeof(readIn{}) + } +} + +// The ReadFlags are passed in ReadRequest. +type ReadFlags uint32 + +const ( + // LockOwner field is valid. 
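[editor's note] entryOutSize, attrOutSize, and the *InSize helpers above all compute the on-wire struct size from the negotiated protocol version, truncating with unsafe.Offsetof for kernels older than 7.9 or 7.12. A self-contained sketch of the pattern; demoOut and the explicit version comparison are illustrative, not the package's API:

package main

import (
	"fmt"
	"unsafe"
)

// demoOut mirrors the attrOut/entryOut shape: protocol revisions
// before 7.9 did not carry the trailing field, so the byte count put
// on the wire depends on the negotiated version.
type demoOut struct {
	Valid uint64
	Extra uint32 // only present on newer protocol revisions
	_     uint32
}

func demoOutSize(major, minor uint32) uintptr {
	if major == 7 && minor < 9 {
		return unsafe.Offsetof(demoOut{}.Extra) // truncate before Extra
	}
	return unsafe.Sizeof(demoOut{})
}

func main() {
	fmt.Println(demoOutSize(7, 8), demoOutSize(7, 12)) // 8 16
}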
+ ReadLockOwner ReadFlags = 1 << 1 +) + +var readFlagNames = []flagName{ + {uint32(ReadLockOwner), "ReadLockOwner"}, +} + +func (fl ReadFlags) String() string { + return flagString(uint32(fl), readFlagNames) +} + +type writeIn struct { + Fh uint64 + Offset uint64 + Size uint32 + WriteFlags uint32 + LockOwner uint64 + Flags uint32 + _ uint32 +} + +func writeInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(writeIn{}.LockOwner) + default: + return unsafe.Sizeof(writeIn{}) + } +} + +type writeOut struct { + Size uint32 + _ uint32 +} + +// The WriteFlags are passed in WriteRequest. +type WriteFlags uint32 + +const ( + WriteCache WriteFlags = 1 << 0 + // LockOwner field is valid. + WriteLockOwner WriteFlags = 1 << 1 +) + +var writeFlagNames = []flagName{ + {uint32(WriteCache), "WriteCache"}, + {uint32(WriteLockOwner), "WriteLockOwner"}, +} + +func (fl WriteFlags) String() string { + return flagString(uint32(fl), writeFlagNames) +} + +const compatStatfsSize = 48 + +type statfsOut struct { + St kstatfs +} + +type fsyncIn struct { + Fh uint64 + FsyncFlags uint32 + _ uint32 +} + +type setxattrInCommon struct { + Size uint32 + Flags uint32 +} + +func (setxattrInCommon) position() uint32 { + return 0 +} + +type getxattrInCommon struct { + Size uint32 + _ uint32 +} + +func (getxattrInCommon) position() uint32 { + return 0 +} + +type getxattrOut struct { + Size uint32 + _ uint32 +} + +type lkIn struct { + Fh uint64 + Owner uint64 + Lk fileLock + LkFlags uint32 + _ uint32 +} + +func lkInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(lkIn{}.LkFlags) + default: + return unsafe.Sizeof(lkIn{}) + } +} + +type lkOut struct { + Lk fileLock +} + +type accessIn struct { + Mask uint32 + _ uint32 +} + +type initIn struct { + Major uint32 + Minor uint32 + MaxReadahead uint32 + Flags uint32 +} + +const initInSize = int(unsafe.Sizeof(initIn{})) + +type initOut struct { + Major uint32 + Minor uint32 + MaxReadahead uint32 + Flags uint32 + Unused uint32 + MaxWrite uint32 +} + +type interruptIn struct { + Unique uint64 +} + +type bmapIn struct { + Block uint64 + BlockSize uint32 + _ uint32 +} + +type bmapOut struct { + Block uint64 +} + +type inHeader struct { + Len uint32 + Opcode uint32 + Unique uint64 + Nodeid uint64 + Uid uint32 + Gid uint32 + Pid uint32 + _ uint32 +} + +const inHeaderSize = int(unsafe.Sizeof(inHeader{})) + +type outHeader struct { + Len uint32 + Error int32 + Unique uint64 +} + +type dirent struct { + Ino uint64 + Off uint64 + Namelen uint32 + Type uint32 + Name [0]byte +} + +const direntSize = 8 + 8 + 4 + 4 + +const ( + notifyCodePoll int32 = 1 + notifyCodeInvalInode int32 = 2 + notifyCodeInvalEntry int32 = 3 +) + +type notifyInvalInodeOut struct { + Ino uint64 + Off int64 + Len int64 +} + +type notifyInvalEntryOut struct { + Parent uint64 + Namelen uint32 + _ uint32 +} diff --git a/vendor/bazil.org/fuse/fuse_kernel_darwin.go b/vendor/bazil.org/fuse/fuse_kernel_darwin.go new file mode 100644 index 000000000..b9873fdf3 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel_darwin.go @@ -0,0 +1,88 @@ +package fuse + +import ( + "time" +) + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + Crtime_ uint64 // OS X only + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + CrtimeNsec uint32 // OS X only + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Flags_ uint32 // OS X only; see chflags(2) + Blksize uint32 + padding uint32 +} + 
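[editor's note] Every timestamp in these attr structs travels as a (sec uint64, nsec uint32) pair, the split performed by the unix() helper earlier in this file. A minimal round-trip sketch; split and join are my names, not the package's:

package main

import (
	"fmt"
	"time"
)

// split mirrors the unix() helper defined earlier; join is the inverse.
func split(t time.Time) (sec uint64, nsec uint32) {
	n := t.UnixNano()
	return uint64(n / 1e9), uint32(n % 1e9)
}

func join(sec uint64, nsec uint32) time.Time {
	return time.Unix(int64(sec), int64(nsec))
}

func main() {
	t := time.Date(2018, time.November, 8, 12, 0, 0, 42, time.UTC)
	sec, nsec := split(t)
	fmt.Println(join(sec, nsec).Equal(t)) // true
}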
+func (a *attr) SetCrtime(s uint64, ns uint32) { + a.Crtime_, a.CrtimeNsec = s, ns +} + +func (a *attr) SetFlags(f uint32) { + a.Flags_ = f +} + +type setattrIn struct { + setattrInCommon + + // OS X only + Bkuptime_ uint64 + Chgtime_ uint64 + Crtime uint64 + BkuptimeNsec uint32 + ChgtimeNsec uint32 + CrtimeNsec uint32 + Flags_ uint32 // see chflags(2) +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec)) +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec)) +} + +func (in *setattrIn) Flags() uint32 { + return in.Flags_ +} + +func openFlags(flags uint32) OpenFlags { + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon + + // OS X only + Position uint32 + Padding uint32 +} + +func (g *getxattrIn) position() uint32 { + return g.Position +} + +type setxattrIn struct { + setxattrInCommon + + // OS X only + Position uint32 + Padding uint32 +} + +func (s *setxattrIn) position() uint32 { + return s.Position +} diff --git a/vendor/bazil.org/fuse/fuse_kernel_freebsd.go b/vendor/bazil.org/fuse/fuse_kernel_freebsd.go new file mode 100644 index 000000000..b1141e41d --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel_freebsd.go @@ -0,0 +1,62 @@ +package fuse + +import "time" + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Blksize uint32 + padding uint32 +} + +func (a *attr) Crtime() time.Time { + return time.Time{} +} + +func (a *attr) SetCrtime(s uint64, ns uint32) { + // ignored on freebsd +} + +func (a *attr) SetFlags(f uint32) { + // ignored on freebsd +} + +type setattrIn struct { + setattrInCommon +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Flags() uint32 { + return 0 +} + +func openFlags(flags uint32) OpenFlags { + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon +} + +type setxattrIn struct { + setxattrInCommon +} diff --git a/vendor/bazil.org/fuse/fuse_kernel_linux.go b/vendor/bazil.org/fuse/fuse_kernel_linux.go new file mode 100644 index 000000000..d3ba86617 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel_linux.go @@ -0,0 +1,70 @@ +package fuse + +import "time" + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Blksize uint32 + padding uint32 +} + +func (a *attr) Crtime() time.Time { + return time.Time{} +} + +func (a *attr) SetCrtime(s uint64, ns uint32) { + // Ignored on Linux. +} + +func (a *attr) SetFlags(f uint32) { + // Ignored on Linux. 
+} + +type setattrIn struct { + setattrInCommon +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Flags() uint32 { + return 0 +} + +func openFlags(flags uint32) OpenFlags { + // on amd64, the 32-bit O_LARGEFILE flag is always seen; + // on i386, the flag probably depends on the app + // requesting, but in any case should be utterly + // uninteresting to us here; our kernel protocol messages + // are not directly related to the client app's kernel + // API/ABI + flags &^= 0x8000 + + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon +} + +type setxattrIn struct { + setxattrInCommon +} diff --git a/vendor/bazil.org/fuse/fuse_kernel_std.go b/vendor/bazil.org/fuse/fuse_kernel_std.go new file mode 100644 index 000000000..074cfd322 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel_std.go @@ -0,0 +1 @@ +package fuse diff --git a/vendor/bazil.org/fuse/fuse_linux.go b/vendor/bazil.org/fuse/fuse_linux.go new file mode 100644 index 000000000..5fb96f9ae --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_linux.go @@ -0,0 +1,7 @@ +package fuse + +// Maximum file write size we are prepared to receive from the kernel. +// +// Linux 4.2.0 has been observed to cap this value at 128kB +// (FUSE_MAX_PAGES_PER_REQ=32, 4kB pages). +const maxWrite = 128 * 1024 diff --git a/vendor/bazil.org/fuse/fuseutil/fuseutil.go b/vendor/bazil.org/fuse/fuseutil/fuseutil.go new file mode 100644 index 000000000..b3f52b73b --- /dev/null +++ b/vendor/bazil.org/fuse/fuseutil/fuseutil.go @@ -0,0 +1,20 @@ +package fuseutil // import "bazil.org/fuse/fuseutil" + +import ( + "bazil.org/fuse" +) + +// HandleRead handles a read request assuming that data is the entire file content. +// It adjusts the amount returned in resp according to req.Offset and req.Size. +func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) { + if req.Offset >= int64(len(data)) { + data = nil + } else { + data = data[req.Offset:] + } + if len(data) > req.Size { + data = data[:req.Size] + } + n := copy(resp.Data[:req.Size], data) + resp.Data = resp.Data[:n] +} diff --git a/vendor/bazil.org/fuse/mount.go b/vendor/bazil.org/fuse/mount.go new file mode 100644 index 000000000..8054e9021 --- /dev/null +++ b/vendor/bazil.org/fuse/mount.go @@ -0,0 +1,38 @@ +package fuse + +import ( + "bufio" + "errors" + "io" + "log" + "sync" +) + +var ( + // ErrOSXFUSENotFound is returned from Mount when the OSXFUSE + // installation is not detected. + // + // Only happens on OS X. Make sure OSXFUSE is installed, or see + // OSXFUSELocations for customization. 
+ ErrOSXFUSENotFound = errors.New("cannot locate OSXFUSE") +) + +func neverIgnoreLine(line string) bool { + return false +} + +func lineLogger(wg *sync.WaitGroup, prefix string, ignore func(line string) bool, r io.ReadCloser) { + defer wg.Done() + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + if ignore(line) { + continue + } + log.Printf("%s: %s", prefix, line) + } + if err := scanner.Err(); err != nil { + log.Printf("%s, error reading: %v", prefix, err) + } +} diff --git a/vendor/bazil.org/fuse/mount_darwin.go b/vendor/bazil.org/fuse/mount_darwin.go new file mode 100644 index 000000000..c1c36e62b --- /dev/null +++ b/vendor/bazil.org/fuse/mount_darwin.go @@ -0,0 +1,208 @@ +package fuse + +import ( + "errors" + "fmt" + "log" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" +) + +var ( + errNoAvail = errors.New("no available fuse devices") + errNotLoaded = errors.New("osxfuse is not loaded") +) + +func loadOSXFUSE(bin string) error { + cmd := exec.Command(bin) + cmd.Dir = "/" + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + return err +} + +func openOSXFUSEDev(devPrefix string) (*os.File, error) { + var f *os.File + var err error + for i := uint64(0); ; i++ { + path := devPrefix + strconv.FormatUint(i, 10) + f, err = os.OpenFile(path, os.O_RDWR, 0000) + if os.IsNotExist(err) { + if i == 0 { + // not even the first device was found -> fuse is not loaded + return nil, errNotLoaded + } + + // we've run out of kernel-provided devices + return nil, errNoAvail + } + + if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY { + // try the next one + continue + } + + if err != nil { + return nil, err + } + return f, nil + } +} + +func handleMountOSXFUSE(helperName string, errCh chan<- error) func(line string) (ignore bool) { + var noMountpointPrefix = helperName + `: ` + const noMountpointSuffix = `: No such file or directory` + return func(line string) (ignore bool) { + if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) { + // re-extract it from the error message in case some layer + // changed the path + mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)] + err := &MountpointDoesNotExistError{ + Path: mountpoint, + } + select { + case errCh <- err: + return true + default: + // not the first error; fall back to logging it + return false + } + } + + return false + } +} + +// isBoringMountOSXFUSEError returns whether the Wait error is +// uninteresting; exit status 64 is. +func isBoringMountOSXFUSEError(err error) bool { + if err, ok := err.(*exec.ExitError); ok && err.Exited() { + if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 64 { + return true + } + } + return false +} + +func callMount(bin string, daemonVar string, dir string, conf *mountConfig, f *os.File, ready chan<- struct{}, errp *error) error { + for k, v := range conf.options { + if strings.Contains(k, ",") || strings.Contains(v, ",") { + // Silly limitation but the mount helper does not + // understand any escaping. See TestMountOptionCommaError. + return fmt.Errorf("mount options cannot contain commas on darwin: %q=%q", k, v) + } + } + cmd := exec.Command( + bin, + "-o", conf.getOptions(), + // Tell osxfuse-kext how large our buffer is. It must split + // writes larger than this into multiple writes. + // + // OSXFUSE seems to ignore InitResponse.MaxWrite, and uses + // this instead. 
+ "-o", "iosize="+strconv.FormatUint(maxWrite, 10), + // refers to fd passed in cmd.ExtraFiles + "3", + dir, + ) + cmd.ExtraFiles = []*os.File{f} + cmd.Env = os.Environ() + // OSXFUSE <3.3.0 + cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=") + // OSXFUSE >=3.3.0 + cmd.Env = append(cmd.Env, "MOUNT_OSXFUSE_CALL_BY_LIB=") + + daemon := os.Args[0] + if daemonVar != "" { + cmd.Env = append(cmd.Env, daemonVar+"="+daemon) + } + + stdout, err := cmd.StdoutPipe() + if err != nil { + return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err) + } + stderr, err := cmd.StderrPipe() + if err != nil { + return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err) + } + + if err := cmd.Start(); err != nil { + return fmt.Errorf("mount_osxfusefs: %v", err) + } + helperErrCh := make(chan error, 1) + go func() { + var wg sync.WaitGroup + wg.Add(2) + go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout) + helperName := path.Base(bin) + go lineLogger(&wg, "mount helper error", handleMountOSXFUSE(helperName, helperErrCh), stderr) + wg.Wait() + if err := cmd.Wait(); err != nil { + // see if we have a better error to report + select { + case helperErr := <-helperErrCh: + // log the Wait error if it's not what we expected + if !isBoringMountOSXFUSEError(err) { + log.Printf("mount helper failed: %v", err) + } + // and now return what we grabbed from stderr as the real + // error + *errp = helperErr + close(ready) + return + default: + // nope, fall back to generic message + } + + *errp = fmt.Errorf("mount_osxfusefs: %v", err) + close(ready) + return + } + + *errp = nil + close(ready) + }() + return nil +} + +func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) { + locations := conf.osxfuseLocations + if locations == nil { + locations = []OSXFUSEPaths{ + OSXFUSELocationV3, + OSXFUSELocationV2, + } + } + for _, loc := range locations { + if _, err := os.Stat(loc.Mount); os.IsNotExist(err) { + // try the other locations + continue + } + + f, err := openOSXFUSEDev(loc.DevicePrefix) + if err == errNotLoaded { + err = loadOSXFUSE(loc.Load) + if err != nil { + return nil, err + } + // try again + f, err = openOSXFUSEDev(loc.DevicePrefix) + } + if err != nil { + return nil, err + } + err = callMount(loc.Mount, loc.DaemonVar, dir, conf, f, ready, errp) + if err != nil { + f.Close() + return nil, err + } + return f, nil + } + return nil, ErrOSXFUSENotFound +} diff --git a/vendor/bazil.org/fuse/mount_freebsd.go b/vendor/bazil.org/fuse/mount_freebsd.go new file mode 100644 index 000000000..70bb41024 --- /dev/null +++ b/vendor/bazil.org/fuse/mount_freebsd.go @@ -0,0 +1,111 @@ +package fuse + +import ( + "fmt" + "log" + "os" + "os/exec" + "strings" + "sync" + "syscall" +) + +func handleMountFusefsStderr(errCh chan<- error) func(line string) (ignore bool) { + return func(line string) (ignore bool) { + const ( + noMountpointPrefix = `mount_fusefs: ` + noMountpointSuffix = `: No such file or directory` + ) + if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) { + // re-extract it from the error message in case some layer + // changed the path + mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)] + err := &MountpointDoesNotExistError{ + Path: mountpoint, + } + select { + case errCh <- err: + return true + default: + // not the first error; fall back to logging it + return false + } + } + + return false + } +} + +// isBoringMountFusefsError returns whether the Wait error is +// uninteresting; exit 
status 1 is. +func isBoringMountFusefsError(err error) bool { + if err, ok := err.(*exec.ExitError); ok && err.Exited() { + if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 { + return true + } + } + return false +} + +func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) { + for k, v := range conf.options { + if strings.Contains(k, ",") || strings.Contains(v, ",") { + // Silly limitation but the mount helper does not + // understand any escaping. See TestMountOptionCommaError. + return nil, fmt.Errorf("mount options cannot contain commas on FreeBSD: %q=%q", k, v) + } + } + + f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0000) + if err != nil { + *errp = err + return nil, err + } + + cmd := exec.Command( + "/sbin/mount_fusefs", + "--safe", + "-o", conf.getOptions(), + "3", + dir, + ) + cmd.ExtraFiles = []*os.File{f} + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err) + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err) + } + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("mount_fusefs: %v", err) + } + helperErrCh := make(chan error, 1) + var wg sync.WaitGroup + wg.Add(2) + go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout) + go lineLogger(&wg, "mount helper error", handleMountFusefsStderr(helperErrCh), stderr) + wg.Wait() + if err := cmd.Wait(); err != nil { + // see if we have a better error to report + select { + case helperErr := <-helperErrCh: + // log the Wait error if it's not what we expected + if !isBoringMountFusefsError(err) { + log.Printf("mount helper failed: %v", err) + } + // and now return what we grabbed from stderr as the real + // error + return nil, helperErr + default: + // nope, fall back to generic message + } + return nil, fmt.Errorf("mount_fusefs: %v", err) + } + + close(ready) + return f, nil +} diff --git a/vendor/bazil.org/fuse/mount_linux.go b/vendor/bazil.org/fuse/mount_linux.go new file mode 100644 index 000000000..197d1044e --- /dev/null +++ b/vendor/bazil.org/fuse/mount_linux.go @@ -0,0 +1,150 @@ +package fuse + +import ( + "fmt" + "log" + "net" + "os" + "os/exec" + "strings" + "sync" + "syscall" +) + +func handleFusermountStderr(errCh chan<- error) func(line string) (ignore bool) { + return func(line string) (ignore bool) { + if line == `fusermount: failed to open /etc/fuse.conf: Permission denied` { + // Silence this particular message, it occurs way too + // commonly and isn't very relevant to whether the mount + // succeeds or not. + return true + } + + const ( + noMountpointPrefix = `fusermount: failed to access mountpoint ` + noMountpointSuffix = `: No such file or directory` + ) + if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) { + // re-extract it from the error message in case some layer + // changed the path + mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)] + err := &MountpointDoesNotExistError{ + Path: mountpoint, + } + select { + case errCh <- err: + return true + default: + // not the first error; fall back to logging it + return false + } + } + + return false + } +} + +// isBoringFusermountError returns whether the Wait error is +// uninteresting; exit status 1 is. 
diff --git a/vendor/bazil.org/fuse/mount_linux.go b/vendor/bazil.org/fuse/mount_linux.go
new file mode 100644
index 000000000..197d1044e
--- /dev/null
+++ b/vendor/bazil.org/fuse/mount_linux.go
@@ -0,0 +1,153 @@
+package fuse
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"os/exec"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+func handleFusermountStderr(errCh chan<- error) func(line string) (ignore bool) {
+	return func(line string) (ignore bool) {
+		if line == `fusermount: failed to open /etc/fuse.conf: Permission denied` {
+			// Silence this particular message; it occurs way too
+			// commonly and isn't very relevant to whether the mount
+			// succeeds or not.
+			return true
+		}
+
+		const (
+			noMountpointPrefix = `fusermount: failed to access mountpoint `
+			noMountpointSuffix = `: No such file or directory`
+		)
+		if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
+			// re-extract it from the error message in case some layer
+			// changed the path
+			mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
+			err := &MountpointDoesNotExistError{
+				Path: mountpoint,
+			}
+			select {
+			case errCh <- err:
+				return true
+			default:
+				// not the first error; fall back to logging it
+				return false
+			}
+		}
+
+		return false
+	}
+}
+
+// isBoringFusermountError returns whether the Wait error is
+// uninteresting; exit status 1 is.
+func isBoringFusermountError(err error) bool {
+	if err, ok := err.(*exec.ExitError); ok && err.Exited() {
+		if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
+			return true
+		}
+	}
+	return false
+}
+
+func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) {
+	// linux mount is never delayed
+	close(ready)
+
+	fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)
+	if err != nil {
+		return nil, fmt.Errorf("socketpair error: %v", err)
+	}
+
+	writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes")
+	defer writeFile.Close()
+
+	readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads")
+	defer readFile.Close()
+
+	cmd := exec.Command(
+		"fusermount",
+		"-o", conf.getOptions(),
+		"--",
+		dir,
+	)
+	cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3")
+
+	cmd.ExtraFiles = []*os.File{writeFile}
+
+	var wg sync.WaitGroup
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, fmt.Errorf("setting up fusermount stdout: %v", err)
+	}
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, fmt.Errorf("fusermount: %v", err)
+	}
+	helperErrCh := make(chan error, 1)
+	wg.Add(2)
+	go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
+	go lineLogger(&wg, "mount helper error", handleFusermountStderr(helperErrCh), stderr)
+	wg.Wait()
+	if err := cmd.Wait(); err != nil {
+		// see if we have a better error to report
+		select {
+		case helperErr := <-helperErrCh:
+			// log the Wait error if it's not what we expected
+			if !isBoringFusermountError(err) {
+				log.Printf("mount helper failed: %v", err)
+			}
+			// and now return what we grabbed from stderr as the real
+			// error
+			return nil, helperErr
+		default:
+			// nope, fall back to generic message
+		}
+
+		return nil, fmt.Errorf("fusermount: %v", err)
+	}
+
+	c, err := net.FileConn(readFile)
+	if err != nil {
+		return nil, fmt.Errorf("FileConn from fusermount socket: %v", err)
+	}
+	defer c.Close()
+
+	uc, ok := c.(*net.UnixConn)
+	if !ok {
+		return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c)
+	}
+
+	buf := make([]byte, 32) // expect 1 byte
+	oob := make([]byte, 32) // expect 24 bytes
+	_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
+	if err != nil {
+		return nil, fmt.Errorf("ReadMsgUnix: %v", err)
+	}
+	scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
+	if err != nil {
+		return nil, fmt.Errorf("ParseSocketControlMessage: %v", err)
+	}
+	if len(scms) != 1 {
+		return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = %#v", scms)
+	}
+	scm := scms[0]
+	gotFds, err := syscall.ParseUnixRights(&scm)
+	if err != nil {
+		return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err)
+	}
+	if len(gotFds) != 1 {
+		return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds)
+	}
+	f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse")
+	return f, nil
+}
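The tail of mount() above is the receiving half of an SCM_RIGHTS handshake: fusermount sends the /dev/fuse descriptor back over the _FUSE_COMMFD socket as ancillary data. Below, a self-contained sketch of both halves over a single socketpair, not part of the diff (Linux-oriented; /etc/hostname is just an arbitrary file to pass):

package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"syscall"
)

func main() {
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	sendEnd := os.NewFile(uintptr(fds[0]), "send")
	recvEnd := os.NewFile(uintptr(fds[1]), "recv")
	defer sendEnd.Close()
	defer recvEnd.Close()

	f, err := os.Open("/etc/hostname") // the fd we are going to pass
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Sender (fusermount's role): one dummy data byte, fd as ancillary data.
	sc, err := net.FileConn(sendEnd)
	if err != nil {
		panic(err)
	}
	rights := syscall.UnixRights(int(f.Fd()))
	if _, _, err := sc.(*net.UnixConn).WriteMsgUnix([]byte{0}, rights, nil); err != nil {
		panic(err)
	}

	// Receiver (mount()'s role above): unwrap the control message.
	rc, err := net.FileConn(recvEnd)
	if err != nil {
		panic(err)
	}
	buf, oob := make([]byte, 32), make([]byte, 32)
	_, oobn, _, _, err := rc.(*net.UnixConn).ReadMsgUnix(buf, oob)
	if err != nil {
		panic(err)
	}
	scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
	if err != nil || len(scms) != 1 {
		panic(fmt.Sprintf("want exactly 1 control message: %v, %v", scms, err))
	}
	gotFds, err := syscall.ParseUnixRights(&scms[0])
	if err != nil || len(gotFds) != 1 {
		panic(fmt.Sprintf("want exactly 1 fd: %v, %v", gotFds, err))
	}

	passed := os.NewFile(uintptr(gotFds[0]), "passed-fd")
	defer passed.Close()
	data, err := ioutil.ReadAll(passed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read through the passed fd: %q\n", data)
}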
diff --git a/vendor/bazil.org/fuse/options.go b/vendor/bazil.org/fuse/options.go
new file mode 100644
index 000000000..65ce8a541
--- /dev/null
+++ b/vendor/bazil.org/fuse/options.go
@@ -0,0 +1,310 @@
+package fuse
+
+import (
+	"errors"
+	"strings"
+)
+
+func dummyOption(conf *mountConfig) error {
+	return nil
+}
+
+// mountConfig holds the configuration for a mount operation.
+// Use it by passing MountOption values to Mount.
+type mountConfig struct {
+	options          map[string]string
+	maxReadahead     uint32
+	initFlags        InitFlags
+	osxfuseLocations []OSXFUSEPaths
+}
+
+func escapeComma(s string) string {
+	s = strings.Replace(s, `\`, `\\`, -1)
+	s = strings.Replace(s, `,`, `\,`, -1)
+	return s
+}
+
+// getOptions makes a string of options suitable for passing to FUSE
+// mount flag `-o`. Returns an empty string if no options were set.
+// Any platform-specific adjustments should happen before the call.
+func (m *mountConfig) getOptions() string {
+	var opts []string
+	for k, v := range m.options {
+		k = escapeComma(k)
+		if v != "" {
+			k += "=" + escapeComma(v)
+		}
+		opts = append(opts, k)
+	}
+	return strings.Join(opts, ",")
+}
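Since getOptions feeds a comma-separated -o argument, the escaping above is what keeps option values containing "," or "\" intact. A quick standalone demonstration, not part of the diff (option names are arbitrary; the sort exists only to make map-iteration output stable):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func escapeComma(s string) string {
	s = strings.Replace(s, `\`, `\\`, -1)
	s = strings.Replace(s, `,`, `\,`, -1)
	return s
}

func main() {
	options := map[string]string{
		"fsname":  "demo,volume", // the comma must survive the -o syntax
		"ro":      "",            // value-less flag
		"subtype": "demofs",
	}
	var opts []string
	for k, v := range options {
		k = escapeComma(k)
		if v != "" {
			k += "=" + escapeComma(v)
		}
		opts = append(opts, k)
	}
	sort.Strings(opts)
	fmt.Println(strings.Join(opts, ","))
	// Prints: fsname=demo\,volume,ro,subtype=demofs
}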
+
+type mountOption func(*mountConfig) error
+
+// MountOption is passed to Mount to change the behavior of the mount.
+type MountOption mountOption
+
+// FSName sets the file system name (also called source) that is
+// visible in the list of mounted file systems.
+//
+// FreeBSD ignores this option.
+func FSName(name string) MountOption {
+	return func(conf *mountConfig) error {
+		conf.options["fsname"] = name
+		return nil
+	}
+}
+
+// Subtype sets the subtype of the mount. The main type is always
+// `fuse`. The type in a list of mounted file systems will look like
+// `fuse.foo`.
+//
+// OS X ignores this option.
+// FreeBSD ignores this option.
+func Subtype(fstype string) MountOption {
+	return func(conf *mountConfig) error {
+		conf.options["subtype"] = fstype
+		return nil
+	}
+}
+
+// LocalVolume sets the volume to be local (instead of network),
+// changing the behavior of Finder, Spotlight, and such.
+//
+// OS X only. Others ignore this option.
+func LocalVolume() MountOption {
+	return localVolume
+}
+
+// VolumeName sets the volume name shown in Finder.
+//
+// OS X only. Others ignore this option.
+func VolumeName(name string) MountOption {
+	return volumeName(name)
+}
+
+// NoAppleDouble makes OSXFUSE disallow files with names used by OS X
+// to store extended attributes on file systems that do not support
+// them natively.
+//
+// Such file names are:
+//
+//     ._*
+//     .DS_Store
+//
+// OS X only. Others ignore this option.
+func NoAppleDouble() MountOption {
+	return noAppleDouble
+}
+
+// NoAppleXattr makes OSXFUSE disallow extended attributes with the
+// prefix "com.apple.". This disables persistent Finder state and
+// other such information.
+//
+// OS X only. Others ignore this option.
+func NoAppleXattr() MountOption {
+	return noAppleXattr
+}
+
+// ExclCreate causes the O_EXCL flag to be set for only "truly" exclusive creates,
+// i.e. create calls for which the initiator explicitly set the O_EXCL flag.
+//
+// OSXFUSE expects all create calls to return EEXIST in case the file
+// already exists, regardless of whether O_EXCL was specified or not.
+// To ensure this behavior, it normally sets OpenExclusive for all
+// Create calls, regardless of whether the original call had it set.
+// For distributed filesystems, that may force every file create to be
+// a distributed consensus action, causing undesirable delays.
+//
+// This option makes the FUSE filesystem see the original flag value,
+// and better decide when to ensure global consensus.
+//
+// Note that returning EEXIST on existing file create is still
+// expected with OSXFUSE, regardless of the presence of the
+// OpenExclusive flag.
+//
+// For more information, see
+// https://github.com/osxfuse/osxfuse/issues/209
+//
+// OS X only. Others ignore this option.
+// Requires OSXFUSE 3.4.1 or newer.
+func ExclCreate() MountOption {
+	return exclCreate
+}
+
+// DaemonTimeout sets the time in seconds between a request and a reply before
+// the FUSE mount is declared dead.
+//
+// OS X and FreeBSD only. Others ignore this option.
+func DaemonTimeout(name string) MountOption {
+	return daemonTimeout(name)
+}
+
+var ErrCannotCombineAllowOtherAndAllowRoot = errors.New("cannot combine AllowOther and AllowRoot")
+
+// AllowOther allows other users to access the file system.
+//
+// Only one of AllowOther or AllowRoot can be used.
+func AllowOther() MountOption {
+	return func(conf *mountConfig) error {
+		if _, ok := conf.options["allow_root"]; ok {
+			return ErrCannotCombineAllowOtherAndAllowRoot
+		}
+		conf.options["allow_other"] = ""
+		return nil
+	}
+}
+
+// AllowRoot allows the root user to access the file system.
+//
+// Only one of AllowOther or AllowRoot can be used.
+//
+// FreeBSD ignores this option.
+func AllowRoot() MountOption {
+	return func(conf *mountConfig) error {
+		if _, ok := conf.options["allow_other"]; ok {
+			return ErrCannotCombineAllowOtherAndAllowRoot
+		}
+		conf.options["allow_root"] = ""
+		return nil
+	}
+}
+
+// AllowDev enables interpreting character or block special devices on the
+// filesystem.
+func AllowDev() MountOption {
+	return func(conf *mountConfig) error {
+		conf.options["dev"] = ""
+		return nil
+	}
+}
+
+// AllowSUID allows set-user-identifier or set-group-identifier bits to take
+// effect.
+func AllowSUID() MountOption {
+	return func(conf *mountConfig) error {
+		conf.options["suid"] = ""
+		return nil
+	}
+}
+
+// DefaultPermissions makes the kernel enforce access control based on
+// the file mode (as in chmod).
+//
+// Without this option, the Node itself decides what is and is not
+// allowed. This is normally ok because FUSE file systems cannot be
+// accessed by other users without AllowOther/AllowRoot.
+//
+// FreeBSD ignores this option.
+func DefaultPermissions() MountOption {
+	return func(conf *mountConfig) error {
+		conf.options["default_permissions"] = ""
+		return nil
+	}
+}
+
+// ReadOnly makes the mount read-only.
+func ReadOnly() MountOption {
+	return func(conf *mountConfig) error {
+		conf.options["ro"] = ""
+		return nil
+	}
+}
+
+// MaxReadahead sets the number of bytes that can be prefetched for
+// sequential reads. The kernel can enforce a maximum value lower than
+// this.
+//
+// This setting makes the kernel perform speculative reads that do not
+// originate from any client process. This usually tremendously
+// improves read performance.
+func MaxReadahead(n uint32) MountOption {
+	return func(conf *mountConfig) error {
+		conf.maxReadahead = n
+		return nil
+	}
+}
+
+// AsyncRead enables multiple outstanding read requests for the same
+// handle. Without this, there is at most one request in flight at a
+// time.
+func AsyncRead() MountOption {
+	return func(conf *mountConfig) error {
+		conf.initFlags |= InitAsyncRead
+		return nil
+	}
+}
+
+// WritebackCache enables the kernel to buffer writes before sending
+// them to the FUSE server. Without this, writethrough caching is
+// used.
+func WritebackCache() MountOption {
+	return func(conf *mountConfig) error {
+		conf.initFlags |= InitWritebackCache
+		return nil
+	}
+}
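For orientation, this is roughly how a caller consumes these option constructors; a hedged sketch rather than anything in this diff. It assumes bazil.org/fuse's exported Mount and Unmount entry points (Unmount appears later in this diff; Mount's definition is outside this hunk), and the mountpoint and names are placeholders:

package main

import (
	"log"

	"bazil.org/fuse"
)

func main() {
	mountpoint := "/mnt/demo" // placeholder path
	c, err := fuse.Mount(
		mountpoint,
		fuse.FSName("demo"),
		fuse.Subtype("demofs"),
		fuse.ReadOnly(),
		fuse.MaxReadahead(128*1024),
		fuse.DefaultPermissions(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// ... serve FUSE requests on c, e.g. with the bazil.org/fuse/fs
	// helper package, then fuse.Unmount(mountpoint) on shutdown ...
}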
+
+// OSXFUSEPaths describes the paths used by an installed OSXFUSE
+// version. See OSXFUSELocationV3 for typical values.
+type OSXFUSEPaths struct {
+	// Prefix for the device file. At mount time, an incrementing
+	// number is suffixed until a free FUSE device is found.
+	DevicePrefix string
+	// Path of the load helper, used to load the kernel extension if
+	// no device files are found.
+	Load string
+	// Path of the mount helper, used for the actual mount operation.
+	Mount string
+	// Environment variable used to pass the path to the executable
+	// calling the mount helper.
+	DaemonVar string
+}
+
+// Default paths for OSXFUSE. See OSXFUSELocations.
+var (
+	OSXFUSELocationV3 = OSXFUSEPaths{
+		DevicePrefix: "/dev/osxfuse",
+		Load:         "/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse",
+		Mount:        "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse",
+		DaemonVar:    "MOUNT_OSXFUSE_DAEMON_PATH",
+	}
+	OSXFUSELocationV2 = OSXFUSEPaths{
+		DevicePrefix: "/dev/osxfuse",
+		Load:         "/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs",
+		Mount:        "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs",
+		DaemonVar:    "MOUNT_FUSEFS_DAEMON_PATH",
+	}
+)
+
+// OSXFUSELocations sets where to look for OSXFUSE files. The
+// arguments are all the possible locations. The previous locations
+// are replaced.
+//
+// Without this option, OSXFUSELocationV3 and OSXFUSELocationV2 are
+// used.
+//
+// OS X only. Others ignore this option.
+func OSXFUSELocations(paths ...OSXFUSEPaths) MountOption {
+	return func(conf *mountConfig) error {
+		if len(paths) == 0 {
+			return errors.New("must specify at least one location for OSXFUSELocations")
+		}
+		// replace previous values, but make a copy so there's no
+		// worry about the caller mutating their slice
+		conf.osxfuseLocations = append(conf.osxfuseLocations[:0], paths...)
+		return nil
+	}
+}
+
+// AllowNonEmptyMount allows mounting over a non-empty directory.
+//
+// The files in it will be shadowed by the freshly created mount. By
+// default these mounts are rejected to prevent accidental covering up
+// of data, which could for example prevent automatic backup.
+func AllowNonEmptyMount() MountOption { + return func(conf *mountConfig) error { + conf.options["nonempty"] = "" + return nil + } +} diff --git a/vendor/bazil.org/fuse/options_darwin.go b/vendor/bazil.org/fuse/options_darwin.go new file mode 100644 index 000000000..faa9d78e7 --- /dev/null +++ b/vendor/bazil.org/fuse/options_darwin.go @@ -0,0 +1,35 @@ +package fuse + +func localVolume(conf *mountConfig) error { + conf.options["local"] = "" + return nil +} + +func volumeName(name string) MountOption { + return func(conf *mountConfig) error { + conf.options["volname"] = name + return nil + } +} + +func daemonTimeout(name string) MountOption { + return func(conf *mountConfig) error { + conf.options["daemon_timeout"] = name + return nil + } +} + +func noAppleXattr(conf *mountConfig) error { + conf.options["noapplexattr"] = "" + return nil +} + +func noAppleDouble(conf *mountConfig) error { + conf.options["noappledouble"] = "" + return nil +} + +func exclCreate(conf *mountConfig) error { + conf.options["excl_create"] = "" + return nil +} diff --git a/vendor/bazil.org/fuse/options_freebsd.go b/vendor/bazil.org/fuse/options_freebsd.go new file mode 100644 index 000000000..7c164b136 --- /dev/null +++ b/vendor/bazil.org/fuse/options_freebsd.go @@ -0,0 +1,28 @@ +package fuse + +func localVolume(conf *mountConfig) error { + return nil +} + +func volumeName(name string) MountOption { + return dummyOption +} + +func daemonTimeout(name string) MountOption { + return func(conf *mountConfig) error { + conf.options["timeout"] = name + return nil + } +} + +func noAppleXattr(conf *mountConfig) error { + return nil +} + +func noAppleDouble(conf *mountConfig) error { + return nil +} + +func exclCreate(conf *mountConfig) error { + return nil +} diff --git a/vendor/bazil.org/fuse/options_linux.go b/vendor/bazil.org/fuse/options_linux.go new file mode 100644 index 000000000..13f0896d5 --- /dev/null +++ b/vendor/bazil.org/fuse/options_linux.go @@ -0,0 +1,25 @@ +package fuse + +func localVolume(conf *mountConfig) error { + return nil +} + +func volumeName(name string) MountOption { + return dummyOption +} + +func daemonTimeout(name string) MountOption { + return dummyOption +} + +func noAppleXattr(conf *mountConfig) error { + return nil +} + +func noAppleDouble(conf *mountConfig) error { + return nil +} + +func exclCreate(conf *mountConfig) error { + return nil +} diff --git a/vendor/bazil.org/fuse/protocol.go b/vendor/bazil.org/fuse/protocol.go new file mode 100644 index 000000000..a77bbf72f --- /dev/null +++ b/vendor/bazil.org/fuse/protocol.go @@ -0,0 +1,75 @@ +package fuse + +import ( + "fmt" +) + +// Protocol is a FUSE protocol version number. +type Protocol struct { + Major uint32 + Minor uint32 +} + +func (p Protocol) String() string { + return fmt.Sprintf("%d.%d", p.Major, p.Minor) +} + +// LT returns whether a is less than b. +func (a Protocol) LT(b Protocol) bool { + return a.Major < b.Major || + (a.Major == b.Major && a.Minor < b.Minor) +} + +// GE returns whether a is greater than or equal to b. +func (a Protocol) GE(b Protocol) bool { + return a.Major > b.Major || + (a.Major == b.Major && a.Minor >= b.Minor) +} + +func (a Protocol) is79() bool { + return a.GE(Protocol{7, 9}) +} + +// HasAttrBlockSize returns whether Attr.BlockSize is respected by the +// kernel. +func (a Protocol) HasAttrBlockSize() bool { + return a.is79() +} + +// HasReadWriteFlags returns whether ReadRequest/WriteRequest +// fields Flags and FileFlags are valid. 
+func (a Protocol) HasReadWriteFlags() bool { + return a.is79() +} + +// HasGetattrFlags returns whether GetattrRequest field Flags is +// valid. +func (a Protocol) HasGetattrFlags() bool { + return a.is79() +} + +func (a Protocol) is710() bool { + return a.GE(Protocol{7, 10}) +} + +// HasOpenNonSeekable returns whether OpenResponse field Flags flag +// OpenNonSeekable is supported. +func (a Protocol) HasOpenNonSeekable() bool { + return a.is710() +} + +func (a Protocol) is712() bool { + return a.GE(Protocol{7, 12}) +} + +// HasUmask returns whether CreateRequest/MkdirRequest/MknodRequest +// field Umask is valid. +func (a Protocol) HasUmask() bool { + return a.is712() +} + +// HasInvalidate returns whether InvalidateNode/InvalidateEntry are +// supported. +func (a Protocol) HasInvalidate() bool { + return a.is712() +} diff --git a/vendor/bazil.org/fuse/unmount.go b/vendor/bazil.org/fuse/unmount.go new file mode 100644 index 000000000..ffe3f155c --- /dev/null +++ b/vendor/bazil.org/fuse/unmount.go @@ -0,0 +1,6 @@ +package fuse + +// Unmount tries to unmount the filesystem mounted at dir. +func Unmount(dir string) error { + return unmount(dir) +} diff --git a/vendor/bazil.org/fuse/unmount_linux.go b/vendor/bazil.org/fuse/unmount_linux.go new file mode 100644 index 000000000..088f0cfee --- /dev/null +++ b/vendor/bazil.org/fuse/unmount_linux.go @@ -0,0 +1,21 @@ +package fuse + +import ( + "bytes" + "errors" + "os/exec" +) + +func unmount(dir string) error { + cmd := exec.Command("fusermount", "-u", dir) + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + output = bytes.TrimRight(output, "\n") + msg := err.Error() + ": " + string(output) + err = errors.New(msg) + } + return err + } + return nil +} diff --git a/vendor/bazil.org/fuse/unmount_std.go b/vendor/bazil.org/fuse/unmount_std.go new file mode 100644 index 000000000..d6efe276f --- /dev/null +++ b/vendor/bazil.org/fuse/unmount_std.go @@ -0,0 +1,17 @@ +// +build !linux + +package fuse + +import ( + "os" + "syscall" +) + +func unmount(dir string) error { + err := syscall.Unmount(dir, 0) + if err != nil { + err = &os.PathError{Op: "unmount", Path: dir, Err: err} + return err + } + return nil +} diff --git a/vendor/github.com/CovenantSQL/HashStablePack/marshalhash/write_bytes.go b/vendor/github.com/CovenantSQL/HashStablePack/marshalhash/write_bytes.go index d4e9dde2f..584e93780 100644 --- a/vendor/github.com/CovenantSQL/HashStablePack/marshalhash/write_bytes.go +++ b/vendor/github.com/CovenantSQL/HashStablePack/marshalhash/write_bytes.go @@ -276,6 +276,9 @@ func AppendComplex128(b []byte, c complex128) []byte { // AppendTime appends a time.Time to the slice as a MessagePack extension func AppendTime(b []byte, t time.Time) []byte { + if t.IsZero() { + return AppendNil(b) + } o, n := ensure(b, TimeSize) t = t.UTC() o[n] = mext8 diff --git a/vendor/github.com/CovenantSQL/sqlparser/token.go b/vendor/github.com/CovenantSQL/sqlparser/token.go index 8dfc057fa..74ed904e6 100644 --- a/vendor/github.com/CovenantSQL/sqlparser/token.go +++ b/vendor/github.com/CovenantSQL/sqlparser/token.go @@ -34,19 +34,20 @@ const ( // Tokenizer is the struct used to generate SQL // tokens for the parser. 
type Tokenizer struct { - InStream io.Reader - AllowComments bool - ForceEOF bool - lastChar uint16 - Position int - lastToken []byte - LastError error - posVarIndex int - ParseTree Statement - partialDDL *DDL - nesting int - multi bool - specialComment *Tokenizer + InStream io.Reader + AllowComments bool + AllowBackSlashEscape bool + ForceEOF bool + lastChar uint16 + Position int + lastToken []byte + LastError error + posVarIndex int + ParseTree Statement + partialDDL *DDL + nesting int + multi bool + specialComment *Tokenizer buf []byte bufPos int @@ -660,14 +661,14 @@ func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, []byte) { return LEX_ERROR, buffer.Bytes() } - if ch != delim && ch != '\\' { + if ch != delim && (!tkn.AllowBackSlashEscape || ch != '\\') { buffer.WriteByte(byte(ch)) // Scan ahead to the next interesting character. start := tkn.bufPos for ; tkn.bufPos < tkn.bufSize; tkn.bufPos++ { ch = uint16(tkn.buf[tkn.bufPos]) - if ch == delim || ch == '\\' { + if ch == delim || (tkn.AllowBackSlashEscape && ch == '\\') { break } } @@ -687,7 +688,7 @@ func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, []byte) { } tkn.next() // Read one past the delim or escape character. - if ch == '\\' { + if tkn.AllowBackSlashEscape && ch == '\\' { if tkn.lastChar == eofChar { // String terminates mid escape character. return LEX_ERROR, buffer.Bytes() diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96f2..000000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 792994785..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. 
- flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d68..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce945..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? 
- switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f31..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. 
- DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. 
- -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6f1..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. 
- - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89fc1..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. 
-func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. 
We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7d7..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e3388..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad612..000000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99fad..000000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). -package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. 
This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
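For readers unfamiliar with the API being dropped from vendor/ here, a minimal sketch of driving the matching-block interface the comment above describes; the sample inputs are illustrative, and the import path follows the vendor directory in this diff:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := difflib.SplitLines("one\ntwo\nthree\n")
	b := difflib.SplitLines("one\n2\nthree\n")

	m := difflib.NewMatcher(a, b)
	// The final Match is always the (len(a), len(b), 0) sentinel.
	for _, blk := range m.GetMatchingBlocks() {
		fmt.Printf("a[%d:%d] == b[%d:%d] (size %d)\n",
			blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size, blk.Size)
	}
}
```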
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
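Building on the sketch above (same package and imports assumed), the opcode list can be walked directly to drive a custom renderer; the tags carry the meanings documented for GetOpCodes:

```go
// printOpCodes prints the edit script that turns a into b.
func printOpCodes(a, b []string) {
	m := difflib.NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		// Tag is 'r' (replace), 'd' (delete), 'i' (insert) or 'e' (equal).
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}
```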
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
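Since both quick ratios are documented as upper bounds on Ratio, a caller can use them to short-circuit the expensive exact computation. A sketch under the same assumptions as above (the cutoff is illustrative):

```go
// similar reports whether a and b score at least cutoff, checking the
// cheap upper bounds first so Ratio is only computed when it matters.
func similar(a, b []string, cutoff float64) bool {
	m := difflib.NewMatcher(a, b)
	return m.RealQuickRatio() >= cutoff &&
		m.QuickRatio() >= cutoff &&
		m.Ratio() >= cutoff
}
```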
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
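The UnifiedDiff struct and the GetUnifiedDiffString helper defined in this file are typically combined as follows (file names and contents are illustrative):

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\n2\nthree\n"),
		FromFile: "a/sample.txt", // illustrative names
		ToFile:   "b/sample.txt",
		Context:  3,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err) // illustrative error handling
	}
	fmt.Print(text)
}
```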
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. -// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. 
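ContextDiff shares UnifiedDiff's fields, so producing the context-diff flavor only changes the type and the helper called; this body is a drop-in replacement for the main in the previous sketch:

```go
	cdiff := difflib.ContextDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\n2\nthree\n"),
		FromFile: "before.txt", // illustrative names
		ToFile:   "after.txt",
		Context:  3,
	}
	text, err := difflib.GetContextDiffString(cdiff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
```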
-func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. -func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore index 66be63a00..6b7d7d1e8 100644 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ b/vendor/github.com/sirupsen/logrus/.gitignore @@ -1 +1,2 @@ logrus +vendor diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index ff0471869..cb85d9f9f 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,10 @@ +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + # 1.1.1 This is a bug fix release. 
* fix the build break on Solaris diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 072e99be3..093bb13f8 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 ``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + #### Case-sensitivity @@ -246,9 +277,10 @@ A list of currently known of service hook can be found in this wiki [page](https #### Level logging -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go +log.Trace("Something very low level.") log.Debug("Useful debugging information.") log.Info("Something noteworthy happened!") log.Warn("You should probably take a look at this.") diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index ca634a609..cc85d3aab 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -5,11 +5,29 @@ import ( "fmt" "os" "reflect" + "runtime" + "strings" "sync" "time" ) -var bufferPool *sync.Pool +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) func init() { bufferPool = &sync.Pool{ @@ -17,15 +35,18 @@ func init() { return new(bytes.Buffer) }, } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 } // Defines the key when adding errors using WithError. var ErrorKey = "error" // An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. 
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. type Entry struct { Logger *Logger @@ -35,11 +56,14 @@ type Entry struct { // Time at which the log entry was created Time time.Time - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic // This field will be set on entry firing and the value will be equal to the one in Logger struct field. Level Level - // Message passed to Debug, Info, Warn, Error, Fatal or Panic + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic Message string // When formatter is called in entry.log(), a Buffer may be set to entry @@ -52,8 +76,8 @@ type Entry struct { func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, - // Default is five fields, give a little extra room - Data: make(Fields, 5), + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), } } @@ -103,6 +127,57 @@ func (entry *Entry) WithTime(t time.Time) *Entry { return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} } +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name()) + + // now that we have the cache, we can skip a minimum count of known-logrus functions + // XXX this is dubious, the number of frames may vary store an entry in a logger interface + minimumCallerDepth = knownLogrusFrames + }) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + // This function is not declared with a pointer value because otherwise // race conditions will occur when using multiple goroutines func (entry Entry) log(level Level, msg string) { @@ -119,6 +194,9 @@ func (entry Entry) log(level Level, msg string) { entry.Level = level entry.Message = msg + if entry.Logger.ReportCaller { + entry.Caller = getCaller() + } entry.fireHooks() @@ -162,6 +240,12 @@ func (entry *Entry) write() { } } +func (entry *Entry) Trace(args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.log(TraceLevel, fmt.Sprint(args...)) + } +} + func (entry *Entry) Debug(args ...interface{}) { if entry.Logger.IsLevelEnabled(DebugLevel) { entry.log(DebugLevel, 
fmt.Sprint(args...)) @@ -198,7 +282,7 @@ func (entry *Entry) Fatal(args ...interface{}) { if entry.Logger.IsLevelEnabled(FatalLevel) { entry.log(FatalLevel, fmt.Sprint(args...)) } - Exit(1) + entry.Logger.Exit(1) } func (entry *Entry) Panic(args ...interface{}) { @@ -210,6 +294,12 @@ func (entry *Entry) Panic(args ...interface{}) { // Entry Printf family functions +func (entry *Entry) Tracef(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.Trace(fmt.Sprintf(format, args...)) + } +} + func (entry *Entry) Debugf(format string, args ...interface{}) { if entry.Logger.IsLevelEnabled(DebugLevel) { entry.Debug(fmt.Sprintf(format, args...)) @@ -246,7 +336,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) { if entry.Logger.IsLevelEnabled(FatalLevel) { entry.Fatal(fmt.Sprintf(format, args...)) } - Exit(1) + entry.Logger.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { @@ -257,6 +347,12 @@ func (entry *Entry) Panicf(format string, args ...interface{}) { // Entry Println family functions +func (entry *Entry) Traceln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.Trace(entry.sprintlnn(args...)) + } +} + func (entry *Entry) Debugln(args ...interface{}) { if entry.Logger.IsLevelEnabled(DebugLevel) { entry.Debug(entry.sprintlnn(args...)) @@ -293,7 +389,7 @@ func (entry *Entry) Fatalln(args ...interface{}) { if entry.Logger.IsLevelEnabled(FatalLevel) { entry.Fatal(entry.sprintlnn(args...)) } - Exit(1) + entry.Logger.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index fb2a7a1f0..7342613c3 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -24,6 +24,12 @@ func SetFormatter(formatter Formatter) { std.SetFormatter(formatter) } +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + // SetLevel sets the standard logger level. func SetLevel(level Level) { std.SetLevel(level) @@ -77,6 +83,11 @@ func WithTime(t time.Time) *Entry { return std.WithTime(t) } +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -117,6 +128,11 @@ func Fatal(args ...interface{}) { std.Fatal(args...) } +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + // Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) @@ -157,6 +173,11 @@ func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + // Debugln logs a message at level Debug on the standard logger. func Debugln(args ...interface{}) { std.Debugln(args...) 
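Reviewer note: the logrus hunks above vendor the v1.2.0 release, which (per its CHANGELOG) adds a `Trace` level below `Debug`, the `SetReportCaller` switch, and a configurable `ExitFunc` on `Logger`. A minimal sketch of how these pieces fit together, assuming the API exactly as vendored in this diff:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Trace sits below Debug in the new level ordering, so it must be
	// enabled explicitly before Trace/Tracef/Traceln produce any output.
	log.SetLevel(log.TraceLevel)

	// SetReportCaller attaches the calling function (plus file:line in the
	// formatters) to every entry; the README above notes a 20-40% overhead.
	log.SetReportCaller(true)

	log.Trace("very low-level detail")
	log.WithField("animal", "walrus").Debug("still verbose")

	// Fatal now exits through Logger.Exit, which falls back to os.Exit
	// only when ExitFunc is nil, so a test can swallow the exit.
	logger := log.New()
	logger.ExitFunc = func(code int) { /* no-op in tests */ }
	logger.Fatal("logged, then routed through ExitFunc instead of os.Exit")
}
```

Overriding `ExitFunc` is what makes `Fatal` code paths exercisable in tests without terminating the process.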
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go index be2f3fcee..408883773 100644 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -9,6 +9,8 @@ const ( FieldKeyLevel = "level" FieldKeyTime = "time" FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" ) // The Formatter interface is used to implement a custom Formatter. It takes an @@ -25,7 +27,7 @@ type Formatter interface { Format(*Entry) ([]byte, error) } -// This is to not silently overwrite `time`, `msg` and `level` fields when +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when // dumping it. If this code wasn't there doing: // // logrus.WithField("level", 1).Info("hello") @@ -37,7 +39,7 @@ type Formatter interface { // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields, fieldMap FieldMap) { +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { timeKey := fieldMap.resolve(FieldKeyTime) if t, ok := data[timeKey]; ok { data["fields."+timeKey] = t @@ -61,4 +63,16 @@ func prefixFieldClashes(data Fields, fieldMap FieldMap) { data["fields."+logrusErrKey] = l delete(data, logrusErrKey) } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } } diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod index f4fed02fb..94574cc63 100644 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -2,8 +2,9 @@ module github.com/sirupsen/logrus require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe + github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect github.com/stretchr/testify v1.2.2 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum index 1f0d71964..133d34ae1 100644 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -2,8 +2,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index ef8d07460..260575359 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -34,9 +34,10 @@ type JSONFormatter struct { // As an example: // formatter := &JSONFormatter{ // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", + // FieldKeyTime: "@timestamp", // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", // }, // } FieldMap FieldMap @@ -47,7 +48,7 @@ type JSONFormatter struct { // Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) + data := make(Fields, len(entry.Data)+4) for k, v := range entry.Data { switch v := v.(type) { case error: @@ -65,7 +66,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data = newData } - prefixFieldClashes(data, f.FieldMap) + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) timestampFormat := f.TimestampFormat if timestampFormat == "" { @@ -80,6 +81,10 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { } data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function + data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } var b *bytes.Buffer if entry.Buffer != nil { diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index b67bfcbd3..5ceca0eab 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -24,6 +24,10 @@ type Logger struct { // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. @@ -32,8 +36,12 @@ type Logger struct { mu MutexWrap // Reusable empty entry entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc } +type exitFunc func(int) + type MutexWrap struct { lock sync.Mutex disabled bool @@ -69,10 +77,12 @@ func (mw *MutexWrap) Disable() { // It's recommended to make this a global instance called `log`. func New() *Logger { return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, } } @@ -121,6 +131,14 @@ func (logger *Logger) WithTime(t time.Time) *Entry { return entry.WithTime(t) } +func (logger *Logger) Tracef(format string, args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Tracef(format, args...) 
+ logger.releaseEntry(entry) + } +} + func (logger *Logger) Debugf(format string, args ...interface{}) { if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() @@ -173,7 +191,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) { entry.Fatalf(format, args...) logger.releaseEntry(entry) } - Exit(1) + logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { @@ -184,6 +202,14 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { } } +func (logger *Logger) Trace(args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Trace(args...) + logger.releaseEntry(entry) + } +} + func (logger *Logger) Debug(args ...interface{}) { if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() @@ -236,7 +262,7 @@ func (logger *Logger) Fatal(args ...interface{}) { entry.Fatal(args...) logger.releaseEntry(entry) } - Exit(1) + logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { @@ -247,6 +273,14 @@ func (logger *Logger) Panic(args ...interface{}) { } } +func (logger *Logger) Traceln(args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Traceln(args...) + logger.releaseEntry(entry) + } +} + func (logger *Logger) Debugln(args ...interface{}) { if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() @@ -299,7 +333,7 @@ func (logger *Logger) Fatalln(args ...interface{}) { entry.Fatalln(args...) logger.releaseEntry(entry) } - Exit(1) + logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { @@ -310,6 +344,14 @@ func (logger *Logger) Panicln(args ...interface{}) { } } +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + //When file is opened with appending mode, it's safe to //write concurrently to a file (within 4k message on Linux). //In these cases user can choose to disable the lock. @@ -357,6 +399,12 @@ func (logger *Logger) SetOutput(output io.Writer) { logger.Out = output } +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + // ReplaceHooks replaces the logger hooks and returns the old ones func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Lock() diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go index fa0b9dea8..4ef451866 100644 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -15,6 +15,8 @@ type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { switch level { + case TraceLevel: + return "trace" case DebugLevel: return "debug" case InfoLevel: @@ -47,12 +49,26 @@ func ParseLevel(lvl string) (Level, error) { return InfoLevel, nil case "debug": return DebugLevel, nil + case "trace": + return TraceLevel, nil } var l Level return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = Level(l) + + return nil +} + // A constant exposing all logging levels var AllLevels = []Level{ PanicLevel, @@ -61,6 +77,7 @@ var AllLevels = []Level{ WarnLevel, InfoLevel, DebugLevel, + TraceLevel, } // These are the different logging levels. 
You can set the logging level to log @@ -69,7 +86,7 @@ const ( // PanicLevel level, highest level of severity. Logs and then calls panic with the // message passed to Debug, Info, ... PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the // logging level is set to Panic. FatalLevel // ErrorLevel level. Logs. Used for errors that should definitely be noted. @@ -82,6 +99,8 @@ const ( InfoLevel // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. + TraceLevel ) // Won't compile if StdLogger can't be realized by a log.Logger @@ -148,3 +167,12 @@ type FieldLogger interface { // IsFatalEnabled() bool // IsPanicEnabled() bool } + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index d4663b8c2..49ec92f17 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -107,7 +107,7 @@ func (f *TextFormatter) isColored() bool { // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - prefixFieldClashes(entry.Data, f.FieldMap) + prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller()) keys := make([]string, 0, len(entry.Data)) for k := range entry.Data { @@ -125,6 +125,10 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { if entry.err != "" { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) } + if entry.HasCaller() { + fixedKeys = append(fixedKeys, + f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile)) + } if !f.DisableSorting { if f.SortingFunc == nil { @@ -160,15 +164,19 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { } else { for _, key := range fixedKeys { var value interface{} - switch key { - case f.FieldMap.resolve(FieldKeyTime): + switch { + case key == f.FieldMap.resolve(FieldKeyTime): value = entry.Time.Format(timestampFormat) - case f.FieldMap.resolve(FieldKeyLevel): + case key == f.FieldMap.resolve(FieldKeyLevel): value = entry.Level.String() - case f.FieldMap.resolve(FieldKeyMsg): + case key == f.FieldMap.resolve(FieldKeyMsg): value = entry.Message - case f.FieldMap.resolve(FieldKeyLogrusError): + case key == f.FieldMap.resolve(FieldKeyLogrusError): value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = entry.Caller.Function + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) default: value = entry.Data[key] } @@ -183,7 +191,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { var levelColor int switch entry.Level { - case DebugLevel: + case DebugLevel, TraceLevel: levelColor = gray case WarnLevel: levelColor = yellow @@ -202,12 +210,19 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin // the behavior of 
logrus text_formatter the same as the stdlib log package entry.Message = strings.TrimSuffix(entry.Message, "\n") + caller := "" + + if entry.HasCaller() { + caller = fmt.Sprintf("%s:%d %s()", + entry.Caller.File, entry.Caller.Line, entry.Caller.Function) + } + if f.DisableTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) } else if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { v := entry.Data[k] diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 7bdebedc6..9e1f75135 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { var printFunc func(args ...interface{}) switch level { + case TraceLevel: + printFunc = entry.Trace case DebugLevel: printFunc = entry.Debug case InfoLevel: diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml deleted file mode 100644 index 010d4ccd5..000000000 --- a/vendor/github.com/stretchr/objx/.codeclimate.yml +++ /dev/null @@ -1,13 +0,0 @@ -engines: - gofmt: - enabled: true - golint: - enabled: true - govet: - enabled: true - -exclude_patterns: -- ".github/" -- "vendor/" -- "codegen/" -- "doc.go" diff --git a/vendor/github.com/stretchr/objx/Gopkg.lock b/vendor/github.com/stretchr/objx/Gopkg.lock deleted file mode 100644 index eebe342a9..000000000 --- a/vendor/github.com/stretchr/objx/Gopkg.lock +++ /dev/null @@ -1,30 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require" - ] - revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" - version = "v1.2.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/stretchr/objx/Gopkg.toml b/vendor/github.com/stretchr/objx/Gopkg.toml deleted file mode 100644 index d70f1570b..000000000 --- a/vendor/github.com/stretchr/objx/Gopkg.toml +++ /dev/null @@ -1,8 +0,0 @@ -[prune] - unused-packages = true - non-go = true - go-tests = true - -[[constraint]] - name = "github.com/stretchr/testify" - version = "~1.2.0" diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE deleted file mode 100644 index 44d4d9d5a..000000000 --- a/vendor/github.com/stretchr/objx/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Stretchr, Inc. -Copyright (c) 2017-2018 objx contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md deleted file mode 100644 index be5750c94..000000000 --- a/vendor/github.com/stretchr/objx/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Objx -[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx) -[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx) -[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) -[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) -[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) -[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) - -Objx - Go package for dealing with maps, slices, JSON and other data. 
- -Get started: - -- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) -- Check out the API Documentation http://godoc.org/github.com/stretchr/objx - -## Overview -Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. - -### Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: - - m, err := objx.FromJSON(json) - -NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. - -Use `Get` to access the value you're interested in. You can use dot and array -notation too: - - m.Get("places[0].latlng") - -Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - - if m.Get("code").IsStr() { // Your code... } - -Or you can just assume the type, and use one of the strong type methods to extract the real value: - - m.Get("code").Int() - -If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. - - Get("code").Int(-1) - -If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. - -### Reading data -A simple example of how to use Objx: - - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() - - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) - -### Ranging -Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } - -## Installation -To install Objx, use go get: - - go get github.com/stretchr/objx - -### Staying up to date -To update Objx to the latest version, run: - - go get -u github.com/stretchr/objx - -### Supported go versions -We support the lastest two major Go versions, which are 1.8 and 1.9 at the moment. - -## Contributing -Please feel free to submit issues, fork the repository and send pull requests! diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml deleted file mode 100644 index f8035641f..000000000 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ /dev/null @@ -1,32 +0,0 @@ -default: - deps: [test] - -dl-deps: - desc: Downloads cli dependencies - cmds: - - go get -u github.com/golang/lint/golint - - go get -u github.com/golang/dep/cmd/dep - -update-deps: - desc: Updates dependencies - cmds: - - dep ensure - - dep ensure -update - -lint: - desc: Runs golint - cmds: - - go fmt $(go list ./... | grep -v /vendor/) - - go vet $(go list ./... | grep -v /vendor/) - - golint $(ls *.go | grep -v "doc.go") - silent: true - -test: - desc: Runs go tests - cmds: - - go test -race . - -test-coverage: - desc: Runs go tests and calucates test coverage - cmds: - - go test -coverprofile=c.out . 
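Reviewer note: the surrounding hunks delete the vendored `stretchr/objx` sources outright. For reference while reviewing the removals, here is a minimal sketch of the accessor pattern described by the deleted README and doc.go, assuming objx v0.1.x as previously vendored:

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// MustFromJSON panics on invalid JSON; FromJSON returns an error instead.
	m := objx.MustFromJSON(`{"name": "Mat", "places": [{"city": "London"}]}`)

	// Get accepts dot and array notation and never panics; a missing key
	// yields a nil Value whose typed accessors fall back to a default.
	name := m.Get("name").Str()           // "Mat"
	city := m.Get("places[0].city").Str() // "London"
	age := m.Get("age").Int(-1)           // -1: key absent, default used

	fmt.Println(name, city, age)
}
```

The `Must*` accessor variants (`MustInter`, `MustMSI`, and so on, in the `type_specific_codegen.go` hunks below) panic on a type mismatch instead of returning a default, mirroring `MustFromJSON` here.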
diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go deleted file mode 100644 index 204356a22..000000000 --- a/vendor/github.com/stretchr/objx/accessors.go +++ /dev/null @@ -1,148 +0,0 @@ -package objx - -import ( - "regexp" - "strconv" - "strings" -) - -// arrayAccesRegexString is the regex used to extract the array number -// from the access path -const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` - -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) - -// Get gets the value using the specified selector and -// returns it inside a new Obj object. -// -// If it cannot find the value, Get will return a nil -// value inside an instance of Obj. -// -// Get can only operate directly on map[string]interface{} and []interface. -// -// Example -// -// To access the title of the third chapter of the second book, do: -// -// o.Get("books[1].chapters[2].title") -func (m Map) Get(selector string) *Value { - rawObj := access(m, selector, nil, false) - return &Value{data: rawObj} -} - -// Set sets the value using the specified selector and -// returns the object on which Set was called. -// -// Set can only operate directly on map[string]interface{} and []interface -// -// Example -// -// To set the title of the third chapter of the second book, do: -// -// o.Set("books[1].chapters[2].title","Time to Go") -func (m Map) Set(selector string, value interface{}) Map { - access(m, selector, value, true) - return m -} - -// access accesses the object using the selector and performs the -// appropriate action. -func access(current, selector, value interface{}, isSet bool) interface{} { - switch selector.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - if array, ok := current.([]interface{}); ok { - index := intFromInterface(selector) - if index >= len(array) { - return nil - } - return array[index] - } - return nil - - case string: - selStr := selector.(string) - selSegs := strings.SplitN(selStr, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 - var err error - - if strings.Contains(thisSel, "[") { - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) - if len(arrayMatches) > 0 { - // Get the key into the map - thisSel = arrayMatches[1] - - // Get the index into the array at the key - index, err = strconv.Atoi(arrayMatches[2]) - - if err != nil { - // This should never happen. If it does, something has gone - // seriously wrong. Panic. - panic("objx: Array index is not an integer. Must use array[int].") - } - } - } - if curMap, ok := current.(Map); ok { - current = map[string]interface{}(curMap) - } - // get the object in question - switch current.(type) { - case map[string]interface{}: - curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { - curMSI[thisSel] = value - return nil - } - current = curMSI[thisSel] - default: - current = nil - } - // do we need to access the item of an array? 
- if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - current = nil - } - } - } - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet) - } - } - return current -} - -// intFromInterface converts an interface object to the largest -// representation of an unsigned integer using a type switch and -// assertions -func intFromInterface(selector interface{}) int { - var value int - switch selector.(type) { - case int: - value = selector.(int) - case int8: - value = int(selector.(int8)) - case int16: - value = int(selector.(int16)) - case int32: - value = int(selector.(int32)) - case int64: - value = int(selector.(int64)) - case uint: - value = int(selector.(uint)) - case uint8: - value = int(selector.(uint8)) - case uint16: - value = int(selector.(uint16)) - case uint32: - value = int(selector.(uint32)) - case uint64: - value = int(selector.(uint64)) - default: - return 0 - } - return value -} diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go deleted file mode 100644 index f9eb42a25..000000000 --- a/vendor/github.com/stretchr/objx/constants.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -const ( - // PathSeparator is the character used to separate the elements - // of the keypath. - // - // For example, `location.address.city` - PathSeparator string = "." - - // SignatureSeparator is the character that is used to - // separate the Base64 string from the security signature. - SignatureSeparator = "_" -) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go deleted file mode 100644 index 5e020f310..000000000 --- a/vendor/github.com/stretchr/objx/conversions.go +++ /dev/null @@ -1,108 +0,0 @@ -package objx - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" -) - -// JSON converts the contained object to a JSON string -// representation -func (m Map) JSON() (string, error) { - result, err := json.Marshal(m) - if err != nil { - err = errors.New("objx: JSON encode failed with: " + err.Error()) - } - return string(result), err -} - -// MustJSON converts the contained object to a JSON string -// representation and panics if there is an error -func (m Map) MustJSON() string { - result, err := m.JSON() - if err != nil { - panic(err.Error()) - } - return result -} - -// Base64 converts the contained object to a Base64 string -// representation of the JSON string representation -func (m Map) Base64() (string, error) { - var buf bytes.Buffer - - jsonData, err := m.JSON() - if err != nil { - return "", err - } - - encoder := base64.NewEncoder(base64.StdEncoding, &buf) - _, err = encoder.Write([]byte(jsonData)) - if err != nil { - return "", err - } - _ = encoder.Close() - - return buf.String(), nil -} - -// MustBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and panics -// if there is an error -func (m Map) MustBase64() string { - result, err := m.Base64() - if err != nil { - panic(err.Error()) - } - return result -} - -// SignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key. 
-func (m Map) SignedBase64(key string) (string, error) { - base64, err := m.Base64() - if err != nil { - return "", err - } - - sig := HashWithKey(base64, key) - return base64 + SignatureSeparator + sig, nil -} - -// MustSignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key and panics if there is an error -func (m Map) MustSignedBase64(key string) string { - result, err := m.SignedBase64(key) - if err != nil { - panic(err.Error()) - } - return result -} - -/* - URL Query - ------------------------------------------------ -*/ - -// URLValues creates a url.Values object from an Obj. This -// function requires that the wrapped object be a map[string]interface{} -func (m Map) URLValues() url.Values { - vals := make(url.Values) - for k, v := range m { - //TODO: can this be done without sprintf? - vals.Set(k, fmt.Sprintf("%v", v)) - } - return vals -} - -// URLQuery gets an encoded URL query representing the given -// Obj. This function requires that the wrapped object be a -// map[string]interface{} -func (m Map) URLQuery() (string, error) { - return m.URLValues().Encode(), nil -} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go deleted file mode 100644 index 6d6af1a83..000000000 --- a/vendor/github.com/stretchr/objx/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Objx - Go package for dealing with maps, slices, JSON and other data. - -Overview - -Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -a powerful `Get` method (among others) that allows you to easily and quickly get -access to data within the map, without having to worry too much about type assertions, -missing data, default values etc. - -Pattern - -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. -Call one of the `objx.` functions to create your `objx.Map` to get going: - - m, err := objx.FromJSON(json) - -NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -the rest will be optimistic and try to figure things out without panicking. - -Use `Get` to access the value you're interested in. You can use dot and array -notation too: - - m.Get("places[0].latlng") - -Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - - if m.Get("code").IsStr() { // Your code... } - -Or you can just assume the type, and use one of the strong type methods to extract the real value: - - m.Get("code").Int() - -If there's no value there (or if it's the wrong type) then a default value will be returned, -or you can be explicit about the default value. - - Get("code").Int(-1) - -If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, -manipulating and selecting that data. You can find out more by exploring the index below. - -Reading data - -A simple example of how to use Objx: - - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() - - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) - -Ranging - -Since `objx.Map` is a `map[string]interface{}` you can treat it as such. -For example, to `range` the data, do what you would expect: - - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... 
- } -*/ -package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go deleted file mode 100644 index 406bc8926..000000000 --- a/vendor/github.com/stretchr/objx/map.go +++ /dev/null @@ -1,190 +0,0 @@ -package objx - -import ( - "encoding/base64" - "encoding/json" - "errors" - "io/ioutil" - "net/url" - "strings" -) - -// MSIConvertable is an interface that defines methods for converting your -// custom types to a map[string]interface{} representation. -type MSIConvertable interface { - // MSI gets a map[string]interface{} (msi) representing the - // object. - MSI() map[string]interface{} -} - -// Map provides extended functionality for working with -// untyped data, in particular map[string]interface (msi). -type Map map[string]interface{} - -// Value returns the internal value instance -func (m Map) Value() *Value { - return &Value{data: m} -} - -// Nil represents a nil Map. -var Nil = New(nil) - -// New creates a new Map containing the map[string]interface{} in the data argument. -// If the data argument is not a map[string]interface, New attempts to call the -// MSI() method on the MSIConvertable interface to create one. -func New(data interface{}) Map { - if _, ok := data.(map[string]interface{}); !ok { - if converter, ok := data.(MSIConvertable); ok { - data = converter.MSI() - } else { - return nil - } - } - return Map(data.(map[string]interface{})) -} - -// MSI creates a map[string]interface{} and puts it inside a new Map. -// -// The arguments follow a key, value pattern. -// -// -// Returns nil if any key argument is non-string or if there are an odd number of arguments. -// -// Example -// -// To easily create Maps: -// -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) -// -// // creates an Map equivalent to -// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} -func MSI(keyAndValuePairs ...interface{}) Map { - newMap := Map{} - keyAndValuePairsLen := len(keyAndValuePairs) - if keyAndValuePairsLen%2 != 0 { - return nil - } - for i := 0; i < keyAndValuePairsLen; i = i + 2 { - key := keyAndValuePairs[i] - value := keyAndValuePairs[i+1] - - // make sure the key is a string - keyString, keyStringOK := key.(string) - if !keyStringOK { - return nil - } - newMap[keyString] = value - } - return newMap -} - -// ****** Conversion Constructors - -// MustFromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Panics if the JSON is invalid. -func MustFromJSON(jsonString string) Map { - o, err := FromJSON(jsonString) - if err != nil { - panic("objx: MustFromJSON failed with error: " + err.Error()) - } - return o -} - -// FromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Returns an error if the JSON is invalid. -func FromJSON(jsonString string) (Map, error) { - var data interface{} - err := json.Unmarshal([]byte(jsonString), &data) - if err != nil { - return Nil, err - } - return New(data), nil -} - -// FromBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by Base64 -func FromBase64(base64String string) (Map, error) { - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - decoded, err := ioutil.ReadAll(decoder) - if err != nil { - return nil, err - } - return FromJSON(string(decoded)) -} - -// MustFromBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. 
-// -// The string is an encoded JSON string returned by Base64 -func MustFromBase64(base64String string) Map { - result, err := FromBase64(base64String) - if err != nil { - panic("objx: MustFromBase64 failed with error: " + err.Error()) - } - return result -} - -// FromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by SignedBase64 -func FromSignedBase64(base64String, key string) (Map, error) { - parts := strings.Split(base64String, SignatureSeparator) - if len(parts) != 2 { - return nil, errors.New("objx: Signed base64 string is malformed") - } - - sig := HashWithKey(parts[0], key) - if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match") - } - return FromBase64(parts[0]) -} - -// MustFromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromSignedBase64(base64String, key string) Map { - result, err := FromSignedBase64(base64String, key) - if err != nil { - panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) - } - return result -} - -// FromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -func FromURLQuery(query string) (Map, error) { - vals, err := url.ParseQuery(query) - if err != nil { - return nil, err - } - m := Map{} - for k, vals := range vals { - m[k] = vals[0] - } - return m, nil -} - -// MustFromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -// -// Panics if it encounters an error -func MustFromURLQuery(query string) Map { - o, err := FromURLQuery(query) - if err != nil { - panic("objx: MustFromURLQuery failed with error: " + err.Error()) - } - return o -} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go deleted file mode 100644 index c3400a3f7..000000000 --- a/vendor/github.com/stretchr/objx/mutations.go +++ /dev/null @@ -1,77 +0,0 @@ -package objx - -// Exclude returns a new Map with the keys in the specified []string -// excluded. -func (m Map) Exclude(exclude []string) Map { - excluded := make(Map) - for k, v := range m { - if !contains(exclude, k) { - excluded[k] = v - } - } - return excluded -} - -// Copy creates a shallow copy of the Obj. -func (m Map) Copy() Map { - copied := Map{} - for k, v := range m { - copied[k] = v - } - return copied -} - -// Merge blends the specified map with a copy of this map and returns the result. -// -// Keys that appear in both will be selected from the specified map. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) Merge(merge Map) Map { - return m.Copy().MergeHere(merge) -} - -// MergeHere blends the specified map with this map and returns the current map. -// -// Keys that appear in both will be selected from the specified map. The original map -// will be modified. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) MergeHere(merge Map) Map { - for k, v := range merge { - m[k] = v - } - return m -} - -// Transform builds a new Obj giving the transformer a chance -// to change the keys and values as it goes. 
This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { - newMap := Map{} - for k, v := range m { - modifiedKey, modifiedVal := transformer(k, v) - newMap[modifiedKey] = modifiedVal - } - return newMap -} - -// TransformKeys builds a new map using the specified key mapping. -// -// Unspecified keys will be unaltered. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) TransformKeys(mapping map[string]string) Map { - return m.Transform(func(key string, value interface{}) (string, interface{}) { - if newKey, ok := mapping[key]; ok { - return newKey, value - } - return key, value - }) -} - -// Checks if a string slice contains a string -func contains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go deleted file mode 100644 index 692be8e2a..000000000 --- a/vendor/github.com/stretchr/objx/security.go +++ /dev/null @@ -1,12 +0,0 @@ -package objx - -import ( - "crypto/sha1" - "encoding/hex" -) - -// HashWithKey hashes the specified string using the security key -func HashWithKey(data, key string) string { - d := sha1.Sum([]byte(data + ":" + key)) - return hex.EncodeToString(d[:]) -} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go deleted file mode 100644 index d9e0b479a..000000000 --- a/vendor/github.com/stretchr/objx/tests.go +++ /dev/null @@ -1,17 +0,0 @@ -package objx - -// Has gets whether there is something at the specified selector -// or not. -// -// If m is nil, Has will always return false. -func (m Map) Has(selector string) bool { - if m == nil { - return false - } - return !m.Get(selector).IsNil() -} - -// IsNil gets whether the data is nil or not. -func (v *Value) IsNil() bool { - return v == nil || v.data == nil -} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go deleted file mode 100644 index 202a91f8c..000000000 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ /dev/null @@ -1,2501 +0,0 @@ -package objx - -/* - Inter (interface{} and []interface{}) -*/ - -// Inter gets the value as a interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Inter(optionalDefault ...interface{}) interface{} { - if s, ok := v.data.(interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInter gets the value as a interface{}. -// -// Panics if the object is not a interface{}. -func (v *Value) MustInter() interface{} { - return v.data.(interface{}) -} - -// InterSlice gets the value as a []interface{}, returns the optionalDefault -// value or nil if the value is not a []interface{}. -func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { - if s, ok := v.data.([]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInterSlice gets the value as a []interface{}. -// -// Panics if the object is not a []interface{}. -func (v *Value) MustInterSlice() []interface{} { - return v.data.([]interface{}) -} - -// IsInter gets whether the object contained is a interface{} or not. 
-func (v *Value) IsInter() bool { - _, ok := v.data.(interface{}) - return ok -} - -// IsInterSlice gets whether the object contained is a []interface{} or not. -func (v *Value) IsInterSlice() bool { - _, ok := v.data.([]interface{}) - return ok -} - -// EachInter calls the specified callback for each object -// in the []interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - for index, val := range v.MustInterSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInter uses the specified decider function to select items -// from the []interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - var selected []interface{} - v.EachInter(func(index int, val interface{}) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInter uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]interface{}. -func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - groups := make(map[string][]interface{}) - v.EachInter(func(index int, val interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInter uses the specified function to replace each interface{}s -// by iterating each item. The data in the returned result will be a -// []interface{} containing the replaced items. -func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() - replaced := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInter uses the specified collector function to collect a value -// for each of the interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() - collected := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - MSI (map[string]interface{} and []map[string]interface{}) -*/ - -// MSI gets the value as a map[string]interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { - if s, ok := v.data.(map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSI gets the value as a map[string]interface{}. -// -// Panics if the object is not a map[string]interface{}. -func (v *Value) MustMSI() map[string]interface{} { - return v.data.(map[string]interface{}) -} - -// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault -// value or nil if the value is not a []map[string]interface{}. 
-// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault
-// value or nil if the value is not a []map[string]interface{}.
-func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} {
-	if s, ok := v.data.([]map[string]interface{}); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustMSISlice gets the value as a []map[string]interface{}.
-//
-// Panics if the object is not a []map[string]interface{}.
-func (v *Value) MustMSISlice() []map[string]interface{} {
-	return v.data.([]map[string]interface{})
-}
-
-// IsMSI gets whether the object contained is a map[string]interface{} or not.
-func (v *Value) IsMSI() bool {
-	_, ok := v.data.(map[string]interface{})
-	return ok
-}
-
-// IsMSISlice gets whether the object contained is a []map[string]interface{} or not.
-func (v *Value) IsMSISlice() bool {
-	_, ok := v.data.([]map[string]interface{})
-	return ok
-}
-
-// EachMSI calls the specified callback for each object
-// in the []map[string]interface{}.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value {
-	for index, val := range v.MustMSISlice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereMSI uses the specified decider function to select items
-// from the []map[string]interface{}. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value {
-	var selected []map[string]interface{}
-	v.EachMSI(func(index int, val map[string]interface{}) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupMSI uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]map[string]interface{}.
-func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value {
-	groups := make(map[string][]map[string]interface{})
-	v.EachMSI(func(index int, val map[string]interface{}) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]map[string]interface{}, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceMSI uses the specified function to replace each map[string]interface{}s
-// by iterating each item. The data in the returned result will be a
-// []map[string]interface{} containing the replaced items.
-func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value {
-	arr := v.MustMSISlice()
-	replaced := make([]map[string]interface{}, len(arr))
-	v.EachMSI(func(index int, val map[string]interface{}) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectMSI uses the specified collector function to collect a value
-// for each of the map[string]interface{}s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value {
-	arr := v.MustMSISlice()
-	collected := make([]interface{}, len(arr))
-	v.EachMSI(func(index int, val map[string]interface{}) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	ObjxMap ((Map) and [](Map))
-*/
-
-// ObjxMap gets the value as a (Map), returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) ObjxMap(optionalDefault ...(Map)) Map {
-	if s, ok := v.data.((Map)); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return New(nil)
-}
-
-// MustObjxMap gets the value as a (Map).
-//
-// Panics if the object is not a (Map).
-func (v *Value) MustObjxMap() Map {
-	return v.data.((Map))
-}
-
-// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault
-// value or nil if the value is not a [](Map).
-func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) {
-	if s, ok := v.data.([](Map)); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustObjxMapSlice gets the value as a [](Map).
-//
-// Panics if the object is not a [](Map).
-func (v *Value) MustObjxMapSlice() [](Map) {
-	return v.data.([](Map))
-}
-
-// IsObjxMap gets whether the object contained is a (Map) or not.
-func (v *Value) IsObjxMap() bool {
-	_, ok := v.data.((Map))
-	return ok
-}
-
-// IsObjxMapSlice gets whether the object contained is a [](Map) or not.
-func (v *Value) IsObjxMapSlice() bool {
-	_, ok := v.data.([](Map))
-	return ok
-}
-
-// EachObjxMap calls the specified callback for each object
-// in the [](Map).
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value {
-	for index, val := range v.MustObjxMapSlice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereObjxMap uses the specified decider function to select items
-// from the [](Map). The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value {
-	var selected [](Map)
-	v.EachObjxMap(func(index int, val Map) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupObjxMap uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][](Map).
-func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value {
-	groups := make(map[string][](Map))
-	v.EachObjxMap(func(index int, val Map) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([](Map), 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
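ObjxMap is the nesting hook: when the wrapped value is itself an objx.Map, it comes back re-wrapped so reads chain, and on a type mismatch it returns New(nil) rather than nil, which keeps chained lookups panic-free. A sketch under that assumption; note the nested value is stored as an objx.Map so the (Map) assertion above succeeds:

	package main

	import (
		"fmt"

		"github.com/stretchr/objx"
	)

	func main() {
		profile := objx.New(map[string]interface{}{
			"user": objx.Map{"name": "Ada"},
		})
		// Chained read through the nested Map.
		fmt.Println(profile.Get("user").ObjxMap().Get("name").Str()) // "Ada"
		// Mismatch: ObjxMap yields an empty Map, so the chain still works.
		fmt.Println(profile.Get("missing").ObjxMap().Get("name").Str("n/a")) // "n/a"
	}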
-// ReplaceObjxMap uses the specified function to replace each (Map)s
-// by iterating each item. The data in the returned result will be a
-// [](Map) containing the replaced items.
-func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value {
-	arr := v.MustObjxMapSlice()
-	replaced := make([](Map), len(arr))
-	v.EachObjxMap(func(index int, val Map) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectObjxMap uses the specified collector function to collect a value
-// for each of the (Map)s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value {
-	arr := v.MustObjxMapSlice()
-	collected := make([]interface{}, len(arr))
-	v.EachObjxMap(func(index int, val Map) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Bool (bool and []bool)
-*/
-
-// Bool gets the value as a bool, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Bool(optionalDefault ...bool) bool {
-	if s, ok := v.data.(bool); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return false
-}
-
-// MustBool gets the value as a bool.
-//
-// Panics if the object is not a bool.
-func (v *Value) MustBool() bool {
-	return v.data.(bool)
-}
-
-// BoolSlice gets the value as a []bool, returns the optionalDefault
-// value or nil if the value is not a []bool.
-func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool {
-	if s, ok := v.data.([]bool); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustBoolSlice gets the value as a []bool.
-//
-// Panics if the object is not a []bool.
-func (v *Value) MustBoolSlice() []bool {
-	return v.data.([]bool)
-}
-
-// IsBool gets whether the object contained is a bool or not.
-func (v *Value) IsBool() bool {
-	_, ok := v.data.(bool)
-	return ok
-}
-
-// IsBoolSlice gets whether the object contained is a []bool or not.
-func (v *Value) IsBoolSlice() bool {
-	_, ok := v.data.([]bool)
-	return ok
-}
-
-// EachBool calls the specified callback for each object
-// in the []bool.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachBool(callback func(int, bool) bool) *Value {
-	for index, val := range v.MustBoolSlice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereBool uses the specified decider function to select items
-// from the []bool. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereBool(decider func(int, bool) bool) *Value {
-	var selected []bool
-	v.EachBool(func(index int, val bool) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupBool uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]bool.
-func (v *Value) GroupBool(grouper func(int, bool) string) *Value {
-	groups := make(map[string][]bool)
-	v.EachBool(func(index int, val bool) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]bool, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceBool uses the specified function to replace each bools
-// by iterating each item. The data in the returned result will be a
-// []bool containing the replaced items.
-func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value {
-	arr := v.MustBoolSlice()
-	replaced := make([]bool, len(arr))
-	v.EachBool(func(index int, val bool) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectBool uses the specified collector function to collect a value
-// for each of the bools in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value {
-	arr := v.MustBoolSlice()
-	collected := make([]interface{}, len(arr))
-	v.EachBool(func(index int, val bool) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Str (string and []string)
-*/
-
-// Str gets the value as a string, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Str(optionalDefault ...string) string {
-	if s, ok := v.data.(string); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return ""
-}
-
-// MustStr gets the value as a string.
-//
-// Panics if the object is not a string.
-func (v *Value) MustStr() string {
-	return v.data.(string)
-}
-
-// StrSlice gets the value as a []string, returns the optionalDefault
-// value or nil if the value is not a []string.
-func (v *Value) StrSlice(optionalDefault ...[]string) []string {
-	if s, ok := v.data.([]string); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustStrSlice gets the value as a []string.
-//
-// Panics if the object is not a []string.
-func (v *Value) MustStrSlice() []string {
-	return v.data.([]string)
-}
-
-// IsStr gets whether the object contained is a string or not.
-func (v *Value) IsStr() bool {
-	_, ok := v.data.(string)
-	return ok
-}
-
-// IsStrSlice gets whether the object contained is a []string or not.
-func (v *Value) IsStrSlice() bool {
-	_, ok := v.data.([]string)
-	return ok
-}
-
-// EachStr calls the specified callback for each object
-// in the []string.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachStr(callback func(int, string) bool) *Value {
-	for index, val := range v.MustStrSlice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereStr uses the specified decider function to select items
-// from the []string. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereStr(decider func(int, string) bool) *Value {
-	var selected []string
-	v.EachStr(func(index int, val string) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupStr uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]string.
-func (v *Value) GroupStr(grouper func(int, string) string) *Value {
-	groups := make(map[string][]string)
-	v.EachStr(func(index int, val string) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]string, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
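All of the Each* helpers above share one control-flow detail worth calling out: the callback's boolean return is a continue/stop signal, so returning false breaks the loop early. A sketch with the string helpers, again assuming objx.New and Get from elsewhere in the package:

	package main

	import (
		"fmt"

		"github.com/stretchr/objx"
	)

	func main() {
		v := objx.New(map[string]interface{}{"tags": []string{"go", "db", "sql"}})
		v.Get("tags").EachStr(func(i int, s string) bool {
			fmt.Println(i, s)
			return s != "db" // stop once "db" has been visited
		})
		// Prints "0 go" and "1 db"; iteration stops before "sql".
	}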
-// ReplaceStr uses the specified function to replace each strings
-// by iterating each item. The data in the returned result will be a
-// []string containing the replaced items.
-func (v *Value) ReplaceStr(replacer func(int, string) string) *Value {
-	arr := v.MustStrSlice()
-	replaced := make([]string, len(arr))
-	v.EachStr(func(index int, val string) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectStr uses the specified collector function to collect a value
-// for each of the strings in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectStr(collector func(int, string) interface{}) *Value {
-	arr := v.MustStrSlice()
-	collected := make([]interface{}, len(arr))
-	v.EachStr(func(index int, val string) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Int (int and []int)
-*/
-
-// Int gets the value as a int, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Int(optionalDefault ...int) int {
-	if s, ok := v.data.(int); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustInt gets the value as a int.
-//
-// Panics if the object is not a int.
-func (v *Value) MustInt() int {
-	return v.data.(int)
-}
-
-// IntSlice gets the value as a []int, returns the optionalDefault
-// value or nil if the value is not a []int.
-func (v *Value) IntSlice(optionalDefault ...[]int) []int {
-	if s, ok := v.data.([]int); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustIntSlice gets the value as a []int.
-//
-// Panics if the object is not a []int.
-func (v *Value) MustIntSlice() []int {
-	return v.data.([]int)
-}
-
-// IsInt gets whether the object contained is a int or not.
-func (v *Value) IsInt() bool {
-	_, ok := v.data.(int)
-	return ok
-}
-
-// IsIntSlice gets whether the object contained is a []int or not.
-func (v *Value) IsIntSlice() bool {
-	_, ok := v.data.([]int)
-	return ok
-}
-
-// EachInt calls the specified callback for each object
-// in the []int.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachInt(callback func(int, int) bool) *Value {
-	for index, val := range v.MustIntSlice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereInt uses the specified decider function to select items
-// from the []int. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereInt(decider func(int, int) bool) *Value {
-	var selected []int
-	v.EachInt(func(index int, val int) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupInt uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]int.
-func (v *Value) GroupInt(grouper func(int, int) string) *Value {
-	groups := make(map[string][]int)
-	v.EachInt(func(index int, val int) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]int, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
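The Replace*/Collect* pair differs only in result element type: Replace* keeps the slice's concrete type, while Collect* always produces a []interface{}. A sketch with the int helpers:

	package main

	import (
		"fmt"

		"github.com/stretchr/objx"
	)

	func main() {
		nums := objx.New(map[string]interface{}{"n": []int{1, 2, 3}})

		// ReplaceInt maps []int -> []int.
		doubled := nums.Get("n").ReplaceInt(func(i, val int) int { return val * 2 })
		fmt.Println(doubled.MustIntSlice()) // [2 4 6]

		// CollectInt maps []int -> []interface{}.
		labels := nums.Get("n").CollectInt(func(i, val int) interface{} {
			return fmt.Sprintf("item %d = %d", i, val)
		})
		fmt.Println(labels.MustInterSlice()) // [item 0 = 1 item 1 = 2 item 2 = 3]
	}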
-// ReplaceInt uses the specified function to replace each ints
-// by iterating each item. The data in the returned result will be a
-// []int containing the replaced items.
-func (v *Value) ReplaceInt(replacer func(int, int) int) *Value {
-	arr := v.MustIntSlice()
-	replaced := make([]int, len(arr))
-	v.EachInt(func(index int, val int) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectInt uses the specified collector function to collect a value
-// for each of the ints in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectInt(collector func(int, int) interface{}) *Value {
-	arr := v.MustIntSlice()
-	collected := make([]interface{}, len(arr))
-	v.EachInt(func(index int, val int) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Int8 (int8 and []int8)
-*/
-
-// Int8 gets the value as a int8, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Int8(optionalDefault ...int8) int8 {
-	if s, ok := v.data.(int8); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustInt8 gets the value as a int8.
-//
-// Panics if the object is not a int8.
-func (v *Value) MustInt8() int8 {
-	return v.data.(int8)
-}
-
-// Int8Slice gets the value as a []int8, returns the optionalDefault
-// value or nil if the value is not a []int8.
-func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 {
-	if s, ok := v.data.([]int8); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustInt8Slice gets the value as a []int8.
-//
-// Panics if the object is not a []int8.
-func (v *Value) MustInt8Slice() []int8 {
-	return v.data.([]int8)
-}
-
-// IsInt8 gets whether the object contained is a int8 or not.
-func (v *Value) IsInt8() bool {
-	_, ok := v.data.(int8)
-	return ok
-}
-
-// IsInt8Slice gets whether the object contained is a []int8 or not.
-func (v *Value) IsInt8Slice() bool {
-	_, ok := v.data.([]int8)
-	return ok
-}
-
-// EachInt8 calls the specified callback for each object
-// in the []int8.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachInt8(callback func(int, int8) bool) *Value {
-	for index, val := range v.MustInt8Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereInt8 uses the specified decider function to select items
-// from the []int8. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereInt8(decider func(int, int8) bool) *Value {
-	var selected []int8
-	v.EachInt8(func(index int, val int8) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupInt8 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]int8.
-func (v *Value) GroupInt8(grouper func(int, int8) string) *Value {
-	groups := make(map[string][]int8)
-	v.EachInt8(func(index int, val int8) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]int8, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceInt8 uses the specified function to replace each int8s
-// by iterating each item. The data in the returned result will be a
-// []int8 containing the replaced items.
-func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value {
-	arr := v.MustInt8Slice()
-	replaced := make([]int8, len(arr))
-	v.EachInt8(func(index int, val int8) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectInt8 uses the specified collector function to collect a value
-// for each of the int8s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value {
-	arr := v.MustInt8Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachInt8(func(index int, val int8) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Int16 (int16 and []int16)
-*/
-
-// Int16 gets the value as a int16, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Int16(optionalDefault ...int16) int16 {
-	if s, ok := v.data.(int16); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustInt16 gets the value as a int16.
-//
-// Panics if the object is not a int16.
-func (v *Value) MustInt16() int16 {
-	return v.data.(int16)
-}
-
-// Int16Slice gets the value as a []int16, returns the optionalDefault
-// value or nil if the value is not a []int16.
-func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 {
-	if s, ok := v.data.([]int16); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustInt16Slice gets the value as a []int16.
-//
-// Panics if the object is not a []int16.
-func (v *Value) MustInt16Slice() []int16 {
-	return v.data.([]int16)
-}
-
-// IsInt16 gets whether the object contained is a int16 or not.
-func (v *Value) IsInt16() bool {
-	_, ok := v.data.(int16)
-	return ok
-}
-
-// IsInt16Slice gets whether the object contained is a []int16 or not.
-func (v *Value) IsInt16Slice() bool {
-	_, ok := v.data.([]int16)
-	return ok
-}
-
-// EachInt16 calls the specified callback for each object
-// in the []int16.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachInt16(callback func(int, int16) bool) *Value {
-	for index, val := range v.MustInt16Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereInt16 uses the specified decider function to select items
-// from the []int16. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereInt16(decider func(int, int16) bool) *Value {
-	var selected []int16
-	v.EachInt16(func(index int, val int16) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupInt16 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]int16.
-func (v *Value) GroupInt16(grouper func(int, int16) string) *Value {
-	groups := make(map[string][]int16)
-	v.EachInt16(func(index int, val int16) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]int16, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceInt16 uses the specified function to replace each int16s
-// by iterating each item. The data in the returned result will be a
-// []int16 containing the replaced items.
-func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value {
-	arr := v.MustInt16Slice()
-	replaced := make([]int16, len(arr))
-	v.EachInt16(func(index int, val int16) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectInt16 uses the specified collector function to collect a value
-// for each of the int16s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value {
-	arr := v.MustInt16Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachInt16(func(index int, val int16) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Int32 (int32 and []int32)
-*/
-
-// Int32 gets the value as a int32, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Int32(optionalDefault ...int32) int32 {
-	if s, ok := v.data.(int32); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustInt32 gets the value as a int32.
-//
-// Panics if the object is not a int32.
-func (v *Value) MustInt32() int32 {
-	return v.data.(int32)
-}
-
-// Int32Slice gets the value as a []int32, returns the optionalDefault
-// value or nil if the value is not a []int32.
-func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 {
-	if s, ok := v.data.([]int32); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustInt32Slice gets the value as a []int32.
-//
-// Panics if the object is not a []int32.
-func (v *Value) MustInt32Slice() []int32 {
-	return v.data.([]int32)
-}
-
-// IsInt32 gets whether the object contained is a int32 or not.
-func (v *Value) IsInt32() bool {
-	_, ok := v.data.(int32)
-	return ok
-}
-
-// IsInt32Slice gets whether the object contained is a []int32 or not.
-func (v *Value) IsInt32Slice() bool {
-	_, ok := v.data.([]int32)
-	return ok
-}
-
-// EachInt32 calls the specified callback for each object
-// in the []int32.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachInt32(callback func(int, int32) bool) *Value {
-	for index, val := range v.MustInt32Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereInt32 uses the specified decider function to select items
-// from the []int32. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereInt32(decider func(int, int32) bool) *Value {
-	var selected []int32
-	v.EachInt32(func(index int, val int32) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupInt32 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]int32.
-func (v *Value) GroupInt32(grouper func(int, int32) string) *Value {
-	groups := make(map[string][]int32)
-	v.EachInt32(func(index int, val int32) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]int32, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceInt32 uses the specified function to replace each int32s
-// by iterating each item. The data in the returned result will be a
-// []int32 containing the replaced items.
-func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value {
-	arr := v.MustInt32Slice()
-	replaced := make([]int32, len(arr))
-	v.EachInt32(func(index int, val int32) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectInt32 uses the specified collector function to collect a value
-// for each of the int32s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value {
-	arr := v.MustInt32Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachInt32(func(index int, val int32) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Int64 (int64 and []int64)
-*/
-
-// Int64 gets the value as a int64, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Int64(optionalDefault ...int64) int64 {
-	if s, ok := v.data.(int64); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustInt64 gets the value as a int64.
-//
-// Panics if the object is not a int64.
-func (v *Value) MustInt64() int64 {
-	return v.data.(int64)
-}
-
-// Int64Slice gets the value as a []int64, returns the optionalDefault
-// value or nil if the value is not a []int64.
-func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 {
-	if s, ok := v.data.([]int64); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustInt64Slice gets the value as a []int64.
-//
-// Panics if the object is not a []int64.
-func (v *Value) MustInt64Slice() []int64 {
-	return v.data.([]int64)
-}
-
-// IsInt64 gets whether the object contained is a int64 or not.
-func (v *Value) IsInt64() bool {
-	_, ok := v.data.(int64)
-	return ok
-}
-
-// IsInt64Slice gets whether the object contained is a []int64 or not.
-func (v *Value) IsInt64Slice() bool {
-	_, ok := v.data.([]int64)
-	return ok
-}
-
-// EachInt64 calls the specified callback for each object
-// in the []int64.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachInt64(callback func(int, int64) bool) *Value {
-	for index, val := range v.MustInt64Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereInt64 uses the specified decider function to select items
-// from the []int64. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereInt64(decider func(int, int64) bool) *Value {
-	var selected []int64
-	v.EachInt64(func(index int, val int64) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupInt64 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]int64.
-func (v *Value) GroupInt64(grouper func(int, int64) string) *Value {
-	groups := make(map[string][]int64)
-	v.EachInt64(func(index int, val int64) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]int64, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceInt64 uses the specified function to replace each int64s
-// by iterating each item. The data in the returned result will be a
-// []int64 containing the replaced items.
-func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value {
-	arr := v.MustInt64Slice()
-	replaced := make([]int64, len(arr))
-	v.EachInt64(func(index int, val int64) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectInt64 uses the specified collector function to collect a value
-// for each of the int64s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value {
-	arr := v.MustInt64Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachInt64(func(index int, val int64) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Uint (uint and []uint)
-*/
-
-// Uint gets the value as a uint, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Uint(optionalDefault ...uint) uint {
-	if s, ok := v.data.(uint); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustUint gets the value as a uint.
-//
-// Panics if the object is not a uint.
-func (v *Value) MustUint() uint {
-	return v.data.(uint)
-}
-
-// UintSlice gets the value as a []uint, returns the optionalDefault
-// value or nil if the value is not a []uint.
-func (v *Value) UintSlice(optionalDefault ...[]uint) []uint {
-	if s, ok := v.data.([]uint); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustUintSlice gets the value as a []uint.
-//
-// Panics if the object is not a []uint.
-func (v *Value) MustUintSlice() []uint {
-	return v.data.([]uint)
-}
-
-// IsUint gets whether the object contained is a uint or not.
-func (v *Value) IsUint() bool {
-	_, ok := v.data.(uint)
-	return ok
-}
-
-// IsUintSlice gets whether the object contained is a []uint or not.
-func (v *Value) IsUintSlice() bool {
-	_, ok := v.data.([]uint)
-	return ok
-}
-
-// EachUint calls the specified callback for each object
-// in the []uint.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachUint(callback func(int, uint) bool) *Value {
-	for index, val := range v.MustUintSlice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereUint uses the specified decider function to select items
-// from the []uint. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereUint(decider func(int, uint) bool) *Value {
-	var selected []uint
-	v.EachUint(func(index int, val uint) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupUint uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]uint.
-func (v *Value) GroupUint(grouper func(int, uint) string) *Value {
-	groups := make(map[string][]uint)
-	v.EachUint(func(index int, val uint) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]uint, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceUint uses the specified function to replace each uints
-// by iterating each item. The data in the returned result will be a
-// []uint containing the replaced items.
-func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value {
-	arr := v.MustUintSlice()
-	replaced := make([]uint, len(arr))
-	v.EachUint(func(index int, val uint) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectUint uses the specified collector function to collect a value
-// for each of the uints in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value {
-	arr := v.MustUintSlice()
-	collected := make([]interface{}, len(arr))
-	v.EachUint(func(index int, val uint) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Uint8 (uint8 and []uint8)
-*/
-
-// Uint8 gets the value as a uint8, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Uint8(optionalDefault ...uint8) uint8 {
-	if s, ok := v.data.(uint8); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustUint8 gets the value as a uint8.
-//
-// Panics if the object is not a uint8.
-func (v *Value) MustUint8() uint8 {
-	return v.data.(uint8)
-}
-
-// Uint8Slice gets the value as a []uint8, returns the optionalDefault
-// value or nil if the value is not a []uint8.
-func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 {
-	if s, ok := v.data.([]uint8); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustUint8Slice gets the value as a []uint8.
-//
-// Panics if the object is not a []uint8.
-func (v *Value) MustUint8Slice() []uint8 {
-	return v.data.([]uint8)
-}
-
-// IsUint8 gets whether the object contained is a uint8 or not.
-func (v *Value) IsUint8() bool {
-	_, ok := v.data.(uint8)
-	return ok
-}
-
-// IsUint8Slice gets whether the object contained is a []uint8 or not.
-func (v *Value) IsUint8Slice() bool {
-	_, ok := v.data.([]uint8)
-	return ok
-}
-
-// EachUint8 calls the specified callback for each object
-// in the []uint8.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachUint8(callback func(int, uint8) bool) *Value {
-	for index, val := range v.MustUint8Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereUint8 uses the specified decider function to select items
-// from the []uint8. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value {
-	var selected []uint8
-	v.EachUint8(func(index int, val uint8) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupUint8 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]uint8.
-func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value {
-	groups := make(map[string][]uint8)
-	v.EachUint8(func(index int, val uint8) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]uint8, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
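Because byte is an alias for uint8 in Go, the Uint8 helpers above are also the ones that match raw byte payloads: a []byte stored in a Value satisfies IsUint8Slice directly. A sketch:

	package main

	import (
		"fmt"

		"github.com/stretchr/objx"
	)

	func main() {
		v := objx.New(map[string]interface{}{"raw": []byte("hi")})
		fmt.Println(v.Get("raw").IsUint8Slice())   // true: []byte is []uint8
		fmt.Println(v.Get("raw").MustUint8Slice()) // [104 105]
	}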
-// ReplaceUint8 uses the specified function to replace each uint8s
-// by iterating each item. The data in the returned result will be a
-// []uint8 containing the replaced items.
-func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value {
-	arr := v.MustUint8Slice()
-	replaced := make([]uint8, len(arr))
-	v.EachUint8(func(index int, val uint8) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectUint8 uses the specified collector function to collect a value
-// for each of the uint8s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value {
-	arr := v.MustUint8Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachUint8(func(index int, val uint8) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Uint16 (uint16 and []uint16)
-*/
-
-// Uint16 gets the value as a uint16, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Uint16(optionalDefault ...uint16) uint16 {
-	if s, ok := v.data.(uint16); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustUint16 gets the value as a uint16.
-//
-// Panics if the object is not a uint16.
-func (v *Value) MustUint16() uint16 {
-	return v.data.(uint16)
-}
-
-// Uint16Slice gets the value as a []uint16, returns the optionalDefault
-// value or nil if the value is not a []uint16.
-func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 {
-	if s, ok := v.data.([]uint16); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustUint16Slice gets the value as a []uint16.
-//
-// Panics if the object is not a []uint16.
-func (v *Value) MustUint16Slice() []uint16 {
-	return v.data.([]uint16)
-}
-
-// IsUint16 gets whether the object contained is a uint16 or not.
-func (v *Value) IsUint16() bool {
-	_, ok := v.data.(uint16)
-	return ok
-}
-
-// IsUint16Slice gets whether the object contained is a []uint16 or not.
-func (v *Value) IsUint16Slice() bool {
-	_, ok := v.data.([]uint16)
-	return ok
-}
-
-// EachUint16 calls the specified callback for each object
-// in the []uint16.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachUint16(callback func(int, uint16) bool) *Value {
-	for index, val := range v.MustUint16Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereUint16 uses the specified decider function to select items
-// from the []uint16. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value {
-	var selected []uint16
-	v.EachUint16(func(index int, val uint16) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupUint16 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]uint16.
-func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value {
-	groups := make(map[string][]uint16)
-	v.EachUint16(func(index int, val uint16) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]uint16, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceUint16 uses the specified function to replace each uint16s
-// by iterating each item. The data in the returned result will be a
-// []uint16 containing the replaced items.
-func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value {
-	arr := v.MustUint16Slice()
-	replaced := make([]uint16, len(arr))
-	v.EachUint16(func(index int, val uint16) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectUint16 uses the specified collector function to collect a value
-// for each of the uint16s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value {
-	arr := v.MustUint16Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachUint16(func(index int, val uint16) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Uint32 (uint32 and []uint32)
-*/
-
-// Uint32 gets the value as a uint32, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Uint32(optionalDefault ...uint32) uint32 {
-	if s, ok := v.data.(uint32); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustUint32 gets the value as a uint32.
-//
-// Panics if the object is not a uint32.
-func (v *Value) MustUint32() uint32 {
-	return v.data.(uint32)
-}
-
-// Uint32Slice gets the value as a []uint32, returns the optionalDefault
-// value or nil if the value is not a []uint32.
-func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 {
-	if s, ok := v.data.([]uint32); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustUint32Slice gets the value as a []uint32.
-//
-// Panics if the object is not a []uint32.
-func (v *Value) MustUint32Slice() []uint32 {
-	return v.data.([]uint32)
-}
-
-// IsUint32 gets whether the object contained is a uint32 or not.
-func (v *Value) IsUint32() bool {
-	_, ok := v.data.(uint32)
-	return ok
-}
-
-// IsUint32Slice gets whether the object contained is a []uint32 or not.
-func (v *Value) IsUint32Slice() bool {
-	_, ok := v.data.([]uint32)
-	return ok
-}
-
-// EachUint32 calls the specified callback for each object
-// in the []uint32.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachUint32(callback func(int, uint32) bool) *Value {
-	for index, val := range v.MustUint32Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereUint32 uses the specified decider function to select items
-// from the []uint32. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value {
-	var selected []uint32
-	v.EachUint32(func(index int, val uint32) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupUint32 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]uint32.
-func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value {
-	groups := make(map[string][]uint32)
-	v.EachUint32(func(index int, val uint32) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]uint32, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceUint32 uses the specified function to replace each uint32s
-// by iterating each item. The data in the returned result will be a
-// []uint32 containing the replaced items.
-func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value {
-	arr := v.MustUint32Slice()
-	replaced := make([]uint32, len(arr))
-	v.EachUint32(func(index int, val uint32) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectUint32 uses the specified collector function to collect a value
-// for each of the uint32s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value {
-	arr := v.MustUint32Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachUint32(func(index int, val uint32) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Uint64 (uint64 and []uint64)
-*/
-
-// Uint64 gets the value as a uint64, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Uint64(optionalDefault ...uint64) uint64 {
-	if s, ok := v.data.(uint64); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustUint64 gets the value as a uint64.
-//
-// Panics if the object is not a uint64.
-func (v *Value) MustUint64() uint64 {
-	return v.data.(uint64)
-}
-
-// Uint64Slice gets the value as a []uint64, returns the optionalDefault
-// value or nil if the value is not a []uint64.
-func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 {
-	if s, ok := v.data.([]uint64); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustUint64Slice gets the value as a []uint64.
-//
-// Panics if the object is not a []uint64.
-func (v *Value) MustUint64Slice() []uint64 {
-	return v.data.([]uint64)
-}
-
-// IsUint64 gets whether the object contained is a uint64 or not.
-func (v *Value) IsUint64() bool {
-	_, ok := v.data.(uint64)
-	return ok
-}
-
-// IsUint64Slice gets whether the object contained is a []uint64 or not.
-func (v *Value) IsUint64Slice() bool {
-	_, ok := v.data.([]uint64)
-	return ok
-}
-
-// EachUint64 calls the specified callback for each object
-// in the []uint64.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachUint64(callback func(int, uint64) bool) *Value {
-	for index, val := range v.MustUint64Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereUint64 uses the specified decider function to select items
-// from the []uint64. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value {
-	var selected []uint64
-	v.EachUint64(func(index int, val uint64) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupUint64 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]uint64.
-func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value {
-	groups := make(map[string][]uint64)
-	v.EachUint64(func(index int, val uint64) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]uint64, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceUint64 uses the specified function to replace each uint64s
-// by iterating each item. The data in the returned result will be a
-// []uint64 containing the replaced items.
-func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value {
-	arr := v.MustUint64Slice()
-	replaced := make([]uint64, len(arr))
-	v.EachUint64(func(index int, val uint64) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectUint64 uses the specified collector function to collect a value
-// for each of the uint64s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value {
-	arr := v.MustUint64Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachUint64(func(index int, val uint64) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Uintptr (uintptr and []uintptr)
-*/
-
-// Uintptr gets the value as a uintptr, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr {
-	if s, ok := v.data.(uintptr); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustUintptr gets the value as a uintptr.
-//
-// Panics if the object is not a uintptr.
-func (v *Value) MustUintptr() uintptr {
-	return v.data.(uintptr)
-}
-
-// UintptrSlice gets the value as a []uintptr, returns the optionalDefault
-// value or nil if the value is not a []uintptr.
-func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr {
-	if s, ok := v.data.([]uintptr); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustUintptrSlice gets the value as a []uintptr.
-//
-// Panics if the object is not a []uintptr.
-func (v *Value) MustUintptrSlice() []uintptr {
-	return v.data.([]uintptr)
-}
-
-// IsUintptr gets whether the object contained is a uintptr or not.
-func (v *Value) IsUintptr() bool {
-	_, ok := v.data.(uintptr)
-	return ok
-}
-
-// IsUintptrSlice gets whether the object contained is a []uintptr or not.
-func (v *Value) IsUintptrSlice() bool {
-	_, ok := v.data.([]uintptr)
-	return ok
-}
-
-// EachUintptr calls the specified callback for each object
-// in the []uintptr.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value {
-	for index, val := range v.MustUintptrSlice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereUintptr uses the specified decider function to select items
-// from the []uintptr. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value {
-	var selected []uintptr
-	v.EachUintptr(func(index int, val uintptr) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupUintptr uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]uintptr.
-func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value {
-	groups := make(map[string][]uintptr)
-	v.EachUintptr(func(index int, val uintptr) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]uintptr, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceUintptr uses the specified function to replace each uintptrs
-// by iterating each item. The data in the returned result will be a
-// []uintptr containing the replaced items.
-func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value {
-	arr := v.MustUintptrSlice()
-	replaced := make([]uintptr, len(arr))
-	v.EachUintptr(func(index int, val uintptr) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectUintptr uses the specified collector function to collect a value
-// for each of the uintptrs in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value {
-	arr := v.MustUintptrSlice()
-	collected := make([]interface{}, len(arr))
-	v.EachUintptr(func(index int, val uintptr) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Float32 (float32 and []float32)
-*/
-
-// Float32 gets the value as a float32, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Float32(optionalDefault ...float32) float32 {
-	if s, ok := v.data.(float32); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustFloat32 gets the value as a float32.
-//
-// Panics if the object is not a float32.
-func (v *Value) MustFloat32() float32 {
-	return v.data.(float32)
-}
-
-// Float32Slice gets the value as a []float32, returns the optionalDefault
-// value or nil if the value is not a []float32.
-func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 {
-	if s, ok := v.data.([]float32); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustFloat32Slice gets the value as a []float32.
-//
-// Panics if the object is not a []float32.
-func (v *Value) MustFloat32Slice() []float32 {
-	return v.data.([]float32)
-}
-
-// IsFloat32 gets whether the object contained is a float32 or not.
-func (v *Value) IsFloat32() bool {
-	_, ok := v.data.(float32)
-	return ok
-}
-
-// IsFloat32Slice gets whether the object contained is a []float32 or not.
-func (v *Value) IsFloat32Slice() bool {
-	_, ok := v.data.([]float32)
-	return ok
-}
-
-// EachFloat32 calls the specified callback for each object
-// in the []float32.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachFloat32(callback func(int, float32) bool) *Value {
-	for index, val := range v.MustFloat32Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereFloat32 uses the specified decider function to select items
-// from the []float32. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value {
-	var selected []float32
-	v.EachFloat32(func(index int, val float32) bool {
-		shouldSelect := decider(index, val)
-		if !shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupFloat32 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]float32.
-func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value {
-	groups := make(map[string][]float32)
-	v.EachFloat32(func(index int, val float32) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]float32, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceFloat32 uses the specified function to replace each float32s
-// by iterating each item. The data in the returned result will be a
-// []float32 containing the replaced items.
-func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value {
-	arr := v.MustFloat32Slice()
-	replaced := make([]float32, len(arr))
-	v.EachFloat32(func(index int, val float32) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectFloat32 uses the specified collector function to collect a value
-// for each of the float32s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value {
-	arr := v.MustFloat32Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachFloat32(func(index int, val float32) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Float64 (float64 and []float64)
-*/
-
-// Float64 gets the value as a float64, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Float64(optionalDefault ...float64) float64 {
-	if s, ok := v.data.(float64); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustFloat64 gets the value as a float64.
-//
-// Panics if the object is not a float64.
-func (v *Value) MustFloat64() float64 {
-	return v.data.(float64)
-}
-
-// Float64Slice gets the value as a []float64, returns the optionalDefault
-// value or nil if the value is not a []float64.
-func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 {
-	if s, ok := v.data.([]float64); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustFloat64Slice gets the value as a []float64.
-//
-// Panics if the object is not a []float64.
-func (v *Value) MustFloat64Slice() []float64 {
-	return v.data.([]float64)
-}
-
-// IsFloat64 gets whether the object contained is a float64 or not.
-func (v *Value) IsFloat64() bool {
-	_, ok := v.data.(float64)
-	return ok
-}
-
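Of all the numeric getters in this file, Float64 is the one that matters most in practice: encoding/json decodes every JSON number to float64, so maps built with objx's FromJSON/MustFromJSON constructors (defined outside this hunk) hit the Float64 helpers, not the Int ones:

	package main

	import (
		"fmt"

		"github.com/stretchr/objx"
	)

	func main() {
		m := objx.MustFromJSON(`{"price": 9, "qty": 2}`)
		// JSON numbers arrive as float64, so Int misses its type assertion...
		fmt.Println(m.Get("price").Int(-1)) // -1 (fallback)
		// ...while Float64 matches.
		fmt.Println(m.Get("price").Float64()) // 9
	}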
-func (v *Value) IsFloat64Slice() bool {
-	_, ok := v.data.([]float64)
-	return ok
-}
-
-// EachFloat64 calls the specified callback for each object
-// in the []float64.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachFloat64(callback func(int, float64) bool) *Value {
-	for index, val := range v.MustFloat64Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereFloat64 uses the specified decider function to select items
-// from the []float64. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value {
-	var selected []float64
-	v.EachFloat64(func(index int, val float64) bool {
-		shouldSelect := decider(index, val)
-		if shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupFloat64 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]float64.
-func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value {
-	groups := make(map[string][]float64)
-	v.EachFloat64(func(index int, val float64) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]float64, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceFloat64 uses the specified function to replace each float64
-// by iterating each item. The data in the returned result will be a
-// []float64 containing the replaced items.
-func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value {
-	arr := v.MustFloat64Slice()
-	replaced := make([]float64, len(arr))
-	v.EachFloat64(func(index int, val float64) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectFloat64 uses the specified collector function to collect a value
-// for each of the float64s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value {
-	arr := v.MustFloat64Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachFloat64(func(index int, val float64) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Complex64 (complex64 and []complex64)
-*/
-
-// Complex64 gets the value as a complex64, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Complex64(optionalDefault ...complex64) complex64 {
-	if s, ok := v.data.(complex64); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustComplex64 gets the value as a complex64.
-//
-// Panics if the object is not a complex64.
-func (v *Value) MustComplex64() complex64 {
-	return v.data.(complex64)
-}
-
-// Complex64Slice gets the value as a []complex64, returns the optionalDefault
-// value or nil if the value is not a []complex64.
-func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 {
-	if s, ok := v.data.([]complex64); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustComplex64Slice gets the value as a []complex64.
-//
-// Panics if the object is not a []complex64.
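-//
-// Illustrative sketch (not from the original file): the Must* accessors
-// panic on a type mismatch, while the plain accessors fall back to a
-// default instead:
-//
-//	v := &Value{data: "not a slice"}
-//	_ = v.Complex64Slice()     // returns nil rather than panicking
-//	_ = v.MustComplex64Slice() // panics: interface conversion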
-func (v *Value) MustComplex64Slice() []complex64 {
-	return v.data.([]complex64)
-}
-
-// IsComplex64 gets whether the object contained is a complex64 or not.
-func (v *Value) IsComplex64() bool {
-	_, ok := v.data.(complex64)
-	return ok
-}
-
-// IsComplex64Slice gets whether the object contained is a []complex64 or not.
-func (v *Value) IsComplex64Slice() bool {
-	_, ok := v.data.([]complex64)
-	return ok
-}
-
-// EachComplex64 calls the specified callback for each object
-// in the []complex64.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value {
-	for index, val := range v.MustComplex64Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereComplex64 uses the specified decider function to select items
-// from the []complex64. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value {
-	var selected []complex64
-	v.EachComplex64(func(index int, val complex64) bool {
-		shouldSelect := decider(index, val)
-		if shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupComplex64 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]complex64.
-func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value {
-	groups := make(map[string][]complex64)
-	v.EachComplex64(func(index int, val complex64) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]complex64, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceComplex64 uses the specified function to replace each complex64
-// by iterating each item. The data in the returned result will be a
-// []complex64 containing the replaced items.
-func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value {
-	arr := v.MustComplex64Slice()
-	replaced := make([]complex64, len(arr))
-	v.EachComplex64(func(index int, val complex64) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectComplex64 uses the specified collector function to collect a value
-// for each of the complex64s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value {
-	arr := v.MustComplex64Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachComplex64(func(index int, val complex64) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
-
-/*
-	Complex128 (complex128 and []complex128)
-*/
-
-// Complex128 gets the value as a complex128, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) Complex128(optionalDefault ...complex128) complex128 {
-	if s, ok := v.data.(complex128); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return 0
-}
-
-// MustComplex128 gets the value as a complex128.
-//
-// Panics if the object is not a complex128.
-func (v *Value) MustComplex128() complex128 {
-	return v.data.(complex128)
-}
-
-// Complex128Slice gets the value as a []complex128, returns the optionalDefault
-// value or nil if the value is not a []complex128.
-func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 {
-	if s, ok := v.data.([]complex128); ok {
-		return s
-	}
-	if len(optionalDefault) == 1 {
-		return optionalDefault[0]
-	}
-	return nil
-}
-
-// MustComplex128Slice gets the value as a []complex128.
-//
-// Panics if the object is not a []complex128.
-func (v *Value) MustComplex128Slice() []complex128 {
-	return v.data.([]complex128)
-}
-
-// IsComplex128 gets whether the object contained is a complex128 or not.
-func (v *Value) IsComplex128() bool {
-	_, ok := v.data.(complex128)
-	return ok
-}
-
-// IsComplex128Slice gets whether the object contained is a []complex128 or not.
-func (v *Value) IsComplex128Slice() bool {
-	_, ok := v.data.([]complex128)
-	return ok
-}
-
-// EachComplex128 calls the specified callback for each object
-// in the []complex128.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value {
-	for index, val := range v.MustComplex128Slice() {
-		carryon := callback(index, val)
-		if !carryon {
-			break
-		}
-	}
-	return v
-}
-
-// WhereComplex128 uses the specified decider function to select items
-// from the []complex128. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value {
-	var selected []complex128
-	v.EachComplex128(func(index int, val complex128) bool {
-		shouldSelect := decider(index, val)
-		if shouldSelect {
-			selected = append(selected, val)
-		}
-		return true
-	})
-	return &Value{data: selected}
-}
-
-// GroupComplex128 uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]complex128.
-func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value {
-	groups := make(map[string][]complex128)
-	v.EachComplex128(func(index int, val complex128) bool {
-		group := grouper(index, val)
-		if _, ok := groups[group]; !ok {
-			groups[group] = make([]complex128, 0)
-		}
-		groups[group] = append(groups[group], val)
-		return true
-	})
-	return &Value{data: groups}
-}
-
-// ReplaceComplex128 uses the specified function to replace each complex128
-// by iterating each item. The data in the returned result will be a
-// []complex128 containing the replaced items.
-func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value {
-	arr := v.MustComplex128Slice()
-	replaced := make([]complex128, len(arr))
-	v.EachComplex128(func(index int, val complex128) bool {
-		replaced[index] = replacer(index, val)
-		return true
-	})
-	return &Value{data: replaced}
-}
-
-// CollectComplex128 uses the specified collector function to collect a value
-// for each of the complex128s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value {
-	arr := v.MustComplex128Slice()
-	collected := make([]interface{}, len(arr))
-	v.EachComplex128(func(index int, val complex128) bool {
-		collected[index] = collector(index, val)
-		return true
-	})
-	return &Value{data: collected}
-}
diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go
deleted file mode 100644
index e4b4a1433..000000000
--- a/vendor/github.com/stretchr/objx/value.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package objx
-
-import (
-	"fmt"
-	"strconv"
-)
-
-// Value provides methods for extracting interface{} data in various
-// types.
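-//
-// Illustrative sketch (not from the original file): String falls back to
-// a %#v dump for types it has no dedicated case for:
-//
-//	(&Value{data: 42}).String()          // "42"
-//	(&Value{data: []int{1, 2}}).String() // "[]int{1, 2}"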
-type Value struct { - // data contains the raw data being managed by this Value - data interface{} -} - -// Data returns the raw data contained by this Value -func (v *Value) Data() interface{} { - return v.data -} - -// String returns the value always as a string -func (v *Value) String() string { - switch { - case v.IsStr(): - return v.Str() - case v.IsBool(): - return strconv.FormatBool(v.Bool()) - case v.IsFloat32(): - return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) - case v.IsFloat64(): - return strconv.FormatFloat(v.Float64(), 'f', -1, 64) - case v.IsInt(): - return strconv.FormatInt(int64(v.Int()), 10) - case v.IsInt8(): - return strconv.FormatInt(int64(v.Int8()), 10) - case v.IsInt16(): - return strconv.FormatInt(int64(v.Int16()), 10) - case v.IsInt32(): - return strconv.FormatInt(int64(v.Int32()), 10) - case v.IsInt64(): - return strconv.FormatInt(v.Int64(), 10) - case v.IsUint(): - return strconv.FormatUint(uint64(v.Uint()), 10) - case v.IsUint8(): - return strconv.FormatUint(uint64(v.Uint8()), 10) - case v.IsUint16(): - return strconv.FormatUint(uint64(v.Uint16()), 10) - case v.IsUint32(): - return strconv.FormatUint(uint64(v.Uint32()), 10) - case v.IsUint64(): - return strconv.FormatUint(v.Uint64(), 10) - } - return fmt.Sprintf("%#v", v.Data()) -} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE deleted file mode 100644 index 473b670a7..000000000 --- a/vendor/github.com/stretchr/testify/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go deleted file mode 100644 index aa1c2b95c..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ /dev/null @@ -1,484 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Conditionf uses a Comparison to assert a complex condition. -func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Condition(t, comp, append([]interface{}{msg}, args...)...) -} - -// Containsf asserts that the specified string, list(array, slice...) 
or map contains the
-// specified substring or element.
-//
-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
-func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
-}
-
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
-func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return DirExists(t, path, append([]interface{}{msg}, args...)...)
-}
-
-// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
-func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
-}
-
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// assert.Emptyf(t, obj, "error message %s", "formatted")
-func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Empty(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// Equalf asserts that two objects are equal.
-//
-// assert.Equalf(t, 123, 123, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
-func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
-}
-
-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
-//
-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
-func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// Errorf asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.Errorf(t, err, "error message %s", "formatted") {
-//	assert.Equal(t, expectedErrorf, err)
-// }
-func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Error(t, err, append([]interface{}{msg}, args...)...)
-}
-
-// Exactlyf asserts that two objects are equal in value and type.
-//
-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
-func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// Failf reports a failure through the test framework.
-func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
-}
-
-// FailNowf fails the test immediately.
-func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
-}
-
-// Falsef asserts that the specified value is false.
-//
-// assert.Falsef(t, myBool, "error message %s", "formatted")
-func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return False(t, value, append([]interface{}{msg}, args...)...)
-}
-
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
-func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return FileExists(t, path, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPBodyContainsf asserts that a specified handler returns a
-// body that contains a string.
-//
-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPBodyNotContainsf asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPErrorf asserts that a specified handler returns an error status code.
-//
-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPRedirectf asserts that a specified handler returns a redirect status code.
-//
-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
-}
-
-// HTTPSuccessf asserts that a specified handler returns a success status code.
-//
-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
-}
-
-// Implementsf asserts that an object implements the specified interface.
-//
-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
-func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
-}
-
-// InDeltaf asserts that the two numerals are within delta of each other.
-//
-// assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
-func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
-}
-
-// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
-}
-
-// InDeltaSlicef is the same as InDelta, except it compares two slices.
-func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
-}
-
-// InEpsilonf asserts that expected and actual have a relative error less than epsilon
-func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
-}
-
-// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
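-//
-// Illustrative note (not from the original file): relative error here is
-// |expected - actual| / |expected|, so epsilon scales with the magnitude
-// of the expected value, e.g.:
-//
-//	assert.InEpsilonf(t, 100.0, 101.0, 0.02, "within 2%%") // relative error 0.01, passes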
-func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
-}
-
-// IsTypef asserts that the specified objects are of the same type.
-func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
-}
-
-// JSONEqf asserts that two JSON strings are equivalent.
-//
-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
-func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// Lenf asserts that the specified object has specific length.
-// Lenf also fails if the object has a type that len() does not accept.
-//
-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
-func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Len(t, object, length, append([]interface{}{msg}, args...)...)
-}
-
-// Nilf asserts that the specified object is nil.
-//
-// assert.Nilf(t, err, "error message %s", "formatted")
-func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Nil(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// NoErrorf asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.NoErrorf(t, err, "error message %s", "formatted") {
-//	assert.Equal(t, expectedObj, actualObj)
-// }
-func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoError(t, err, append([]interface{}{msg}, args...)...)
-}
-
-// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
-func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
-}
-
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
-//	assert.Equal(t, "two", obj[1])
-// }
-func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// NotEqualf asserts that the specified values are NOT equal.
-//
-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
-// NotNilf asserts that the specified object is not nil.
-//
-// assert.NotNilf(t, err, "error message %s", "formatted")
-func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotNil(t, object, append([]interface{}{msg}, args...)...)
-}
-
-// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
-func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotPanics(t, f, append([]interface{}{msg}, args...)...)
-}
-
-// NotRegexpf asserts that a specified regexp does not match a string.
-//
-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
-func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
-}
-
-// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
-// elements given in the specified subset(array, slice...).
-//
-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
-func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
-}
-
-// NotZerof asserts that i is not the zero value for its type.
-func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotZero(t, i, append([]interface{}{msg}, args...)...)
-}
-
-// Panicsf asserts that the code inside the specified PanicTestFunc panics.
-//
-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
-func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Panics(t, f, append([]interface{}{msg}, args...)...)
-}
-
-// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
-// the recovered panic value equals the expected panic value.
-//
-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
-func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
-}
-
-// Regexpf asserts that a specified regexp matches a string.
-//
-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
-func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
-}
-
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
-//
-// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
-func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
-}
-
-// Truef asserts that the specified value is true.
-//
-// assert.Truef(t, myBool, "error message %s", "formatted")
-func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return True(t, value, append([]interface{}{msg}, args...)...)
-}
-
-// WithinDurationf asserts that the two times are within duration delta of each other.
-//
-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
-func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
-}
-
-// Zerof asserts that i is the zero value for its type.
-func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
-	if h, ok := t.(tHelper); ok {
-		h.Helper()
-	}
-	return Zero(t, i, append([]interface{}{msg}, args...)...)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
deleted file mode 100644
index d2bb0b817..000000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{{.CommentFormat}}
-func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
-	if h, ok := t.(tHelper); ok { h.Helper() }
-	return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
deleted file mode 100644
index de39f794e..000000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ /dev/null
@@ -1,956 +0,0 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
-
-package assert
-
-import (
-	http "net/http"
-	url "net/url"
-	time "time"
-)
-
-// Condition uses a Comparison to assert a complex condition.
-func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Condition(a.t, comp, msgAndArgs...)
-}
-
-// Conditionf uses a Comparison to assert a complex condition.
-func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Conditionf(a.t, comp, msg, args...)
-}
-
-// Contains asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// a.Contains("Hello World", "World")
-// a.Contains(["Hello", "World"], "World")
-// a.Contains({"Hello": "World"}, "Hello")
-func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Contains(a.t, s, contains, msgAndArgs...)
-}
-
-// Containsf asserts that the specified string, list(array, slice...)
or map contains the
-// specified substring or element.
-//
-// a.Containsf("Hello World", "World", "error message %s", "formatted")
-// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
-// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
-func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Containsf(a.t, s, contains, msg, args...)
-}
-
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
-func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return DirExists(a.t, path, msgAndArgs...)
-}
-
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
-func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return DirExistsf(a.t, path, msg, args...)
-}
-
-// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
-func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return ElementsMatch(a.t, listA, listB, msgAndArgs...)
-}
-
-// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
-// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
-// the number of appearances of each of them in both lists should match.
-//
-// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
-func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return ElementsMatchf(a.t, listA, listB, msg, args...)
-}
-
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// a.Empty(obj)
-func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Empty(a.t, object, msgAndArgs...)
-}
-
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// a.Emptyf(obj, "error message %s", "formatted")
-func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Emptyf(a.t, object, msg, args...)
-}
-
-// Equal asserts that two objects are equal.
-//
-// a.Equal(123, 123)
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Equal(a.t, expected, actual, msgAndArgs...)
-}
-
-// EqualError asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// a.EqualError(err, expectedErrorString)
-func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return EqualError(a.t, theError, errString, msgAndArgs...)
-}
-
-// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
-func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return EqualErrorf(a.t, theError, errString, msg, args...)
-}
-
-// EqualValues asserts that two objects are equal or convertible to the same types
-// and equal.
-//
-// a.EqualValues(uint32(123), int32(123))
-func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return EqualValues(a.t, expected, actual, msgAndArgs...)
-}
-
-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
-//
-// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
-func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return EqualValuesf(a.t, expected, actual, msg, args...)
-}
-
-// Equalf asserts that two objects are equal.
-//
-// a.Equalf(123, 123, "error message %s", "formatted")
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses). Function equality
-// cannot be determined and will always fail.
-func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Equalf(a.t, expected, actual, msg, args...)
-}
-
-// Error asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.Error(err) {
-//	assert.Equal(t, expectedError, err)
-// }
-func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Error(a.t, err, msgAndArgs...)
-}
-
-// Errorf asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.Errorf(err, "error message %s", "formatted") {
-//	assert.Equal(t, expectedErrorf, err)
-// }
-func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Errorf(a.t, err, msg, args...)
-}
-
-// Exactly asserts that two objects are equal in value and type.
-//
-// a.Exactly(int32(123), int64(123))
-func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Exactly(a.t, expected, actual, msgAndArgs...)
-}
-
-// Exactlyf asserts that two objects are equal in value and type.
-//
-// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
-func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Exactlyf(a.t, expected, actual, msg, args...)
-}
-
-// Fail reports a failure through the test framework.
-func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Fail(a.t, failureMessage, msgAndArgs...)
-}
-
-// FailNow fails the test immediately.
-func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return FailNow(a.t, failureMessage, msgAndArgs...)
-}
-
-// FailNowf fails the test immediately.
-func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return FailNowf(a.t, failureMessage, msg, args...)
-}
-
-// Failf reports a failure through the test framework.
-func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Failf(a.t, failureMessage, msg, args...)
-}
-
-// False asserts that the specified value is false.
-//
-// a.False(myBool)
-func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return False(a.t, value, msgAndArgs...)
-}
-
-// Falsef asserts that the specified value is false.
-//
-// a.Falsef(myBool, "error message %s", "formatted")
-func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Falsef(a.t, value, msg, args...)
-}
-
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
-func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return FileExists(a.t, path, msgAndArgs...)
-}
-
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
-func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return FileExistsf(a.t, path, msg, args...)
-}
-
-// HTTPBodyContains asserts that a specified handler returns a
-// body that contains a string.
-//
-// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
-}
-
-// HTTPBodyContainsf asserts that a specified handler returns a
-// body that contains a string.
-//
-// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
-}
-
-// HTTPBodyNotContains asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
-}
-
-// HTTPBodyNotContainsf asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
-}
-
-// HTTPError asserts that a specified handler returns an error status code.
-//
-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPErrorf asserts that a specified handler returns an error status code.
-//
-// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
-}
-
-// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPRedirectf asserts that a specified handler returns a redirect status code.
-//
-// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
-}
-
-// HTTPSuccess asserts that a specified handler returns a success status code.
-//
-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
-}
-
-// HTTPSuccessf asserts that a specified handler returns a success status code.
-//
-// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
-}
-
-// Implements asserts that an object implements the specified interface.
-//
-// a.Implements((*MyInterface)(nil), new(MyObject))
-func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Implements(a.t, interfaceObject, object, msgAndArgs...)
-}
-
-// Implementsf asserts that an object implements the specified interface.
-//
-// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
-func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Implementsf(a.t, interfaceObject, object, msg, args...)
-}
-
-// InDelta asserts that the two numerals are within delta of each other.
-//
-// a.InDelta(math.Pi, (22 / 7.0), 0.01)
-func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDelta(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
-func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
-}
-
-// InDeltaSlice is the same as InDelta, except it compares two slices.
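-//
-// Illustrative sketch (not from the original file):
-//
-//	a.InDeltaSlice([]float64{1.0, 2.0}, []float64{1.01, 1.99}, 0.05) // passes: each pair differs by 0.01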
-func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaSlicef is the same as InDelta, except it compares two slices.
-func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
-}
-
-// InDeltaf asserts that the two numerals are within delta of each other.
-//
-// a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
-func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InDeltaf(a.t, expected, actual, delta, msg, args...)
-}
-
-// InEpsilon asserts that expected and actual have a relative error less than epsilon
-func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
-}
-
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
-}
-
-// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
-func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
-}
-
-// InEpsilonf asserts that expected and actual have a relative error less than epsilon
-func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
-}
-
-// IsType asserts that the specified objects are of the same type.
-func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return IsType(a.t, expectedType, object, msgAndArgs...)
-}
-
-// IsTypef asserts that the specified objects are of the same type.
-func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return IsTypef(a.t, expectedType, object, msg, args...)
-}
-
-// JSONEq asserts that two JSON strings are equivalent.
-//
-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return JSONEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// JSONEqf asserts that two JSON strings are equivalent.
-//
-// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
-func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return JSONEqf(a.t, expected, actual, msg, args...)
-}
-
-// Len asserts that the specified object has specific length.
-// Len also fails if the object has a type that len() does not accept.
-//
-// a.Len(mySlice, 3)
-func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Len(a.t, object, length, msgAndArgs...)
-}
-
-// Lenf asserts that the specified object has specific length.
-// Lenf also fails if the object has a type that len() does not accept.
-//
-// a.Lenf(mySlice, 3, "error message %s", "formatted")
-func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Lenf(a.t, object, length, msg, args...)
-}
-
-// Nil asserts that the specified object is nil.
-//
-// a.Nil(err)
-func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Nil(a.t, object, msgAndArgs...)
-}
-
-// Nilf asserts that the specified object is nil.
-//
-// a.Nilf(err, "error message %s", "formatted")
-func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return Nilf(a.t, object, msg, args...)
-}
-
-// NoError asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.NoError(err) {
-//	assert.Equal(t, expectedObj, actualObj)
-// }
-func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoError(a.t, err, msgAndArgs...)
-}
-
-// NoErrorf asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.NoErrorf(err, "error message %s", "formatted") {
-//	assert.Equal(t, expectedObj, actualObj)
-// }
-func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoErrorf(a.t, err, msg, args...)
-}
-
-// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// a.NotContains("Hello World", "Earth")
-// a.NotContains(["Hello", "World"], "Earth")
-// a.NotContains({"Hello": "World"}, "Earth")
-func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotContains(a.t, s, contains, msgAndArgs...)
-}
-
-// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
-// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
-// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
-func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NotContainsf(a.t, s, contains, msg, args...)
-}
-
-// NotEmpty asserts that the specified object is NOT empty. I.e.
not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmpty(a.t, object, msgAndArgs...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmptyf(a.t, object, msg, args...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualf(a.t, expected, actual, msg, args...) -} - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err) -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNil(a.t, object, msgAndArgs...) -} - -// NotNilf asserts that the specified object is not nil. -// -// a.NotNilf(err, "error message %s", "formatted") -func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNilf(a.t, object, msg, args...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ RemainCalm() }) -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanics(a.t, f, msgAndArgs...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") -func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanicsf(a.t, f, msg, args...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. 
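Every forwarder opens with the same `tHelper` type assertion so that, on Go versions with `t.Helper()`, failure output points at the calling test rather than at this wrapper layer, while older `TestingT` implementations keep working. A standalone sketch of that optional-interface pattern (the names here are invented):

```go
package example

import "testing"

// minimalT mirrors the package's TestingT: the smallest surface a helper needs.
type minimalT interface {
	Errorf(format string, args ...interface{})
}

// helperT mirrors the unexported tHelper: an optional capability probe.
type helperT interface {
	Helper()
}

// requireNonEmpty fails the test when s is empty. The type assertion upgrades
// to Helper() only when the concrete type supports it, so failure output is
// attributed to the caller without breaking TestingT implementations that
// predate t.Helper().
func requireNonEmpty(t minimalT, s string) bool {
	if h, ok := t.(helperT); ok {
		h.Helper()
	}
	if s == "" {
		t.Errorf("expected a non-empty string")
		return false
	}
	return true
}

func TestRequireNonEmpty(t *testing.T) {
	requireNonEmpty(t, "ok") // *testing.T satisfies both interfaces
}
```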
-// -// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") -func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexpf(a.t, rx, str, msg, args...) -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubset(a.t, list, subset, msgAndArgs...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubsetf(a.t, list, subset, msg, args...) -} - -// NotZero asserts that i is not the zero value for its type. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZero(a.t, i, msgAndArgs...) -} - -// NotZerof asserts that i is not the zero value for its type. -func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZerof(a.t, i, msg, args...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ GoCrazy() }) -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panics(a.t, f, msgAndArgs...) -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(a.t, expected, f, msgAndArgs...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValuef(a.t, expected, f, msg, args...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panicsf(a.t, f, msg, args...) -} - -// Regexp asserts that a specified regexp matches a string. 
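The Panics family turns a recovered panic into an ordinary assertion result instead of letting it unwind the test. A short usage sketch (divide is a hypothetical helper):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// divide is a hypothetical helper that panics on a zero divisor.
func divide(a, b int) int {
	if b == 0 {
		panic("division by zero")
	}
	return a / b
}

func TestDivide(t *testing.T) {
	a := assert.New(t)
	a.Panics(func() { divide(1, 0) })
	// PanicsWithValue additionally compares the recovered value.
	a.PanicsWithValue("division by zero", func() { divide(1, 0) })
	a.NotPanics(func() { divide(4, 2) })
}
```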
-// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") -func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexpf(a.t, rx, str, msg, args...) -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subset(a.t, list, subset, msgAndArgs...) -} - -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subsetf(a.t, list, subset, msg, args...) -} - -// True asserts that the specified value is true. -// -// a.True(myBool) -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return True(a.t, value, msgAndArgs...) -} - -// Truef asserts that the specified value is true. -// -// a.Truef(myBool, "error message %s", "formatted") -func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Truef(a.t, value, msg, args...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. -// -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDurationf(a.t, expected, actual, delta, msg, args...) -} - -// Zero asserts that i is the zero value for its type. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zero(a.t, i, msgAndArgs...) -} - -// Zerof asserts that i is the zero value for its type. -func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zerof(a.t, i, msg, args...) 
-} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 188bb9e17..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - if h, ok := a.t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index 5bdec56cd..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1394 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "os" - "reflect" - "regexp" - "runtime" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" -) - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful -// for table driven tests. -type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool - -// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful -// for table driven tests. -type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool - -// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful -// for table driven tests. -type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool - -// ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful -// for table driven tests. -type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { - return expected == actual - } - - exp, ok := expected.([]byte) - if !ok { - return reflect.DeepEqual(expected, actual) - } - - act, ok := actual.([]byte) - if !ok { - return false - } - if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. 
-func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occurred in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - pc := uintptr(0) - file := "" - line := 0 - ok := false - name := "" - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. - if name == "testing.tRunner" { - break - } - - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - if len(parts) > 1 { - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - } - - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Aligns the provided message so that all lines after the first line start at the same location as the first line. -// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the -// basis on which the alignment occurs). 
-func indentMessageLines(message string, longestLabelLen int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - // no need to align first line because it starts at the correct location (after the label) - if i != 0 { - // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, - {"Error", failureMessage}, - } - - // Add test name if the Go version supports it - if n, ok := t.(interface { - Name() string - }); ok { - content = append(content, labeledContent{"Test", n.Name()}) - } - - message := messageFromMsgAndArgs(msgAndArgs...) - if len(message) > 0 { - content = append(content, labeledContent{"Messages", message}) - } - - t.Errorf("\n%s", ""+labeledOutput(content...)) - - return false -} - -type labeledContent struct { - label string - content string -} - -// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: -// -// \t{{label}}:{{align_spaces}}\t{{content}}\n -// -// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. -// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this -// alignment is achieved, "\t{{content}}\n" is added for the output. -// -// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. -func labeledOutput(content ...labeledContent) string { - longestLabel := 0 - for _, v := range content { - if len(v.label) > longestLabel { - longestLabel = len(v.label) - } - } - var output string - for _, v := range content { - output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" - } - return output -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if object == nil { - return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) 
- } - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// formatUnequalValues takes two values of arbitrary types and returns string -// representations appropriate to be presented to the user. -// -// If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar -// to a type conversion in the Go grammar. -func formatUnequalValues(expected, actual interface{}) (e string, a string) { - if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%#v)", expected, expected), - fmt.Sprintf("%T(%#v)", actual, actual) - } - - return fmt.Sprintf("%#v", expected), - fmt.Sprintf("%#v", actual) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123)) -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal in value and type. -// -// assert.Exactly(t, int32(123), int64(123)) -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. 
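Equal, EqualValues, and Exactly above differ only in how much type leeway they grant. A compact illustration of the three (assuming the upstream assert package):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEqualityFlavors(t *testing.T) {
	// Equal uses ObjectsAreEqual: uint32(123) and int32(123) are NOT equal,
	// because reflect.DeepEqual distinguishes their types.
	assert.NotEqual(t, uint32(123), int32(123))

	// EqualValues also attempts a type conversion, so these DO match.
	assert.EqualValues(t, uint32(123), int32(123))

	// Exactly demands identical types before comparing values at all.
	assert.Exactly(t, int32(123), int32(123))
}
```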
-// -// assert.NotNil(t, err) -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !isNil(object) { - return true - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err) -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - // get nil case out of the way - if object == nil { - return true - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. 
-// -// assert.True(t, myBool) -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if h, ok := t.(interface { - Helper() - }); ok { - h.Helper() - } - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool) -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if reflect.TypeOf(list).Kind() == reflect.String { - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if reflect.TypeOf(list).Kind() == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. 
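As includeElement above shows, Contains and NotContains dispatch on the container's kind: substring match for strings, key lookup for maps, and an element scan for slices and arrays. A usage sketch:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestContainsKinds(t *testing.T) {
	assert.Contains(t, "Hello World", "World")              // substring
	assert.Contains(t, []string{"Hello", "World"}, "World") // slice element

	// For maps, includeElement iterates MapKeys: it matches KEYS, not values.
	assert.Contains(t, map[string]string{"Hello": "World"}, "Hello")
	assert.NotContains(t, map[string]string{"Hello": "World"}, "World")
}
```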
-// -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return true // we consider nil to be equal to the nil set - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) - } - } - - return true -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return true - } - } - - return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. 
If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) -func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isEmpty(listA) && isEmpty(listB) { - return true - } - - aKind := reflect.TypeOf(listA).Kind() - bKind := reflect.TypeOf(listB).Kind() - - if aKind != reflect.Array && aKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) - } - - if bKind != reflect.Array && bKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) - } - - aValue := reflect.ValueOf(listA) - bValue := reflect.ValueOf(listB) - - aLen := aValue.Len() - bLen := bValue.Len() - - if aLen != bLen { - return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) - } - - // Mark indexes in bValue that we already used - visited := make([]bool, bLen) - for i := 0; i < aLen; i++ { - element := aValue.Index(i).Interface() - found := false - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - if ObjectsAreEqual(bValue.Index(j).Interface(), element) { - visited[j] = true - found = true - break - } - } - if !found { - return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) - } - } - - return true -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ GoCrazy() }) -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - - return true -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) 
- } - if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ RemainCalm() }) -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - case time.Duration: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) - if !result { - return result - } - } - - return true -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. 
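InDelta bounds the absolute difference, |expected - actual| <= delta, while InEpsilon below bounds the relative error, |expected - actual| / |expected| <= epsilon, which scales with magnitude. A sketch contrasting the two:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestTolerances(t *testing.T) {
	// Absolute tolerance: |100.0 - 100.4| = 0.4 <= 0.5.
	assert.InDelta(t, 100.0, 100.4, 0.5)

	// Relative tolerance: |100.0 - 100.4| / |100.0| = 0.004 <= 0.01.
	assert.InEpsilon(t, 100.0, 100.4, 0.01)

	// At large magnitudes a fixed delta becomes too strict; a relative
	// bound still expresses "within 1%".
	assert.InEpsilon(t, 1e9, 1.004e9, 0.01)
}
```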
Both maps must have exactly the same keys. -func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Map || - reflect.TypeOf(expected).Kind() != reflect.Map { - return Fail(t, "Arguments must be maps", msgAndArgs...) - } - - expectedMap := reflect.ValueOf(expected) - actualMap := reflect.ValueOf(actual) - - if expectedMap.Len() != actualMap.Len() { - return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) - } - - for _, k := range expectedMap.MapKeys() { - ev := expectedMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !ev.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) - } - - if !av.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) - } - - if !InDelta( - t, - ev.Interface(), - av.Interface(), - delta, - msgAndArgs..., - ) { - return false - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) - } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err != nil { - return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). 
-// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if err == nil { - return Fail(t, "An error is expected but got nil.", msgAndArgs...) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !Error(t, theError, msgAndArgs...) { - return false - } - expected := errString - actual := theError.Error() - // don't need to use deep equals here, we know they are both strings - if expected != actual { - return Fail(t, fmt.Sprintf("Error message not equal:\n"+ - "expected: %q\n"+ - "actual : %q", expected, actual), msgAndArgs...) - } - return true -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. 
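NoError, Error, and EqualError compose naturally: guard follow-up checks on NoError, and let EqualError handle both the nil check and the message comparison. A usage sketch (loadConfig is hypothetical):

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// loadConfig is a hypothetical function used here only to produce errors.
func loadConfig(path string) (string, error) {
	if path == "" {
		return "", errors.New("empty path")
	}
	return "config: " + path, nil
}

func TestErrorFlow(t *testing.T) {
	cfg, err := loadConfig("app.conf")
	// Guard follow-up assertions on NoError so one failure doesn't cascade.
	if assert.NoError(t, err) {
		assert.Equal(t, "config: app.conf", cfg)
	}

	_, err = loadConfig("")
	// EqualError implies Error: it fails on nil, then compares messages.
	assert.EqualError(t, err, "empty path")
}
```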
-func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) - } - return true -} - -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if !info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) - } - return true -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - if ek != reflect.String { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { - e = expected.(string) - a = actual.(string) - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} - -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. 
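FileExists and DirExists above use os.Lstat, so they distinguish a file from a directory at the same path rather than only checking existence. A sketch exercising both against a throwaway temp directory (period-appropriate ioutil helpers assumed):

```go
package example

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFileAndDirExists(t *testing.T) {
	dir, err := ioutil.TempDir("", "assert-demo")
	if !assert.NoError(t, err) {
		return
	}
	defer os.RemoveAll(dir)

	file := filepath.Join(dir, "data.txt")
	assert.NoError(t, ioutil.WriteFile(file, []byte("x"), 0600))

	assert.DirExists(t, dir)   // fails if dir is a file or is missing
	assert.FileExists(t, file) // fails if file is a directory or is missing
}
```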
-func validateEqualArgs(expected, actual interface{}) error { - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - -func isFunction(arg interface{}) bool { - if arg == nil { - return false - } - return reflect.TypeOf(arg).Kind() == reflect.Func -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, -} - -type tHelper interface { - Helper() -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4d6..000000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. -// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d1d..000000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index 9ad56851d..000000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. 
-func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index df46fa777..000000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,143 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 and -// an error if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) - if err != nil { - return -1, err - } - req.URL.RawQuery = values.Encode() - handler(w, req) - return w.Code, nil -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent - if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isSuccessCode -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect - if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isRedirectCode -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). 
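httpCode above drives the handler through httptest.NewRecorder, so status-code assertions need no live server. A usage sketch with a trivial handler:

```go
package example

import (
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// healthHandler is an illustrative handler: 200 by default, 500 when asked.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	if r.URL.Query().Get("fail") == "1" {
		http.Error(w, "boom", http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("ok"))
}

func TestHealthHandler(t *testing.T) {
	assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil)
	assert.HTTPError(t, healthHandler, "GET", "/health", url.Values{"fail": []string{"1"}})
	assert.HTTPBodyContains(t, healthHandler, "GET", "/health", nil, "ok")
}
```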
-func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isErrorCode := code >= http.StatusBadRequest - if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isErrorCode -} - -// HTTPBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return !contains -} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go deleted file mode 100644 index 7324128ef..000000000 --- a/vendor/github.com/stretchr/testify/mock/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package mock provides a system by which it is possible to mock your objects -// and verify calls are happening as expected. -// -// Example Usage -// -// The mock package provides an object, Mock, that tracks activity on another object. It is usually -// embedded into a test object as shown below: -// -// type MyTestObject struct { -// // add a Mock object instance -// mock.Mock -// -// // other fields go here as normal -// } -// -// When implementing the methods of an interface, you wire your functions up -// to call the Mock.Called(args...) method, and return the appropriate values. 
-// -// For example, to mock a method that saves the name and age of a person and returns -// the year of their birth or an error, you might write this: -// -// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { -// args := o.Called(firstname, lastname, age) -// return args.Int(0), args.Error(1) -// } -// -// The Int, Error and Bool methods are examples of strongly typed getters that take the argument -// index position. Given this argument list: -// -// (12, true, "Something") -// -// You could read them out strongly typed like this: -// -// args.Int(0) -// args.Bool(1) -// args.String(2) -// -// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: -// -// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) -// -// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those -// cases you should check for nil first. -package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go deleted file mode 100644 index cc4f642b5..000000000 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ /dev/null @@ -1,885 +0,0 @@ -package mock - -import ( - "errors" - "fmt" - "reflect" - "regexp" - "runtime" - "strings" - "sync" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" - "github.com/stretchr/objx" - "github.com/stretchr/testify/assert" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Logf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - FailNow() -} - -/* - Call -*/ - -// Call represents a method call and is used for setting expectations, -// as well as recording activity. -type Call struct { - Parent *Mock - - // The name of the method that was or will be called. - Method string - - // Holds the arguments of the method. - Arguments Arguments - - // Holds the arguments that should be returned when - // this method is called. - ReturnArguments Arguments - - // Holds the caller info for the On() call - callerInfo []string - - // The number of times to return the return arguments when setting - // expectations. 0 means to always return the value. - Repeatability int - - // Amount of times this call has been called - totalCalls int - - // Call to this method can be optional - optional bool - - // Holds a channel that will be used to block the Return until it either - // receives a message or is closed. nil means it returns immediately. - WaitFor <-chan time.Time - - waitTime time.Duration - - // Holds a handler used to manipulate arguments content that are passed by - // reference. It's useful when mocking methods such as unmarshalers or - // decoders. - RunFn func(Arguments) -} - -func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { - return &Call{ - Parent: parent, - Method: methodName, - Arguments: methodArguments, - ReturnArguments: make([]interface{}, 0), - callerInfo: callerInfo, - Repeatability: 0, - WaitFor: nil, - RunFn: nil, - } -} - -func (c *Call) lock() { - c.Parent.mutex.Lock() -} - -func (c *Call) unlock() { - c.Parent.mutex.Unlock() -} - -// Return specifies the return arguments for the expectation. 
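The package comment above gives the wiring pattern in fragments; assembled into one self-contained sketch (the interface and mock type names are hypothetical, the method body is taken verbatim from the comment):

    package example

    import "github.com/stretchr/testify/mock"

    // PersonStore is a hypothetical interface to be mocked.
    type PersonStore interface {
        SavePersonDetails(firstname, lastname string, age int) (int, error)
    }

    // MockPersonStore embeds mock.Mock, as the package comment describes.
    type MockPersonStore struct {
        mock.Mock
    }

    // SavePersonDetails records the call via Mock.Called and converts the
    // configured return values with the strongly typed getters.
    func (o *MockPersonStore) SavePersonDetails(firstname, lastname string, age int) (int, error) {
        args := o.Called(firstname, lastname, age)
        return args.Int(0), args.Error(1)
    }
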
-// -// Mock.On("DoSomething").Return(errors.New("failed")) -func (c *Call) Return(returnArguments ...interface{}) *Call { - c.lock() - defer c.unlock() - - c.ReturnArguments = returnArguments - - return c -} - -// Once indicates that that the mock should only return the value once. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() -func (c *Call) Once() *Call { - return c.Times(1) -} - -// Twice indicates that that the mock should only return the value twice. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() -func (c *Call) Twice() *Call { - return c.Times(2) -} - -// Times indicates that that the mock should only return the indicated number -// of times. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) -func (c *Call) Times(i int) *Call { - c.lock() - defer c.unlock() - c.Repeatability = i - return c -} - -// WaitUntil sets the channel that will block the mock's return until its closed -// or a message is received. -// -// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) -func (c *Call) WaitUntil(w <-chan time.Time) *Call { - c.lock() - defer c.unlock() - c.WaitFor = w - return c -} - -// After sets how long to block until the call returns -// -// Mock.On("MyMethod", arg1, arg2).After(time.Second) -func (c *Call) After(d time.Duration) *Call { - c.lock() - defer c.unlock() - c.waitTime = d - return c -} - -// Run sets a handler to be called before returning. It can be used when -// mocking a method such as unmarshalers that takes a pointer to a struct and -// sets properties in such struct -// -// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { -// arg := args.Get(0).(*map[string]interface{}) -// arg["foo"] = "bar" -// }) -func (c *Call) Run(fn func(args Arguments)) *Call { - c.lock() - defer c.unlock() - c.RunFn = fn - return c -} - -// Maybe allows the method call to be optional. Not calling an optional method -// will not cause an error while asserting expectations -func (c *Call) Maybe() *Call { - c.lock() - defer c.unlock() - c.optional = true - return c -} - -// On chains a new expectation description onto the mocked interface. This -// allows syntax like. -// -// Mock. -// On("MyMethod", 1).Return(nil). -// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) -func (c *Call) On(methodName string, arguments ...interface{}) *Call { - return c.Parent.On(methodName, arguments...) -} - -// Mock is the workhorse used to track activity on another object. -// For an example of its usage, refer to the "Example Usage" section at the top -// of this document. -type Mock struct { - // Represents the calls that are expected of - // an object. - ExpectedCalls []*Call - - // Holds the calls that were made to this mocked object. - Calls []Call - - // test is An optional variable that holds the test struct, to be used when an - // invalid mock call was made. - test TestingT - - // TestData holds any data that might be useful for testing. Testify ignores - // this data completely allowing you to do whatever you like with it. - testData objx.Map - - mutex sync.Mutex -} - -// TestData holds any data that might be useful for testing. Testify ignores -// this data completely allowing you to do whatever you like with it. 
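A sketch of the chained expectation API defined above, reusing the hypothetical MockPersonStore from the previous sketch:

    package example

    import (
        "errors"
        "testing"

        "github.com/stretchr/testify/mock"
    )

    func TestChainedExpectations(t *testing.T) {
        m := new(MockPersonStore)

        m.On("SavePersonDetails", "Ada", "Lovelace", 36).
            Return(1815, nil).
            Once() // Repeatability = 1: consumed by the first matching call

        m.On("SavePersonDetails", mock.Anything, mock.Anything, mock.Anything).
            Return(0, errors.New("unknown person")).
            Maybe() // optional: no failure if this expectation is never hit

        if year, err := m.SavePersonDetails("Ada", "Lovelace", 36); err != nil || year != 1815 {
            t.Fatalf("got (%d, %v)", year, err)
        }
    }
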
-func (m *Mock) TestData() objx.Map { - - if m.testData == nil { - m.testData = make(objx.Map) - } - - return m.testData -} - -/* - Setting expectations -*/ - -// Test sets the test struct variable of the mock object -func (m *Mock) Test(t TestingT) { - m.mutex.Lock() - defer m.mutex.Unlock() - m.test = t -} - -// fail fails the current test with the given formatted format and args. -// In case that a test was defined, it uses the test APIs for failing a test, -// otherwise it uses panic. -func (m *Mock) fail(format string, args ...interface{}) { - m.mutex.Lock() - defer m.mutex.Unlock() - - if m.test == nil { - panic(fmt.Sprintf(format, args...)) - } - m.test.Errorf(format, args...) - m.test.FailNow() -} - -// On starts a description of an expectation of the specified method -// being called. -// -// Mock.On("MyMethod", arg1, arg2) -func (m *Mock) On(methodName string, arguments ...interface{}) *Call { - for _, arg := range arguments { - if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { - panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) - } - } - - m.mutex.Lock() - defer m.mutex.Unlock() - c := newCall(m, methodName, assert.CallerInfo(), arguments...) - m.ExpectedCalls = append(m.ExpectedCalls, c) - return c -} - -// /* -// Recording and responding to activity -// */ - -func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - for i, call := range m.ExpectedCalls { - if call.Method == method && call.Repeatability > -1 { - - _, diffCount := call.Arguments.Diff(arguments) - if diffCount == 0 { - return i, call - } - - } - } - return -1, nil -} - -func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { - var diffCount int - var closestCall *Call - var err string - - for _, call := range m.expectedCalls() { - if call.Method == method { - - errInfo, tempDiffCount := call.Arguments.Diff(arguments) - if tempDiffCount < diffCount || diffCount == 0 { - diffCount = tempDiffCount - closestCall = call - err = errInfo - } - - } - } - - return closestCall, err -} - -func callString(method string, arguments Arguments, includeArgumentValues bool) string { - - var argValsString string - if includeArgumentValues { - var argVals []string - for argIndex, arg := range arguments { - argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) - } - argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) - } - - return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) -} - -// Called tells the mock object that a method has been called, and gets an array -// of arguments to return. Panics if the call is unexpected (i.e. not preceded by -// appropriate .On .Return() calls) -// If Call.WaitFor is set, blocks until the channel is closed or receives a message. -func (m *Mock) Called(arguments ...interface{}) Arguments { - // get the calling function's name - pc, _, _, ok := runtime.Caller(1) - if !ok { - panic("Couldn't get the caller information") - } - functionPath := runtime.FuncForPC(pc).Name() - //Next four lines are required to use GCCGO function naming conventions. - //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock - //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree - //With GCCGO we need to remove interface information starting from pN
<dd>
. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] - } - parts := strings.Split(functionPath, ".") - functionName := parts[len(parts)-1] - return m.MethodCalled(functionName, arguments...) -} - -// MethodCalled tells the mock object that the given method has been called, and gets -// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded -// by appropriate .On .Return() calls) -// If Call.WaitFor is set, blocks until the channel is closed or receives a message. -func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { - m.mutex.Lock() - //TODO: could combine expected and closes in single loop - found, call := m.findExpectedCall(methodName, arguments...) - - if found < 0 { - // we have to fail here - because we don't know what to do - // as the return arguments. This is because: - // - // a) this is a totally unexpected call to this method, - // b) the arguments are not what was expected, or - // c) the developer has forgotten to add an accompanying On...Return pair. - - closestCall, mismatch := m.findClosestCall(methodName, arguments...) - m.mutex.Unlock() - - if closestCall != nil { - m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", - callString(methodName, arguments, true), - callString(methodName, closestCall.Arguments, true), - diffArguments(closestCall.Arguments, arguments), - strings.TrimSpace(mismatch), - ) - } else { - m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) - } - } - - if call.Repeatability == 1 { - call.Repeatability = -1 - } else if call.Repeatability > 1 { - call.Repeatability-- - } - call.totalCalls++ - - // add the call - m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) - m.mutex.Unlock() - - // block if specified - if call.WaitFor != nil { - <-call.WaitFor - } else { - time.Sleep(call.waitTime) - } - - m.mutex.Lock() - runFn := call.RunFn - m.mutex.Unlock() - - if runFn != nil { - runFn(arguments) - } - - m.mutex.Lock() - returnArgs := call.ReturnArguments - m.mutex.Unlock() - - return returnArgs -} - -/* - Assertions -*/ - -type assertExpectationser interface { - AssertExpectations(TestingT) bool -} - -// AssertExpectationsForObjects asserts that everything specified with On and Return -// of the specified objects was in fact called as expected. -// -// Calls may have occurred in any order. -func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - for _, obj := range testObjects { - if m, ok := obj.(Mock); ok { - t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") - obj = &m - } - m := obj.(assertExpectationser) - if !m.AssertExpectations(t) { - t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) - return false - } - } - return true -} - -// AssertExpectations asserts that everything specified with On and Return was -// in fact called as expected. Calls may have occurred in any order. 
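Continuing the hypothetical MockPersonStore sketch, the assertion entry points read as follows; AssertExpectationsForObjects is the variadic form for checking several mocks at once:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/mock"
    )

    func TestAllExpectationsMet(t *testing.T) {
        store := new(MockPersonStore)
        store.On("SavePersonDetails", "Ada", "Lovelace", 36).Return(1815, nil)

        store.SavePersonDetails("Ada", "Lovelace", 36)

        // Fails the test if any non-optional expectation was never satisfied.
        store.AssertExpectations(t)
        mock.AssertExpectationsForObjects(t, store)
    }
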
-func (m *Mock) AssertExpectations(t TestingT) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - var somethingMissing bool - var failedExpectations int - - // iterate through each expectation - expectedCalls := m.expectedCalls() - for _, expectedCall := range expectedCalls { - if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { - somethingMissing = true - failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - if expectedCall.Repeatability > 0 { - somethingMissing = true - failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } - } - } - - if somethingMissing { - t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) - } - - return !somethingMissing -} - -// AssertNumberOfCalls asserts that the method was called expectedCalls times. -func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - var actualCalls int - for _, call := range m.calls() { - if call.Method == methodName { - actualCalls++ - } - } - return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) -} - -// AssertCalled asserts that the method was called. -// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. -func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - if !m.methodWasCalled(methodName, arguments) { - var calledWithArgs []string - for _, call := range m.calls() { - calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments)) - } - if len(calledWithArgs) == 0 { - return assert.Fail(t, "Should have called with given arguments", - fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments)) - } - return assert.Fail(t, "Should have called with given arguments", - fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n"))) - } - return true -} - -// AssertNotCalled asserts that the method was not called. -// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. 
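Call accounting against the same hypothetical mock:

    package example

    import "testing"

    func TestCallAccounting(t *testing.T) {
        m := new(MockPersonStore)
        m.On("SavePersonDetails", "Ada", "Lovelace", 36).Return(1815, nil)

        m.SavePersonDetails("Ada", "Lovelace", 36)
        m.SavePersonDetails("Ada", "Lovelace", 36)

        m.AssertNumberOfCalls(t, "SavePersonDetails", 2)
        m.AssertCalled(t, "SavePersonDetails", "Ada", "Lovelace", 36)
        m.AssertNotCalled(t, "SavePersonDetails", "Grace", "Hopper", 45)
    }
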
-func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - if m.methodWasCalled(methodName, arguments) { - return assert.Fail(t, "Should not have called with given arguments", - fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments)) - } - return true -} - -func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { - for _, call := range m.calls() { - if call.Method == methodName { - - _, differences := Arguments(expected).Diff(call.Arguments) - - if differences == 0 { - // found the expected call - return true - } - - } - } - // we didn't find the expected call - return false -} - -func (m *Mock) expectedCalls() []*Call { - return append([]*Call{}, m.ExpectedCalls...) -} - -func (m *Mock) calls() []Call { - return append([]Call{}, m.Calls...) -} - -/* - Arguments -*/ - -// Arguments holds an array of method arguments or return values. -type Arguments []interface{} - -const ( - // Anything is used in Diff and Assert when the argument being tested - // shouldn't be taken into consideration. - Anything = "mock.Anything" -) - -// AnythingOfTypeArgument is a string that contains the type of an argument -// for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string - -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. -// -// For example: -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) -func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) -} - -// argumentMatcher performs custom argument matching, returning whether or -// not the argument is matched by the expectation fixture function. -type argumentMatcher struct { - // fn is a function which accepts one argument, and returns a bool. - fn reflect.Value -} - -func (f argumentMatcher) Matches(argument interface{}) bool { - expectType := f.fn.Type().In(0) - expectTypeNilSupported := false - switch expectType.Kind() { - case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: - expectTypeNilSupported = true - } - - argType := reflect.TypeOf(argument) - var arg reflect.Value - if argType == nil { - arg = reflect.New(expectType).Elem() - } else { - arg = reflect.ValueOf(argument) - } - - if argType == nil && !expectTypeNilSupported { - panic(errors.New("attempting to call matcher with nil for non-nil expected type")) - } - if argType == nil || argType.AssignableTo(expectType) { - result := f.fn.Call([]reflect.Value{arg}) - return result[0].Bool() - } - return false -} - -func (f argumentMatcher) String() string { - return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) -} - -// MatchedBy can be used to match a mock call based on only certain properties -// from a complex struct or some calculation. It takes a function that will be -// evaluated with the called argument and will return true when there's a match -// and false otherwise. -// -// Example: -// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) -// -// |fn|, must be a function accepting a single argument (of the expected type) -// which returns a bool. If |fn| doesn't match the required signature, -// MatchedBy() panics. 
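The two matcher styles defined above in one sketch (Requester is a hypothetical mock type; the MatchedBy predicate mirrors the example in its doc comment):

    package example

    import (
        "net/http"
        "testing"

        "github.com/stretchr/testify/mock"
    )

    // Requester is a hypothetical mock whose Do method takes an *http.Request.
    type Requester struct{ mock.Mock }

    func (r *Requester) Do(req *http.Request) error {
        return r.Called(req).Error(0)
    }

    func TestArgumentMatchers(t *testing.T) {
        r := new(Requester)

        // Match on a computed property of the argument.
        r.On("Do", mock.MatchedBy(func(req *http.Request) bool {
            return req.Host == "example.com"
        })).Return(nil)

        // AnythingOfType matches purely on the argument's dynamic type.
        r.On("Do", mock.AnythingOfType("*http.Request")).Return(nil)

        req, _ := http.NewRequest("GET", "http://example.com/", nil)
        if err := r.Do(req); err != nil {
            t.Fatal(err)
        }
    }
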
-func MatchedBy(fn interface{}) argumentMatcher { - fnType := reflect.TypeOf(fn) - - if fnType.Kind() != reflect.Func { - panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) - } - if fnType.NumIn() != 1 { - panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) - } - if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { - panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) - } - - return argumentMatcher{fn: reflect.ValueOf(fn)} -} - -// Get Returns the argument at the specified index. -func (args Arguments) Get(index int) interface{} { - if index+1 > len(args) { - panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) - } - return args[index] -} - -// Is gets whether the objects match the arguments specified. -func (args Arguments) Is(objects ...interface{}) bool { - for i, obj := range args { - if obj != objects[i] { - return false - } - } - return true -} - -// Diff gets a string describing the differences between the arguments -// and the specified objects. -// -// Returns the diff string and number of differences found. -func (args Arguments) Diff(objects []interface{}) (string, int) { - //TODO: could return string as error and nil for No difference - - var output = "\n" - var differences int - - var maxArgCount = len(args) - if len(objects) > maxArgCount { - maxArgCount = len(objects) - } - - for i := 0; i < maxArgCount; i++ { - var actual, expected interface{} - var actualFmt, expectedFmt string - - if len(objects) <= i { - actual = "(Missing)" - actualFmt = "(Missing)" - } else { - actual = objects[i] - actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) - } - - if len(args) <= i { - expected = "(Missing)" - expectedFmt = "(Missing)" - } else { - expected = args[i] - expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) - } - - if matcher, ok := expected.(argumentMatcher); ok { - if matcher.Matches(actual) { - output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) - } else { - differences++ - output = fmt.Sprintf("%s\t%d: PASS: %s not matched by %s\n", output, i, actualFmt, matcher) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) - } - - } else { - - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) - } - } - - } - - if differences == 0 { - return "No differences.", differences - } - - return output, differences - -} - -// Assert compares the arguments with the specified objects and fails if -// they do not exactly match. -func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - // get the differences - diff, diffCount := args.Diff(objects) - - if diffCount == 0 { - return true - } - - // there are differences... report them... 
- t.Logf(diff) - t.Errorf("%sArguments do not match.", assert.CallerInfo()) - - return false - -} - -// String gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -// -// If no index is provided, String() returns a complete string representation -// of the arguments. -func (args Arguments) String(indexOrNil ...int) string { - - if len(indexOrNil) == 0 { - // normal String() method - return a string representation of the args - var argsStr []string - for _, arg := range args { - argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) - } - return strings.Join(argsStr, ",") - } else if len(indexOrNil) == 1 { - // Index has been specified - get the argument at that index - var index = indexOrNil[0] - var s string - var ok bool - if s, ok = args.Get(index).(string); !ok { - panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) - } - return s - } - - panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) - -} - -// Int gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Int(index int) int { - var s int - var ok bool - if s, ok = args.Get(index).(int); !ok { - panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Error gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Error(index int) error { - obj := args.Get(index) - var s error - var ok bool - if obj == nil { - return nil - } - if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Bool gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Bool(index int) bool { - var s bool - var ok bool - if s, ok = args.Get(index).(bool); !ok { - panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -func diffArguments(expected Arguments, actual Arguments) string { - if len(expected) != len(actual) { - return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) - } - - for x := range expected { - if diffString := diff(expected[x], actual[x]); diffString != "" { - return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) - } - } - - return "" -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. 
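The typed getters above panic on a wrong index or type, so misconfigured tests fail loudly; with a hand-built Arguments value (reusing the argument list from the package comment):

    package example

    import (
        "errors"
        "fmt"

        "github.com/stretchr/testify/mock"
    )

    func ExampleArguments() {
        args := mock.Arguments{12, true, "Something", errors.New("boom")}

        fmt.Println(args.Int(0))    // 12
        fmt.Println(args.Bool(1))   // true
        fmt.Println(args.String(2)) // Something
        fmt.Println(args.Error(3))  // boom
    }
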
-func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { - return "" - } - - e := spewConfig.Sdump(expected) - a := spewConfig.Sdump(actual) - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return diff -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, -} - -type tHelper interface { - Helper() -} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..a3c021d3f --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. 
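A sketch of this compat package in use, following the conventions in the package comment above (fetchValue is a hypothetical helper; WithTimeout comes from the build-tagged files added below):

    package example

    import (
        "fmt"
        "time"

        "golang.org/x/net/context"
    )

    // fetchValue follows the convention above: ctx is the first parameter
    // and is never stored inside a struct.
    func fetchValue(ctx context.Context, key string) (string, error) {
        select {
        case <-time.After(10 * time.Millisecond): // simulated work
            return "value-for-" + key, nil
        case <-ctx.Done():
            return "", ctx.Err()
        }
    }

    func ExampleFetch() {
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel() // release the timer as soon as we are done

        v, err := fetchValue(ctx, "user:42")
        fmt.Println(v, err)
    }
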
+func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 000000000..d20f52b7d --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 000000000..d88bd1db1 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 000000000..0f35592df --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. 
This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. 
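The cancel/propagateCancel pair above is what makes parent cancelation cascade to children; observed from the outside it behaves like this minimal sketch:

    package example

    import (
        "fmt"

        "golang.org/x/net/context"
    )

    func ExamplePropagation() {
        parent, cancelParent := context.WithCancel(context.Background())
        child, cancelChild := context.WithCancel(parent)
        defer cancelChild()

        cancelParent() // cancel(true, Canceled) on the parent...

        <-child.Done()           // ...closes the child's done channel too
        fmt.Println(child.Err()) // context canceled
    }
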
+type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 000000000..b105f80be --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. 
+ // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. 
+type CancelFunc func() diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 6401e3a29..756510575 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -248,11 +248,13 @@ func Uname(uname *Utsname) error { //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -280,11 +282,14 @@ func Uname(uname *Utsname) error { //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) +//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(fd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) @@ -313,11 +318,13 @@ func Uname(uname *Utsname) error { //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) +//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Undelete(path string) (err error) //sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 871fe65c3..b711aca82 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -269,6 +269,7 @@ func Uname(uname *Utsname) error { //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -293,11 +294,15 @@ func Uname(uname *Utsname) error { //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) +//sys Linkat(pathfd int, 
path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) +//sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) @@ -306,7 +311,9 @@ func Uname(uname *Utsname) error { //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) +//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) +//sys Renameat(fromfd int, from string, tofd int, to string) (err error) //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK @@ -324,10 +331,12 @@ func Uname(uname *Utsname) error { //sysnb Setuid(uid int) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Symlink(path string, link string) (err error) +//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 2b9f26a63..5a398f817 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -233,6 +233,7 @@ func Uname(uname *Utsname) error { //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -259,11 +260,15 @@ func Uname(uname *Utsname) error { //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) +//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) +//sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm 
uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) @@ -272,7 +277,9 @@ func Uname(uname *Utsname) error { //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) +//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) +//sys Renameat(fromfd int, from string, tofd int, to string) (err error) //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK @@ -295,10 +302,12 @@ func Uname(uname *Utsname) error { //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) +//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) @@ -321,15 +330,11 @@ func Uname(uname *Utsname) error { // clock_settime // closefrom // execve -// faccessat -// fchmodat -// fchownat // fcntl // fhopen // fhstat // fhstatfs // fork -// fstatat // futimens // getfh // getgid @@ -343,12 +348,8 @@ func Uname(uname *Utsname) error { // lfs_markv // lfs_segclean // lfs_segwait -// linkat // mincore // minherit -// mkdirat -// mkfifoat -// mknodat // mount // mquery // msgctl @@ -361,7 +362,6 @@ func Uname(uname *Utsname) error { // profil // pwritev // quotactl -// readlinkat // readv // reboot // renameat @@ -382,13 +382,11 @@ func Uname(uname *Utsname) error { // sigprocmask // sigreturn // sigsuspend -// symlinkat // sysarch // syscall // threxit // thrsigdivert // thrsleep // thrwakeup -// unlinkat // vfork // writev diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 930499324..30c1d71f4 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -81,7 +81,10 @@ func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { // flags are unused on FreeBSD func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) + var d unsafe.Pointer + if len(data) > 0 { + d = unsafe.Pointer(&data[0]) + } datasiz := len(data) nsid, a, err := xattrnamespace(attr) @@ -94,7 +97,10 @@ func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { } func Setxattr(file string, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) + var d unsafe.Pointer + if len(data) > 0 { + d = unsafe.Pointer(&data[0]) + } datasiz := len(data) nsid, a, err := xattrnamespace(attr) @@ -107,7 +113,10 @@ func Setxattr(file string, attr string, data []byte, flags int) (err error) { } func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) + var d unsafe.Pointer + if len(data) > 0 { + d = unsafe.Pointer(&data[0]) + } datasiz := len(data) nsid, a, err := xattrnamespace(attr) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go 
b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 12da7b41f..96a671344 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -588,6 +588,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -643,6 +658,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -927,6 +957,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -967,6 +1017,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -997,6 +1062,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(fd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, 
uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1361,6 +1441,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1424,6 +1524,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index c0d856c55..9ed7c71fb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -865,6 +865,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1114,6 +1129,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1154,6 +1189,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1169,6 +1219,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1184,6 +1249,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1315,6 +1395,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1335,6 +1437,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1532,6 +1654,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1580,6 +1722,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 1466a8ca1..613b7fd99 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -865,6 +865,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1114,6 +1129,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1154,6 +1189,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1169,6 +1219,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} 
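
The hunks above and below add the same family of fd-relative (*at) wrappers to each BSD port of golang.org/x/sys/unix. As a rough usage sketch, not part of this diff and assuming the usual openat(2)-style semantics of the underlying syscalls, the new wrappers resolve paths against an open directory fd instead of the process working directory:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Anchor all relative operations to /tmp rather than the CWD.
	dirfd, err := unix.Open("/tmp", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(dirfd)

	// Create a subdirectory and a fifo, both resolved relative to dirfd.
	if err = unix.Mkdirat(dirfd, "scratch", 0755); err != nil {
		log.Fatal(err)
	}
	if err = unix.Mkfifoat(dirfd, "scratch.fifo", 0644); err != nil {
		log.Fatal(err)
	}

	// Remove the fifo through the same directory fd; flags is 0 for
	// non-directories (directory removal needs AT_REMOVEDIR where defined).
	if err = unix.Unlinkat(dirfd, "scratch.fifo", 0); err != nil {
		log.Fatal(err)
	}
}
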
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1184,6 +1249,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1315,6 +1395,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1335,6 +1437,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1532,6 +1654,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1580,6 +1722,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + 
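
Readlinkat, like the path-based Readlink above it, silently truncates when the destination buffer is too small, so callers conventionally retry with a doubled buffer. A short sketch of that idiom (illustrative only; readLinkAt is not a function in this diff):

package main

import "golang.org/x/sys/unix"

// readLinkAt resolves the symlink at path, interpreted relative to dirfd,
// growing the buffer until the target fits (n < len(buf) means no truncation).
func readLinkAt(dirfd int, path string) (string, error) {
	for size := 128; ; size *= 2 {
		buf := make([]byte, size)
		n, err := unix.Readlinkat(dirfd, path, buf)
		if err != nil {
			return "", err
		}
		if n < size {
			return string(buf[:n]), nil
		}
	}
}
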
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 2ca54f029..084750878 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -865,6 +865,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1114,6 +1129,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1154,6 +1189,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1169,6 +1219,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1184,6 +1249,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 
!= 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1315,6 +1395,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1335,6 +1437,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1532,6 +1654,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1580,6 +1722,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 082235681..414cd13c8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -650,6 +650,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path 
string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -920,6 +935,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -960,6 +995,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -975,6 +1025,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -990,6 +1055,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1121,6 +1201,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 
unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1141,6 +1243,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1398,6 +1520,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1446,6 +1588,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 3d0bae427..846f5fa64 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -650,6 +650,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -920,6 +935,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -960,6 +995,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -975,6 +1025,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -990,6 +1055,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1121,6 +1201,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1141,6 +1243,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1398,6 +1520,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1446,6 +1588,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 6422c4605..59911659d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -650,6 +650,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -920,6 +935,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, 
_, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -960,6 +995,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -975,6 +1025,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -990,6 +1055,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1121,6 +1201,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1141,6 +1243,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1398,6 
+1520,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1446,6 +1588,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/worker/db.go b/worker/db.go index a49209bb8..ba2c7289f 100644 --- a/worker/db.go +++ b/worker/db.go @@ -17,55 +17,60 @@ package worker import ( - "bytes" "context" - "io" "os" "path/filepath" - "runtime/trace" - "strings" + //"runtime/trace" "sync" "time" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" - ka "github.com/CovenantSQL/CovenantSQL/kayak/api" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/sqlchain" - "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" - "github.com/CovenantSQL/sqlparser" + "github.com/CovenantSQL/CovenantSQL/storage" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/pkg/errors" ) const ( // StorageFileName defines storage file name of database instance. StorageFileName = "storage.db3" + // KayakWalFileName defines log pool name of database instance. + KayakWalFileName = "kayak.ldb" + // SQLChainFileName defines sqlchain storage file name. SQLChainFileName = "chain.db" // MaxRecordedConnectionSequences defines the max connection slots to anti reply attack. MaxRecordedConnectionSequences = 1000 + + // PrepareThreshold defines the prepare complete threshold. + PrepareThreshold = 1.0 + + // CommitThreshold defines the commit complete threshold. + CommitThreshold = 1.0 ) // Database defines a single database instance in worker runtime. type Database struct { cfg *DBConfig dbID proto.DatabaseID - storage *storage.Storage + kayakWal *kl.LevelDBWal kayakRuntime *kayak.Runtime - kayakConfig kayak.Config + kayakConfig *kt.RuntimeConfig connSeqs sync.Map connSeqEvictCh chan uint64 chain *sqlchain.Chain + nodeID proto.NodeID + mux *DBKayakMuxService } // NewDatabase create a single database instance using config. 
-func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db *Database, err error) { +func NewDatabase(cfg *DBConfig, peers *proto.Peers, genesisBlock *types.Block) (db *Database, err error) { // ensure dir exists if err = os.MkdirAll(cfg.DataDir, 0755); err != nil { return @@ -80,6 +85,7 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db db = &Database{ cfg: cfg, dbID: cfg.DatabaseID, + mux: cfg.KayakMux, connSeqEvictCh: make(chan uint64, 1), } @@ -95,11 +101,6 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db if db.chain != nil { db.chain.Stop() } - - // close storage - if db.storage != nil { - db.storage.Close() - } } }() @@ -114,30 +115,24 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db storageDSN.AddParam("_crypto_key", cfg.EncryptionKey) } - if db.storage, err = storage.New(storageDSN.Format()); err != nil { - return - } - // init chain - var nodeID proto.NodeID chainFile := filepath.Join(cfg.DataDir, SQLChainFileName) - if nodeID, err = kms.GetLocalNodeID(); err != nil { + if db.nodeID, err = kms.GetLocalNodeID(); err != nil { return } // TODO(xq262144): make sqlchain config use of global config object chainCfg := &sqlchain.Config{ - DatabaseID: cfg.DatabaseID, - DataFile: chainFile, - Genesis: genesisBlock, - Peers: peers, + DatabaseID: cfg.DatabaseID, + ChainFilePrefix: chainFile, + DataFile: storageDSN.Format(), + Genesis: genesisBlock, + Peers: peers, // TODO(xq262144): should refactor server/node definition to conf/proto package // currently sqlchain package only use Server.ID as node id MuxService: cfg.ChainMux, - Server: &kayak.Server{ - ID: nodeID, - }, + Server: db.nodeID, // TODO(xq262144): currently using fixed period/resolution from sqlchain test case Period: 60 * time.Second, @@ -151,19 +146,37 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db } // init kayak config - options := ka.NewDefaultTwoPCOptions().WithTransportID(string(cfg.DatabaseID)) - db.kayakConfig = ka.NewTwoPCConfigWithOptions(cfg.DataDir, cfg.KayakMux, db, options) - - // create kayak runtime - if db.kayakRuntime, err = ka.NewTwoPCKayak(peers, db.kayakConfig); err != nil { + kayakWalPath := filepath.Join(cfg.DataDir, KayakWalFileName) + if db.kayakWal, err = kl.NewLevelDBWal(kayakWalPath); err != nil { + err = errors.Wrap(err, "init kayak log pool failed") return } - // init kayak runtime - if err = db.kayakRuntime.Init(); err != nil { + db.kayakConfig = &kt.RuntimeConfig{ + Handler: db, + PrepareThreshold: PrepareThreshold, + CommitThreshold: CommitThreshold, + PrepareTimeout: time.Second, + CommitTimeout: time.Second * 60, + Peers: peers, + Wal: db.kayakWal, + NodeID: db.nodeID, + InstanceID: string(db.dbID), + ServiceName: DBKayakRPCName, + MethodName: DBKayakMethodName, + } + + // create kayak runtime + if db.kayakRuntime, err = kayak.NewRuntime(db.kayakConfig); err != nil { return } + // register kayak runtime rpc + db.mux.register(db.dbID, db.kayakRuntime) + + // start kayak runtime + db.kayakRuntime.Start() + // init sequence eviction processor go db.evictSequences() @@ -171,7 +184,7 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db } // UpdatePeers defines peers update query interface. 
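
The NewDatabase refactor above swaps the old twopc wiring for a kayak runtime backed by a LevelDB WAL. A condensed sketch of that flow follows, assembled only from calls that appear in this diff; applySketch and its parameters are illustrative placeholders, and kt.Handler is assumed to be the interface named in the kayak.types.Handler comments further down:

package worker

import (
	"context"
	"path/filepath"
	"time"

	"github.com/CovenantSQL/CovenantSQL/kayak"
	kt "github.com/CovenantSQL/CovenantSQL/kayak/types"
	kl "github.com/CovenantSQL/CovenantSQL/kayak/wal"
	"github.com/CovenantSQL/CovenantSQL/proto"
	"github.com/CovenantSQL/CovenantSQL/types"
	"github.com/pkg/errors"
)

// applySketch wires a LevelDB WAL into a kayak runtime and pushes a single
// request through it, mirroring NewDatabase/writeQuery in this diff.
func applySketch(dataDir string, dbID proto.DatabaseID, handler kt.Handler,
	peers *proto.Peers, nodeID proto.NodeID, request *types.Request) (resp *types.Response, err error) {
	wal, err := kl.NewLevelDBWal(filepath.Join(dataDir, KayakWalFileName))
	if err != nil {
		return nil, errors.Wrap(err, "init kayak log pool failed")
	}
	rt, err := kayak.NewRuntime(&kt.RuntimeConfig{
		Handler:          handler, // EncodePayload/DecodePayload/Check/Commit
		PrepareThreshold: PrepareThreshold,
		CommitThreshold:  CommitThreshold,
		PrepareTimeout:   time.Second,
		CommitTimeout:    time.Second * 60,
		Peers:            peers,
		Wal:              wal,
		NodeID:           nodeID,
		InstanceID:       string(dbID),
		ServiceName:      DBKayakRPCName,
		MethodName:       DBKayakMethodName,
	})
	if err != nil {
		return nil, err
	}
	rt.Start()
	defer rt.Shutdown()

	// Apply replicates the request through the WAL and returns the commit result.
	result, _, err := rt.Apply(context.Background(), request)
	if err != nil {
		return nil, errors.Wrap(err, "apply failed")
	}
	var ok bool
	if resp, ok = result.(*types.Response); !ok {
		return nil, errors.New("invalid response type")
	}
	return resp, nil
}
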
-func (db *Database) UpdatePeers(peers *kayak.Peers) (err error) { +func (db *Database) UpdatePeers(peers *proto.Peers) (err error) { if err = db.kayakRuntime.UpdatePeers(peers); err != nil { return } @@ -180,25 +193,25 @@ func (db *Database) UpdatePeers(peers *kayak.Peers) (err error) { } // Query defines database query interface. -func (db *Database) Query(request *wt.Request) (response *wt.Response, err error) { +func (db *Database) Query(request *types.Request) (response *types.Response, err error) { // Just need to verify signature in db.saveAck //if err = request.Verify(); err != nil { // return //} switch request.Header.QueryType { - case wt.ReadQuery: - return db.readQuery(request) - case wt.WriteQuery: + case types.ReadQuery: + return db.chain.Query(request) + case types.WriteQuery: return db.writeQuery(request) default: // TODO(xq262144): verbose errors with custom error structure - return nil, ErrInvalidRequest + return nil, errors.Wrap(ErrInvalidRequest, "invalid query type") } } // Ack defines client response ack interface. -func (db *Database) Ack(ack *wt.Ack) (err error) { +func (db *Database) Ack(ack *types.Ack) (err error) { // Just need to verify signature in db.saveAck //if err = ack.Verify(); err != nil { // return @@ -214,6 +227,14 @@ func (db *Database) Shutdown() (err error) { if err = db.kayakRuntime.Shutdown(); err != nil { return } + + // unregister + db.mux.unregister(db.dbID) + } + + if db.kayakWal != nil { + // shutdown, stop kayak + db.kayakWal.Close() } if db.chain != nil { @@ -223,13 +244,6 @@ func (db *Database) Shutdown() (err error) { } } - if db.storage != nil { - // stop storage - if err = db.storage.Close(); err != nil { - return - } - } - if db.connSeqEvictCh != nil { // stop connection sequence evictions select { @@ -257,11 +271,11 @@ func (db *Database) Destroy() (err error) { return } -func (db *Database) writeQuery(request *wt.Request) (response *wt.Response, err error) { - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "writeQuery") - defer task.End() - defer trace.StartRegion(ctx, "writeQueryRegion").End() +func (db *Database) writeQuery(request *types.Request) (response *types.Response, err error) { + //ctx := context.Background() + //ctx, task := trace.NewTask(ctx, "writeQuery") + //defer task.End() + //defer trace.StartRegion(ctx, "writeQueryRegion").End() // check database size first, wal/kayak/chain database size is not included if db.cfg.SpaceLimit > 0 { @@ -281,148 +295,25 @@ func (db *Database) writeQuery(request *wt.Request) (response *wt.Response, err } // call kayak runtime Process - var buf *bytes.Buffer - if buf, err = utils.EncodeMsgPack(request); err != nil { - return - } - - var logOffset uint64 - logOffset, err = db.kayakRuntime.Apply(buf.Bytes()) - - if err != nil { - return - } - - return db.buildQueryResponse(request, logOffset, []string{}, []string{}, [][]interface{}{}) -} - -func (db *Database) readQuery(request *wt.Request) (response *wt.Response, err error) { - // call storage query directly - // TODO(xq262144): add timeout logic basic of client options - var columns, types []string - var data [][]interface{} - var queries []storage.Query - - // sanitize dangerous queries - if queries, err = convertAndSanitizeQuery(request.Payload.Queries); err != nil { + var result interface{} + if result, _, err = db.kayakRuntime.Apply(context.Background(), request); err != nil { + err = errors.Wrap(err, "apply failed") return } - columns, types, data, err = db.storage.Query(context.Background(), queries) - if err != nil 
{ + var ok bool + if response, ok = result.(*types.Response); !ok { + err = errors.New("invalid response type") + return + } - return db.buildQueryResponse(request, 0, columns, types, data) -} - -func (db *Database) buildQueryResponse(request *wt.Request, offset uint64, - columns []string, types []string, data [][]interface{}) (response *wt.Response, err error) { - // build response - response = new(wt.Response) - response.Header.Request = request.Header - if response.Header.NodeID, err = kms.GetLocalNodeID(); err != nil { - return - } - response.Header.LogOffset = offset - response.Header.Timestamp = getLocalTime() - response.Header.RowCount = uint64(len(data)) - - // set payload - response.Payload.Columns = columns - response.Payload.DeclTypes = types - response.Payload.Rows = make([]wt.ResponseRow, len(data)) - - for i, d := range data { - response.Payload.Rows[i].Values = d - } - - // sign fields - var privateKey *asymmetric.PrivateKey - if privateKey, err = getLocalPrivateKey(); err != nil { - return - } - if err = response.Sign(privateKey); err != nil { - return - } - - // record response for future ack process - err = db.saveResponse(&response.Header) return } -func (db *Database) saveResponse(respHeader *wt.SignedResponseHeader) (err error) { - return db.chain.VerifyAndPushResponsedQuery(respHeader) -} - -func (db *Database) saveAck(ackHeader *wt.SignedAckHeader) (err error) { +func (db *Database) saveAck(ackHeader *types.SignedAckHeader) (err error) { return db.chain.VerifyAndPushAckedQuery(ackHeader) } func getLocalTime() time.Time { return time.Now().UTC() } - -func getLocalPrivateKey() (privateKey *asymmetric.PrivateKey, err error) { - return kms.GetLocalPrivateKey() -} - -func convertAndSanitizeQuery(inQuery []wt.Query) (outQuery []storage.Query, err error) { - outQuery = make([]storage.Query, len(inQuery)) - for i, q := range inQuery { - tokenizer := sqlparser.NewStringTokenizer(q.Pattern) - var stmt sqlparser.Statement - var lastPos int - var query string - var originalQueries []string - - for { - stmt, err = sqlparser.ParseNext(tokenizer) - - if err != nil && err != io.EOF { - return - } - - if err == io.EOF { - err = nil - break - } - - query = q.Pattern[lastPos : tokenizer.Position-1] - lastPos = tokenizer.Position + 1 - - // translate show statement - if showStmt, ok := stmt.(*sqlparser.Show); ok { - origQuery := query - - switch showStmt.Type { - case "table": - if showStmt.ShowCreate { - query = "SELECT sql FROM sqlite_master WHERE type = \"table\" AND tbl_name = \"" + - showStmt.OnTable.Name.String() + "\"" - } else { - query = "PRAGMA table_info(" + showStmt.OnTable.Name.String() + ")" - } - case "index": - query = "SELECT name FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"" + - showStmt.OnTable.Name.String() + "\"" - case "tables": - query = "SELECT name FROM sqlite_master WHERE type = \"table\"" - } - - log.WithFields(log.Fields{ - "from": origQuery, - "to": query, - }).Debug("query translated") - } - - originalQueries = append(originalQueries, query) - } - - outQuery[i] = storage.Query{ - Pattern: strings.Join(originalQueries, "; "), - Args: q.Args, - } - } - return -} diff --git a/worker/db_config.go b/worker/db_config.go index 22508513f..7a2b5b1dc 100644 --- a/worker/db_config.go +++ b/worker/db_config.go @@ -19,7 +19,6 @@ package worker import ( "time" - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/sqlchain" ) @@ -28,7 +27,7 @@ import ( type DBConfig
struct { DatabaseID proto.DatabaseID DataDir string - KayakMux *kt.ETLSTransportService + KayakMux *DBKayakMuxService ChainMux *sqlchain.MuxService MaxWriteTimeGap time.Duration EncryptionKey string diff --git a/worker/db_storage.go b/worker/db_storage.go index 6407e9c0d..b56f6bbee 100644 --- a/worker/db_storage.go +++ b/worker/db_storage.go @@ -17,86 +17,53 @@ package worker import ( + "bytes" "container/list" - "context" - "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" - "github.com/CovenantSQL/CovenantSQL/twopc" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" ) // Following contains storage related logic extracted from main database instance definition. -// Prepare implements twopc.Worker.Prepare. -func (db *Database) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) { - // wrap storage with signature check - var log *storage.ExecLog - if log, err = db.convertRequest(wb); err != nil { - return - } - return db.storage.Prepare(ctx, log) -} +// EncodePayload implements kayak.types.Handler.EncodePayload. +func (db *Database) EncodePayload(request interface{}) (data []byte, err error) { + var buf *bytes.Buffer -// Commit implements twopc.Worker.Commmit. -func (db *Database) Commit(ctx context.Context, wb twopc.WriteBatch) (err error) { - // wrap storage with signature check - var log *storage.ExecLog - if log, err = db.convertRequest(wb); err != nil { + if buf, err = utils.EncodeMsgPack(request); err != nil { + err = errors.Wrap(err, "encode request failed") return } - db.recordSequence(log) - return db.storage.Commit(ctx, log) -} -// Rollback implements twopc.Worker.Rollback. -func (db *Database) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) { - // wrap storage with signature check - var log *storage.ExecLog - if log, err = db.convertRequest(wb); err != nil { - return - } - db.recordSequence(log) - return db.storage.Rollback(ctx, log) + data = buf.Bytes() + return } -func (db *Database) recordSequence(log *storage.ExecLog) { - db.connSeqs.Store(log.ConnectionID, log.SeqNo) -} +// DecodePayload implements kayak.types.Handler.DecodePayload. +func (db *Database) DecodePayload(data []byte) (request interface{}, err error) { + var req *types.Request -func (db *Database) verifySequence(log *storage.ExecLog) (err error) { - var data interface{} - var ok bool - var lastSeq uint64 - - if data, ok = db.connSeqs.Load(log.ConnectionID); ok { - lastSeq, _ = data.(uint64) - - if log.SeqNo <= lastSeq { - return ErrInvalidRequestSeq - } + if err = utils.DecodeMsgPack(data, &req); err != nil { + err = errors.Wrap(err, "decode request failed") + return } + request = req + return } -func (db *Database) convertRequest(wb twopc.WriteBatch) (log *storage.ExecLog, err error) { +// Check implements kayak.types.Handler.Check. 
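
Taken together, EncodePayload and DecodePayload above plus Check and Commit below spell out the kayak handler contract that Database now satisfies. Written out as an inferred sketch (the canonical definition lives in kayak/types and may differ in detail):

// Handler is the contract implied by the kayak.types.Handler comments in this file.
type Handler interface {
	// EncodePayload serializes a request for the replication log.
	EncodePayload(request interface{}) (data []byte, err error)
	// DecodePayload turns a log entry back into a request.
	DecodePayload(data []byte) (request interface{}, err error)
	// Check validates a request (signature, time window, sequence) before replication.
	Check(request interface{}) (err error)
	// Commit applies a replicated request and returns its execution result.
	Commit(request interface{}) (result interface{}, err error)
}
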
+func (db *Database) Check(rawReq interface{}) (err error) { + var req *types.Request var ok bool - - // type convert - var payloadBytes []byte - if payloadBytes, ok = wb.([]byte); !ok { - err = ErrInvalidRequest - return - } - - // decode - var req wt.Request - if err = utils.DecodeMsgPack(payloadBytes, &req); err != nil { + if req, ok = rawReq.(*types.Request); !ok || req == nil { + err = errors.Wrap(ErrInvalidRequest, "invalid request payload") return } - // verify + // verify signature, check time/sequence only if err = req.Verify(); err != nil { return } @@ -107,26 +74,52 @@ func (db *Database) convertRequest(wb twopc.WriteBatch) (log *storage.ExecLog, e maxTime := nowTime.Add(db.cfg.MaxWriteTimeGap) if req.Header.Timestamp.Before(minTime) || req.Header.Timestamp.After(maxTime) { - err = ErrInvalidRequest + err = errors.Wrap(ErrInvalidRequest, "invalid request time") return } - // convert - log = new(storage.ExecLog) - log.ConnectionID = req.Header.ConnectionID - log.SeqNo = req.Header.SeqNo - log.Timestamp = req.Header.Timestamp.UnixNano() - - // sanitize dangerous query - if log.Queries, err = convertAndSanitizeQuery(req.Payload.Queries); err != nil { + // verify sequence + if err = db.verifySequence(req.Header.ConnectionID, req.Header.SeqNo); err != nil { return } - // verify connection sequence - if err = db.verifySequence(log); err != nil { + // record sequence + db.recordSequence(req.Header.ConnectionID, req.Header.SeqNo) + + return +} + +// Commit implements kayak.types.Handler.Commit. +func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { + // convert query and check syntax + var req *types.Request + var ok bool + if req, ok = rawReq.(*types.Request); !ok || req == nil { + err = errors.Wrap(ErrInvalidRequest, "invalid request payload") return } + // execute + return db.chain.Query(req) +} + +func (db *Database) recordSequence(connID uint64, seqNo uint64) { + db.connSeqs.Store(connID, seqNo) +} + +func (db *Database) verifySequence(connID uint64, seqNo uint64) (err error) { + var data interface{} + var ok bool + var lastSeq uint64 + + if data, ok = db.connSeqs.Load(connID); ok { + lastSeq, _ = data.(uint64) + + if seqNo <= lastSeq { + return ErrInvalidRequestSeq + } + } + return } diff --git a/worker/db_test.go b/worker/db_test.go index 4e576bc5c..36561d5bc 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -29,23 +29,19 @@ import ( "testing" "time" - bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/consistent" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" - ka "github.com/CovenantSQL/CovenantSQL/kayak/api" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/sqlchain" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/fortytw2/leaktest" . 
"github.com/smartystreets/goconvey/convey" ) @@ -69,10 +65,14 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) // create mux service - service := ka.NewMuxService("DBKayak", server) + kayakMuxService, err := NewDBKayakMuxService("DBKayak", server) + So(err, ShouldBeNil) + + chainMuxService, err := sqlchain.NewMuxService("sqlchain", server) + So(err, ShouldBeNil) // create peers - var peers *kayak.Peers + var peers *proto.Peers peers, err = getPeers(1) So(err, ShouldBeNil) @@ -80,13 +80,13 @@ func TestSingleDatabase(t *testing.T) { cfg := &DBConfig{ DatabaseID: "TEST", DataDir: rootDir, - KayakMux: service, - ChainMux: sqlchain.NewMuxService("sqlchain", server), + KayakMux: kayakMuxService, + ChainMux: chainMuxService, MaxWriteTimeGap: time.Second * 5, } // create genesis block - var block *ct.Block + var block *types.Block block, err = createRandomBlock(rootHash, true) So(err, ShouldBeNil) @@ -97,9 +97,9 @@ func TestSingleDatabase(t *testing.T) { Convey("test query rewrite", func() { // test query rewrite - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (col1 int, col2 string)", "create index test_index on test (col1)", }) @@ -111,8 +111,8 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) // test show tables query - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "show tables", }) So(err, ShouldBeNil) @@ -128,7 +128,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows[0].Values[0], ShouldResemble, []byte("test")) // test show full tables query - readQuery, err = buildQuery(wt.ReadQuery, 1, 3, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 3, []string{ "show full tables", }) So(err, ShouldBeNil) @@ -144,7 +144,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows[0].Values[0], ShouldResemble, []byte("test")) // test show create table - readQuery, err = buildQuery(wt.ReadQuery, 1, 4, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 4, []string{ "show create table test", }) So(err, ShouldBeNil) @@ -162,7 +162,7 @@ func TestSingleDatabase(t *testing.T) { So(strings.ToUpper(string(byteStr)), ShouldContainSubstring, "CREATE") // test show table - readQuery, err = buildQuery(wt.ReadQuery, 1, 5, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 5, []string{ "show table test", }) So(err, ShouldBeNil) @@ -180,7 +180,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows[1].Values[1], ShouldResemble, []byte("col2")) // test desc table - readQuery, err = buildQuery(wt.ReadQuery, 1, 6, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 6, []string{ "desc test", }) So(err, ShouldBeNil) @@ -198,7 +198,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows[1].Values[1], ShouldResemble, []byte("col2")) // test show index from table - readQuery, err = buildQuery(wt.ReadQuery, 1, 7, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 7, []string{ "show index from table test", }) So(err, ShouldBeNil) @@ -216,9 +216,9 @@ func TestSingleDatabase(t *testing.T) { Convey("test read write", func() { // test write query - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request 
+ var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (test int)", "insert into test values(1)", }) @@ -231,8 +231,8 @@ func TestSingleDatabase(t *testing.T) { So(res.Header.RowCount, ShouldEqual, 0) // test select query - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -254,9 +254,9 @@ func TestSingleDatabase(t *testing.T) { }) Convey("test invalid request", func() { - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (test int)", "insert into test values(1)", }) @@ -270,43 +270,43 @@ func TestSingleDatabase(t *testing.T) { So(res.Header.RowCount, ShouldEqual, 0) // request again with same sequence - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "insert into test values(2)", }) res, err = db.Query(writeQuery) So(err, ShouldNotBeNil) // request again with low sequence - writeQuery, err = buildQuery(wt.WriteQuery, 1, 0, []string{ + writeQuery, err = buildQuery(types.WriteQuery, 1, 0, []string{ "insert into test values(3)", }) res, err = db.Query(writeQuery) So(err, ShouldNotBeNil) // request with invalid timestamp - writeQuery, err = buildQueryWithTimeShift(wt.WriteQuery, 1, 2, time.Second*100, []string{ + writeQuery, err = buildQueryWithTimeShift(types.WriteQuery, 1, 2, time.Second*100, []string{ "insert into test values(4)", }) res, err = db.Query(writeQuery) So(err, ShouldNotBeNil) // request with invalid timestamp - writeQuery, err = buildQueryWithTimeShift(wt.WriteQuery, 1, 2, -time.Second*100, []string{ + writeQuery, err = buildQueryWithTimeShift(types.WriteQuery, 1, 2, -time.Second*100, []string{ "insert into test values(5)", }) res, err = db.Query(writeQuery) So(err, ShouldNotBeNil) // request with different connection id - writeQuery, err = buildQuery(wt.WriteQuery, 2, 1, []string{ + writeQuery, err = buildQuery(types.WriteQuery, 2, 1, []string{ "insert into test values(6)", }) res, err = db.Query(writeQuery) So(err, ShouldBeNil) // read query, test records - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -329,7 +329,7 @@ func TestSingleDatabase(t *testing.T) { }) Convey("corner case", func() { - var req *wt.Request + var req *types.Request var err error req, err = buildQuery(-1, 1, 1, []string{ "create table test (test int)", @@ -338,9 +338,9 @@ func TestSingleDatabase(t *testing.T) { _, err = db.Query(req) So(err, ShouldNotBeNil) - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (test int)", }) So(err, ShouldBeNil) @@ -348,8 +348,8 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) // read query, test records - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 
2, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -364,7 +364,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows, ShouldBeEmpty) // write query, test failed - writeQuery, err = buildQuery(wt.WriteQuery, 1, 3, []string{ + writeQuery, err = buildQuery(types.WriteQuery, 1, 3, []string{ "insert into test2 values(1)", // table should not exists }) So(err, ShouldBeNil) @@ -372,7 +372,7 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldNotBeNil) // read query, test dynamic fields - readQuery, err = buildQuery(wt.ReadQuery, 1, 4, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 4, []string{ "select 1 as test", }) So(err, ShouldBeNil) @@ -387,7 +387,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows, ShouldNotBeEmpty) // test ack - var ack *wt.Ack + var ack *types.Ack ack, err = buildAck(res) So(err, ShouldBeNil) @@ -426,10 +426,14 @@ func TestInitFailed(t *testing.T) { defer os.RemoveAll(rootDir) // create mux service - service := ka.NewMuxService("DBKayak", server) + kayakMuxService, err := NewDBKayakMuxService("DBKayak", server) + So(err, ShouldBeNil) + + chainMuxService, err := sqlchain.NewMuxService("sqlchain", server) + So(err, ShouldBeNil) // create peers - var peers *kayak.Peers + var peers *proto.Peers peers, err = getPeers(1) So(err, ShouldBeNil) @@ -437,13 +441,13 @@ func TestInitFailed(t *testing.T) { cfg := &DBConfig{ DatabaseID: "TEST", DataDir: rootDir, - KayakMux: service, - ChainMux: sqlchain.NewMuxService("sqlchain", server), + KayakMux: kayakMuxService, + ChainMux: chainMuxService, MaxWriteTimeGap: time.Duration(5 * time.Second), } // create genesis block - var block *ct.Block + var block *types.Block block, err = createRandomBlock(rootHash, true) So(err, ShouldBeNil) @@ -475,10 +479,14 @@ func TestDatabaseRecycle(t *testing.T) { So(err, ShouldBeNil) // create mux service - service := ka.NewMuxService("DBKayak", server) + kayakMuxService, err := NewDBKayakMuxService("DBKayak", server) + So(err, ShouldBeNil) + + chainMuxService, err := sqlchain.NewMuxService("sqlchain", server) + So(err, ShouldBeNil) // create peers - var peers *kayak.Peers + var peers *proto.Peers peers, err = getPeers(1) So(err, ShouldBeNil) @@ -486,13 +494,13 @@ func TestDatabaseRecycle(t *testing.T) { cfg := &DBConfig{ DatabaseID: "TEST", DataDir: rootDir, - KayakMux: service, - ChainMux: sqlchain.NewMuxService("sqlchain", server), + KayakMux: kayakMuxService, + ChainMux: chainMuxService, MaxWriteTimeGap: time.Duration(5 * time.Second), } // create genesis block - var block *ct.Block + var block *types.Block block, err = createRandomBlock(rootHash, true) So(err, ShouldBeNil) @@ -502,9 +510,9 @@ func TestDatabaseRecycle(t *testing.T) { So(err, ShouldBeNil) // do some query - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (test int)", "insert into test values(1)", }) @@ -517,8 +525,8 @@ func TestDatabaseRecycle(t *testing.T) { So(res.Header.RowCount, ShouldEqual, 0) // test select query - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -543,7 +551,7 @@ func TestDatabaseRecycle(t *testing.T) { }) } -func buildAck(res *wt.Response) (ack *wt.Ack, err error) { +func buildAck(res 
*types.Response) (ack *types.Ack, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -557,9 +565,9 @@ func buildAck(res *wt.Response) (ack *wt.Ack, err error) { return } - ack = &wt.Ack{ - Header: wt.SignedAckHeader{ - AckHeader: wt.AckHeader{ + ack = &types.Ack{ + Header: types.SignedAckHeader{ + AckHeader: types.AckHeader{ Response: res.Header, NodeID: nodeID, Timestamp: getLocalTime(), @@ -572,19 +580,19 @@ func buildAck(res *wt.Response) (ack *wt.Ack, err error) { return } -func buildQuery(queryType wt.QueryType, connID uint64, seqNo uint64, queries []string) (query *wt.Request, err error) { +func buildQuery(queryType types.QueryType, connID uint64, seqNo uint64, queries []string) (query *types.Request, err error) { return buildQueryEx(queryType, connID, seqNo, time.Duration(0), proto.DatabaseID(""), queries) } -func buildQueryWithDatabaseID(queryType wt.QueryType, connID uint64, seqNo uint64, databaseID proto.DatabaseID, queries []string) (query *wt.Request, err error) { +func buildQueryWithDatabaseID(queryType types.QueryType, connID uint64, seqNo uint64, databaseID proto.DatabaseID, queries []string) (query *types.Request, err error) { return buildQueryEx(queryType, connID, seqNo, time.Duration(0), databaseID, queries) } -func buildQueryWithTimeShift(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift time.Duration, queries []string) (query *wt.Request, err error) { +func buildQueryWithTimeShift(queryType types.QueryType, connID uint64, seqNo uint64, timeShift time.Duration, queries []string) (query *types.Request, err error) { return buildQueryEx(queryType, connID, seqNo, timeShift, proto.DatabaseID(""), queries) } -func buildQueryEx(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift time.Duration, databaseID proto.DatabaseID, queries []string) (query *wt.Request, err error) { +func buildQueryEx(queryType types.QueryType, connID uint64, seqNo uint64, timeShift time.Duration, databaseID proto.DatabaseID, queries []string) (query *types.Request, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -602,15 +610,15 @@ func buildQueryEx(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift tm = tm.Add(-timeShift) // build queries - realQueries := make([]wt.Query, len(queries)) + realQueries := make([]types.Query, len(queries)) for i, v := range queries { realQueries[i].Pattern = v } - query = &wt.Request{ - Header: wt.SignedRequestHeader{ - RequestHeader: wt.RequestHeader{ + query = &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ DatabaseID: databaseID, QueryType: queryType, NodeID: nodeID, @@ -619,7 +627,7 @@ func buildQueryEx(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift Timestamp: tm, }, }, - Payload: wt.RequestPayload{ + Payload: types.RequestPayload{ Queries: realQueries, }, } @@ -629,7 +637,7 @@ func buildQueryEx(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift return } -func getPeers(term uint64) (peers *kayak.Peers, err error) { +func getPeers(term uint64) (peers *proto.Peers, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -637,24 +645,19 @@ func getPeers(term uint64) (peers *kayak.Peers, err error) { } // get private/public key - var pubKey *asymmetric.PublicKey var privateKey *asymmetric.PrivateKey - if privateKey, pubKey, err = getKeys(); err != nil { + if privateKey, _, err = getKeys(); err != nil { 
return } // generate peers and sign - server := &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - } - peers = &kayak.Peers{ - Term: term, - Leader: server, - Servers: []*kayak.Server{server}, - PubKey: pubKey, + peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: term, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, + }, } err = peers.Sign(privateKey) return @@ -737,7 +740,7 @@ func initNode() (cleanupFunc func(), server *rpc.Server, err error) { } // copied from sqlchain.xxx_test. -func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -748,9 +751,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: rootHash, @@ -758,12 +761,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error Timestamp: time.Now().UTC(), }, }, - Queries: make([]*hash.Hash, rand.Intn(10)+10), - } - - for i := range b.Queries { - b.Queries[i] = new(hash.Hash) - rand.Read(b.Queries[i][:]) } if isGenesis { @@ -796,7 +793,7 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error // fake BPDB service type stubBPDBService struct{} -func (s *stubBPDBService) CreateDatabase(req *bp.CreateDatabaseRequest, resp *bp.CreateDatabaseResponse) (err error) { +func (s *stubBPDBService) CreateDatabase(req *types.CreateDatabaseRequest, resp *types.CreateDatabaseResponse) (err error) { if resp.Header.InstanceMeta, err = s.getInstanceMeta("db2"); err != nil { return } @@ -811,11 +808,11 @@ func (s *stubBPDBService) CreateDatabase(req *bp.CreateDatabaseRequest, resp *bp return } -func (s *stubBPDBService) DropDatabase(req *bp.DropDatabaseRequest, resp *bp.DropDatabaseRequest) (err error) { +func (s *stubBPDBService) DropDatabase(req *types.DropDatabaseRequest, resp *types.DropDatabaseRequest) (err error) { return } -func (s *stubBPDBService) GetDatabase(req *bp.GetDatabaseRequest, resp *bp.GetDatabaseResponse) (err error) { +func (s *stubBPDBService) GetDatabase(req *types.GetDatabaseRequest, resp *types.GetDatabaseResponse) (err error) { if resp.Header.InstanceMeta, err = s.getInstanceMeta(req.Header.DatabaseID); err != nil { return } @@ -830,8 +827,8 @@ func (s *stubBPDBService) GetDatabase(req *bp.GetDatabaseRequest, resp *bp.GetDa return } -func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitServiceResponse) (err error) { - resp.Header.Instances = make([]wt.ServiceInstance, 1) +func (s *stubBPDBService) GetNodeDatabases(req *types.InitService, resp *types.InitServiceResponse) (err error) { + resp.Header.Instances = make([]types.ServiceInstance, 1) resp.Header.Instances[0], err = s.getInstanceMeta("db2") if resp.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { return @@ -846,12 +843,7 @@ func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitSer return } -func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - +func (s *stubBPDBService) getInstanceMeta(dbID 
proto.DatabaseID) (instance types.ServiceInstance, err error) { var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -863,21 +855,12 @@ func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.Se } instance.DatabaseID = proto.DatabaseID(dbID) - instance.Peers = &kayak.Peers{ - Term: 1, - Leader: &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, + instance.Peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 1, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, }, - PubKey: pubKey, } if err = instance.Peers.Sign(privKey); err != nil { return diff --git a/worker/dbms.go b/worker/dbms.go index 4eb6ff748..647d4d786 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -23,17 +23,14 @@ import ( "path/filepath" "sync" - "github.com/pkg/errors" - - ka "github.com/CovenantSQL/CovenantSQL/kayak/api" - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/sqlchain" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" ) const ( @@ -48,7 +45,7 @@ const ( type DBMS struct { cfg *DBMSConfig dbMap sync.Map - kayakMux *kt.ETLSTransportService + kayakMux *DBKayakMuxService chainMux *sqlchain.MuxService rpc *DBMSRPCService } @@ -60,10 +57,16 @@ func NewDBMS(cfg *DBMSConfig) (dbms *DBMS, err error) { } // init kayak rpc mux - dbms.kayakMux = ka.NewMuxService(DBKayakRPCName, cfg.Server) + if dbms.kayakMux, err = NewDBKayakMuxService(DBKayakRPCName, cfg.Server); err != nil { + err = errors.Wrap(err, "register kayak mux service failed") + return + } // init sql-chain rpc mux - dbms.chainMux = sqlchain.NewMuxService(route.SQLChainRPCName, cfg.Server) + if dbms.chainMux, err = sqlchain.NewMuxService(route.SQLChainRPCName, cfg.Server); err != nil { + err = errors.Wrap(err, "register sqlchain mux service failed") + return + } // init service dbms.rpc = NewDBMSRPCService(route.DBRPCName, cfg.Server, dbms) @@ -128,7 +131,7 @@ func (dbms *DBMS) Init() (err error) { } // load current peers info from block producer - var dbMapping []wt.ServiceInstance + var dbMapping []types.ServiceInstance if dbMapping, err = dbms.getMappedInstances(); err != nil { err = errors.Wrap(err, "get mapped instances failed") return @@ -143,7 +146,7 @@ func (dbms *DBMS) Init() (err error) { return } -func (dbms *DBMS) initDatabases(meta *DBMSMeta, conf []wt.ServiceInstance) (err error) { +func (dbms *DBMS) initDatabases(meta *DBMSMeta, conf []types.ServiceInstance) (err error) { currentInstance := make(map[proto.DatabaseID]bool) for _, instanceConf := range conf { @@ -173,7 +176,7 @@ func (dbms *DBMS) initDatabases(meta *DBMSMeta, conf []wt.ServiceInstance) (err } // Create add new database to the miner dbms. -func (dbms *DBMS) Create(instance *wt.ServiceInstance, cleanup bool) (err error) { +func (dbms *DBMS) Create(instance *types.ServiceInstance, cleanup bool) (err error) { if _, alreadyExists := dbms.getMeta(instance.DatabaseID); alreadyExists { return ErrAlreadyExists } @@ -241,7 +244,7 @@ func (dbms *DBMS) Drop(dbID proto.DatabaseID) (err error) { } // Update apply the new peers config to dbms. 
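// The hunks above swap kayak.Peers (per-server role and public key entries)
// for the flatter proto.Peers, signed as a whole. A minimal construction
// sketch matching the test code; singleNodePeers is a hypothetical helper,
// not part of the diff:
func singleNodePeers(term uint64, nodeID proto.NodeID, privKey *asymmetric.PrivateKey) (peers *proto.Peers, err error) {
	peers = &proto.Peers{
		PeersHeader: proto.PeersHeader{
			Term:    term,
			Leader:  nodeID, // the leader is now just a NodeID
			Servers: []proto.NodeID{nodeID},
		},
	}
	err = peers.Sign(privKey) // one signature over the header, no per-server keys
	return
}
// The DBMS.Update hunk that consumes refreshed peers follows.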
-func (dbms *DBMS) Update(instance *wt.ServiceInstance) (err error) { +func (dbms *DBMS) Update(instance *types.ServiceInstance) (err error) { var db *Database var exists bool @@ -254,7 +257,7 @@ func (dbms *DBMS) Update(instance *wt.ServiceInstance) (err error) { } // Query handles query request in dbms. -func (dbms *DBMS) Query(req *wt.Request) (res *wt.Response, err error) { +func (dbms *DBMS) Query(req *types.Request) (res *types.Response, err error) { var db *Database var exists bool @@ -269,7 +272,7 @@ func (dbms *DBMS) Query(req *wt.Request) (res *wt.Response, err error) { } // Ack handles ack of previous response. -func (dbms *DBMS) Ack(ack *wt.Ack) (err error) { +func (dbms *DBMS) Ack(ack *types.Ack) (err error) { var db *Database var exists bool @@ -283,32 +286,6 @@ func (dbms *DBMS) Ack(ack *wt.Ack) (err error) { return db.Ack(ack) } -// GetRequest handles fetching original request of previous transactions. -func (dbms *DBMS) GetRequest(dbID proto.DatabaseID, offset uint64) (query *wt.Request, err error) { - var db *Database - var exists bool - - if db, exists = dbms.getMeta(dbID); !exists { - err = ErrNotExists - return - } - - var reqBytes []byte - if reqBytes, err = db.kayakRuntime.GetLog(offset); err != nil { - return - } - - // decode requests - var q wt.Request - if err = utils.DecodeMsgPack(reqBytes, &q); err != nil { - return - } - - query = &q - - return -} - func (dbms *DBMS) getMeta(dbID proto.DatabaseID) (db *Database, exists bool) { var rawDB interface{} @@ -334,14 +311,14 @@ func (dbms *DBMS) removeMeta(dbID proto.DatabaseID) (err error) { return dbms.writeMeta() } -func (dbms *DBMS) getMappedInstances() (instances []wt.ServiceInstance, err error) { +func (dbms *DBMS) getMappedInstances() (instances []types.ServiceInstance, err error) { var bpNodeID proto.NodeID if bpNodeID, err = rpc.GetCurrentBP(); err != nil { return } - req := &wt.InitService{} - res := new(wt.InitServiceResponse) + req := &types.InitService{} + res := new(types.InitServiceResponse) if err = rpc.NewCaller().CallNode(bpNodeID, route.BPDBGetNodeDatabases.String(), req, res); err != nil { return diff --git a/worker/dbms_config.go b/worker/dbms_config.go index d4f671659..bc8fcded7 100644 --- a/worker/dbms_config.go +++ b/worker/dbms_config.go @@ -24,7 +24,7 @@ import ( var ( // DefaultMaxReqTimeGap defines max time gap between request and server. - DefaultMaxReqTimeGap = time.Second * 5 + DefaultMaxReqTimeGap = time.Minute ) // DBMSConfig defines the local multi-database management system config. diff --git a/worker/dbms_mux.go b/worker/dbms_mux.go new file mode 100644 index 000000000..bb9a897fa --- /dev/null +++ b/worker/dbms_mux.go @@ -0,0 +1,69 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package worker + +import ( + "sync" + + "github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/pkg/errors" +) + +const ( + // DBKayakMethodName defines the database kayak rpc method name. + DBKayakMethodName = "Call" +) + +// DBKayakMuxService defines a mux service for database kayak runtimes. +type DBKayakMuxService struct { + serviceName string + serviceMap sync.Map +} + +// NewDBKayakMuxService returns a new kayak mux service. +func NewDBKayakMuxService(serviceName string, server *rpc.Server) (s *DBKayakMuxService, err error) { + s = &DBKayakMuxService{ + serviceName: serviceName, + } + err = server.RegisterService(serviceName, s) + return +} + +func (s *DBKayakMuxService) register(id proto.DatabaseID, rt *kayak.Runtime) { + s.serviceMap.Store(id, rt) + +} + +func (s *DBKayakMuxService) unregister(id proto.DatabaseID) { + s.serviceMap.Delete(id) +} + +// Call handles kayak call. +func (s *DBKayakMuxService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { + // call apply to specified kayak + // treat req.Instance as DatabaseID + id := proto.DatabaseID(req.Instance) + + if v, ok := s.serviceMap.Load(id); ok { + return v.(*kayak.Runtime).FollowerApply(req.Log) + } + + return errors.Wrapf(ErrUnknownMuxRequest, "instance %v", req.Instance) +} diff --git a/worker/dbms_rpc.go b/worker/dbms_rpc.go index b75cc9407..0bd8a5deb 100644 --- a/worker/dbms_rpc.go +++ b/worker/dbms_rpc.go @@ -17,12 +17,13 @@ package worker import ( - "context" - "runtime/trace" + //"context" + //"runtime/trace" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/pkg/errors" "github.com/rcrowley/go-metrics" ) @@ -52,25 +53,25 @@ func NewDBMSRPCService(serviceName string, server *rpc.Server, dbms *DBMS) (serv } // Query rpc, called by client to issue read/write query. -func (rpc *DBMSRPCService) Query(req *wt.Request, res *wt.Response) (err error) { +func (rpc *DBMSRPCService) Query(req *types.Request, res *types.Response) (err error) { // Just need to verify signature in db.saveAck //if err = req.Verify(); err != nil { // dbQueryFailCounter.Mark(1) // return //} - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "Query") - defer task.End() - defer trace.StartRegion(ctx, "QueryRegion").End() + //ctx := context.Background() + //ctx, task := trace.NewTask(ctx, "Query") + //defer task.End() + //defer trace.StartRegion(ctx, "QueryRegion").End() // verify query is sent from the request node if req.Envelope.NodeID.String() != string(req.Header.NodeID) { // node id mismatch - err = ErrInvalidRequest + err = errors.Wrap(ErrInvalidRequest, "request node id mismatch in query") dbQueryFailCounter.Mark(1) return } - var r *wt.Response + var r *types.Response if r, err = rpc.dbms.Query(req); err != nil { dbQueryFailCounter.Mark(1) return @@ -83,19 +84,19 @@ func (rpc *DBMSRPCService) Query(req *wt.Request, res *wt.Response) (err error) } // Ack rpc, called by client to confirm read request.
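// The Query handler above now rejects spoofed senders by comparing the
// transport envelope's node id against the signed request header. A minimal
// sketch of that check using only fields shown in the hunk; verifyEnvelope is
// a hypothetical helper, not part of the diff:
func verifyEnvelope(req *types.Request) (err error) {
	// the RPC layer fills Envelope.NodeID from the live connection, so a
	// mismatch with the signed header means the request was relayed or forged
	if req.Envelope.NodeID.String() != string(req.Header.NodeID) {
		err = errors.Wrap(ErrInvalidRequest, "request node id mismatch")
	}
	return
}
// The equivalent check in the Ack handler follows.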
-func (rpc *DBMSRPCService) Ack(ack *wt.Ack, _ *wt.AckResponse) (err error) { +func (rpc *DBMSRPCService) Ack(ack *types.Ack, _ *types.AckResponse) (err error) { // Just need to verify signature in db.saveAck //if err = ack.Verify(); err != nil { // return //} - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "Ack") - defer task.End() - defer trace.StartRegion(ctx, "AckRegion").End() + //ctx := context.Background() + //ctx, task := trace.NewTask(ctx, "Ack") + //defer task.End() + //defer trace.StartRegion(ctx, "AckRegion").End() // verify if ack node is the original ack node if ack.Envelope.NodeID.String() != string(ack.Header.Response.Request.NodeID) { - err = ErrInvalidRequest + err = errors.Wrap(ErrInvalidRequest, "request node id mismatch in ack") return } @@ -106,10 +107,10 @@ func (rpc *DBMSRPCService) Ack(ack *wt.Ack, _ *wt.AckResponse) (err error) { } // Deploy rpc, called by BP to create/drop database and update peers. -func (rpc *DBMSRPCService) Deploy(req *wt.UpdateService, _ *wt.UpdateServiceResponse) (err error) { +func (rpc *DBMSRPCService) Deploy(req *types.UpdateService, _ *types.UpdateServiceResponse) (err error) { // verify request node is block producer if !route.IsPermitted(&req.Envelope, route.DBSDeploy) { - err = ErrInvalidRequest + err = errors.Wrap(ErrInvalidRequest, "node not permitted for deploy request") return } @@ -120,20 +121,13 @@ func (rpc *DBMSRPCService) Deploy(req *wt.UpdateService, _ *wt.UpdateServiceResp // create/drop/update switch req.Header.Op { - case wt.CreateDB: + case types.CreateDB: err = rpc.dbms.Create(&req.Header.Instance, true) - case wt.UpdateDB: + case types.UpdateDB: err = rpc.dbms.Update(&req.Header.Instance) - case wt.DropDB: + case types.DropDB: err = rpc.dbms.Drop(req.Header.Instance.DatabaseID) } return } - -// GetRequest rpc, called by observer to fetch original request by log offset. -func (rpc *DBMSRPCService) GetRequest(req *wt.GetRequestReq, resp *wt.GetRequestResp) (err error) { - // TODO(xq262144), check permission - resp.Request, err = rpc.dbms.GetRequest(req.DatabaseID, req.LogOffset) - return -} diff --git a/worker/dbms_test.go b/worker/dbms_test.go index a0f2875ae..a424828cf 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -24,12 +24,10 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" . 
"github.com/smartystreets/goconvey/convey" ) @@ -64,10 +62,10 @@ func TestDBMS(t *testing.T) { So(err, ShouldBeNil) // add database - var req *wt.UpdateService - var res wt.UpdateServiceResponse - var peers *kayak.Peers - var block *ct.Block + var req *types.UpdateService + var res types.UpdateServiceResponse + var peers *proto.Peers + var block *types.Block dbID := proto.DatabaseID("db") @@ -80,9 +78,9 @@ func TestDBMS(t *testing.T) { So(err, ShouldBeNil) // call with no BP privilege - req = new(wt.UpdateService) - req.Header.Op = wt.CreateDB - req.Header.Instance = wt.ServiceInstance{ + req = new(types.UpdateService) + req.Header.Op = types.CreateDB + req.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, Peers: peers, GenesisBlock: block, @@ -97,9 +95,9 @@ func TestDBMS(t *testing.T) { Convey("queries", func() { // sending write query - var writeQuery *wt.Request - var queryRes *wt.Response - writeQuery, err = buildQueryWithDatabaseID(wt.WriteQuery, 1, 1, dbID, []string{ + var writeQuery *types.Request + var queryRes *types.Response + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, dbID, []string{ "create table test (test int)", "insert into test values(1)", }) @@ -110,20 +108,10 @@ func TestDBMS(t *testing.T) { err = queryRes.Verify() So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) - So(queryRes.Header.LogOffset, ShouldEqual, 1) - - var reqGetRequest wt.GetRequestReq - var respGetRequest *wt.GetRequestResp - - reqGetRequest.DatabaseID = dbID - reqGetRequest.LogOffset = queryRes.Header.LogOffset - err = testRequest(route.DBSGetRequest, reqGetRequest, &respGetRequest) - So(err, ShouldBeNil) - So(respGetRequest.Request.Header.HeaderHash, ShouldResemble, writeQuery.Header.HeaderHash) // sending read query - var readQuery *wt.Request - readQuery, err = buildQueryWithDatabaseID(wt.ReadQuery, 1, 2, dbID, []string{ + var readQuery *types.Request + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 2, dbID, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -140,20 +128,20 @@ func TestDBMS(t *testing.T) { So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) // sending read ack - var ack *wt.Ack + var ack *types.Ack ack, err = buildAck(queryRes) So(err, ShouldBeNil) - var ackRes wt.AckResponse + var ackRes types.AckResponse err = testRequest(route.DBSAck, ack, &ackRes) So(err, ShouldBeNil) }) Convey("query non-existent database", func() { // sending write query - var writeQuery *wt.Request - var queryRes *wt.Response - writeQuery, err = buildQueryWithDatabaseID(wt.WriteQuery, 1, 1, + var writeQuery *types.Request + var queryRes *types.Response + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, proto.DatabaseID("db_not_exists"), []string{ "create table test (test int)", "insert into test values(1)", @@ -169,9 +157,9 @@ func TestDBMS(t *testing.T) { peers, err = getPeers(2) So(err, ShouldBeNil) - req = new(wt.UpdateService) - req.Header.Op = wt.UpdateDB - req.Header.Instance = wt.ServiceInstance{ + req = new(types.UpdateService) + req.Header.Op = types.UpdateDB + req.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, Peers: peers, } @@ -184,9 +172,9 @@ func TestDBMS(t *testing.T) { Convey("drop database before shutdown", func() { // drop database - req = new(wt.UpdateService) - req.Header.Op = wt.DropDB - req.Header.Instance = wt.ServiceInstance{ + req = new(types.UpdateService) + req.Header.Op = types.DropDB + req.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, } err = req.Sign(privateKey) diff 
--git a/worker/errors.go b/worker/errors.go index 439a8cb2e..02f832ea7 100644 --- a/worker/errors.go +++ b/worker/errors.go @@ -39,4 +39,7 @@ var ( // ErrSpaceLimitExceeded defines errors on disk space exceeding limit. ErrSpaceLimitExceeded = errors.New("space limit exceeded") + + // ErrUnknownMuxRequest indicates that a multiplexing request endpoint is not found. + ErrUnknownMuxRequest = errors.New("unknown multiplexing request") ) diff --git a/worker/types/ack_type.go b/worker/otypes/ack_type.go similarity index 63% rename from worker/types/ack_type.go rename to worker/otypes/ack_type.go index 564c70cf4..795c99e1a 100644 --- a/worker/types/ack_type.go +++ b/worker/otypes/ack_type.go @@ -14,11 +14,9 @@ * limitations under the License. */ -package types +package otypes import ( - "bytes" - "encoding/binary" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -38,9 +36,9 @@ type AckHeader struct { // SignedAckHeader defines client signed ack entity. type SignedAckHeader struct { AckHeader - HeaderHash hash.Hash `json:"hh"` - Signee *asymmetric.PublicKey `json:"e"` - Signature *asymmetric.Signature `json:"s"` + Hash hash.Hash `json:"hh"` + Signee *asymmetric.PublicKey `json:"e"` + Signature *asymmetric.Signature `json:"s"` } // Ack defines a whole client ack request entity. @@ -52,57 +50,17 @@ type Ack struct { // AckResponse defines client ack response entity. type AckResponse struct{} -// Serialize structure to bytes. -func (h *AckHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(h.Response.Serialize()) - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedAckHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.AckHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in ack header. func (sh *SignedAckHeader) Verify() (err error) { // verify response if err = sh.Response.Verify(); err != nil { return } - if err = verifyHash(&sh.AckHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.AckHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -119,24 +77,17 @@ func (sh *SignedAckHeader) Sign(signer *asymmetric.PrivateKey, verifyReqHeader b } // build hash - buildHash(&sh.AckHeader, &sh.HeaderHash) + if err = buildHash(&sh.AckHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (a *Ack) Serialize() []byte { - if a == nil { - return []byte{'\000'} - } - - return a.Header.Serialize() -} - // Verify checks hash and signature in ack.
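// With HeaderHash renamed to Hash above, signing keeps the same two-step
// shape: build a stable hash over the header, then sign those bytes. A
// minimal sketch under this file's helpers (buildHash, asymmetric);
// signAckHeader is a hypothetical helper, not part of the diff:
func signAckHeader(sh *SignedAckHeader, signer *asymmetric.PrivateKey) (err error) {
	if err = buildHash(&sh.AckHeader, &sh.Hash); err != nil { // buildHash now returns an error
		return
	}
	if sh.Signature, err = signer.Sign(sh.Hash[:]); err != nil { // sign the hash bytes
		return
	}
	sh.Signee = signer.PubKey()
	return
}
// The whole-Ack Verify wrapper shown next simply delegates to its header.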
func (a *Ack) Verify() error { return a.Header.Verify() @@ -148,9 +99,9 @@ func (a *Ack) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err err return a.Header.Sign(signer, verifyReqHeader) } -// ResponseHeaderHash returns the deep shadowed Response HeaderHash field. -func (sh *SignedAckHeader) ResponseHeaderHash() hash.Hash { - return sh.AckHeader.Response.HeaderHash +// ResponseHash returns the deep shadowed Response Hash field. +func (sh *SignedAckHeader) ResponseHash() hash.Hash { + return sh.AckHeader.Response.Hash } // SignedRequestHeader returns the deep shadowed Request reference. diff --git a/worker/types/ack_type_gen.go b/worker/otypes/ack_type_gen.go similarity index 95% rename from worker/types/ack_type_gen.go rename to worker/otypes/ack_type_gen.go index 10e1687b0..9c4a845c0 100644 --- a/worker/types/ack_type_gen.go +++ b/worker/otypes/ack_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. @@ -116,7 +116,7 @@ func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { o = append(o, 0x83) o = hsp.AppendTime(o, z.AckHeader.Timestamp) o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -138,6 +138,6 @@ func (z *SignedAckHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 10 + 1 + 9 + z.AckHeader.Response.Msgsize() + 7 + z.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 11 + z.HeaderHash.Msgsize() + s += 10 + 1 + 9 + z.AckHeader.Response.Msgsize() + 7 + z.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 5 + z.Hash.Msgsize() return } diff --git a/worker/types/request_type_gen_test.go b/worker/otypes/ack_type_gen_test.go similarity index 55% rename from worker/types/request_type_gen_test.go rename to worker/otypes/ack_type_gen_test.go index a47471bfb..3fc0faf87 100644 --- a/worker/types/request_type_gen_test.go +++ b/worker/otypes/ack_type_gen_test.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
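// Every generated test renamed below checks one invariant: MarshalHash is
// deterministic for a fixed value. A minimal generic form of that check,
// assuming the bytes and testing imports already present in these files;
// checkStable is a hypothetical helper, not generated code:
func checkStable(t *testing.T, v interface{ MarshalHash() ([]byte, error) }) {
	bts1, err := v.MarshalHash()
	if err != nil {
		t.Fatal(err)
	}
	bts2, err := v.MarshalHash()
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bts1, bts2) { // two runs over one value must agree byte for byte
		t.Fatal("hash not stable")
	}
}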
@@ -9,8 +9,8 @@ import ( "testing" ) -func TestMarshalHashQueryKey(t *testing.T) { - v := QueryKey{} +func TestMarshalHashAck(t *testing.T) { + v := Ack{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -25,8 +25,8 @@ func TestMarshalHashQueryKey(t *testing.T) { } } -func BenchmarkMarshalHashQueryKey(b *testing.B) { - v := QueryKey{} +func BenchmarkMarshalHashAck(b *testing.B) { + v := Ack{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -34,8 +34,8 @@ func BenchmarkMarshalHashQueryKey(b *testing.B) { } } -func BenchmarkAppendMsgQueryKey(b *testing.B) { - v := QueryKey{} +func BenchmarkAppendMsgAck(b *testing.B) { + v := Ack{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) @@ -46,8 +46,8 @@ func BenchmarkAppendMsgQueryKey(b *testing.B) { } } -func TestMarshalHashRequestHeader(t *testing.T) { - v := RequestHeader{} +func TestMarshalHashAckHeader(t *testing.T) { + v := AckHeader{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -62,8 +62,8 @@ func TestMarshalHashRequestHeader(t *testing.T) { } } -func BenchmarkMarshalHashRequestHeader(b *testing.B) { - v := RequestHeader{} +func BenchmarkMarshalHashAckHeader(b *testing.B) { + v := AckHeader{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -71,8 +71,8 @@ func BenchmarkMarshalHashRequestHeader(b *testing.B) { } } -func BenchmarkAppendMsgRequestHeader(b *testing.B) { - v := RequestHeader{} +func BenchmarkAppendMsgAckHeader(b *testing.B) { + v := AckHeader{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) @@ -83,8 +83,8 @@ func BenchmarkAppendMsgRequestHeader(b *testing.B) { } } -func TestMarshalHashSignedRequestHeader(t *testing.T) { - v := SignedRequestHeader{} +func TestMarshalHashAckResponse(t *testing.T) { + v := AckResponse{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -99,8 +99,8 @@ func TestMarshalHashSignedRequestHeader(t *testing.T) { } } -func BenchmarkMarshalHashSignedRequestHeader(b *testing.B) { - v := SignedRequestHeader{} +func BenchmarkMarshalHashAckResponse(b *testing.B) { + v := AckResponse{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -108,8 +108,45 @@ func BenchmarkMarshalHashSignedRequestHeader(b *testing.B) { } } -func BenchmarkAppendMsgSignedRequestHeader(b *testing.B) { - v := SignedRequestHeader{} +func BenchmarkAppendMsgAckResponse(b *testing.B) { + v := AckResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedAckHeader(t *testing.T) { + v := SignedAckHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedAckHeader(b *testing.B) { + v := SignedAckHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedAckHeader(b *testing.B) { + v := SignedAckHeader{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) diff --git a/kayak/api/doc.go b/worker/otypes/doc.go similarity index 87% rename from kayak/api/doc.go rename to 
worker/otypes/doc.go index 6a9ec371c..089862dc6 100644 --- a/kayak/api/doc.go +++ b/worker/otypes/doc.go @@ -15,6 +15,6 @@ */ /* -Package api provides simplified kayak api with pre-defined practical options. +Package otypes defines miner node export types. */ -package api +package otypes diff --git a/worker/types/errors.go b/worker/otypes/errors.go similarity index 98% rename from worker/types/errors.go rename to worker/otypes/errors.go index b95037572..5c44a3c9d 100644 --- a/worker/types/errors.go +++ b/worker/otypes/errors.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import "errors" diff --git a/worker/types/get_request.go b/worker/otypes/get_request.go similarity index 98% rename from worker/types/get_request.go rename to worker/otypes/get_request.go index 5f19cef99..cd1bd4567 100644 --- a/worker/types/get_request.go +++ b/worker/otypes/get_request.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import "github.com/CovenantSQL/CovenantSQL/proto" diff --git a/worker/types/init_service_type.go b/worker/otypes/init_service_type.go similarity index 53% rename from worker/types/init_service_type.go rename to worker/otypes/init_service_type.go index 8c97f1c5e..54d2af65a 100644 --- a/worker/types/init_service_type.go +++ b/worker/otypes/init_service_type.go @@ -14,18 +14,13 @@ * limitations under the License. */ -package types +package otypes import ( - "bytes" - "encoding/binary" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - "github.com/CovenantSQL/CovenantSQL/utils" + ct "github.com/CovenantSQL/CovenantSQL/sqlchain/otypes" ) //go:generate hsp @@ -47,7 +42,7 @@ type ResourceMeta struct { // ServiceInstance defines single instance to be initialized. type ServiceInstance struct { DatabaseID proto.DatabaseID - Peers *kayak.Peers + Peers *proto.Peers ResourceMeta ResourceMeta GenesisBlock *ct.Block } @@ -60,9 +55,9 @@ type InitServiceResponseHeader struct { // SignedInitServiceResponseHeader defines signed worker service init response header. type SignedInitServiceResponseHeader struct { InitServiceResponseHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // InitServiceResponse defines worker service init response. @@ -70,90 +65,14 @@ type InitServiceResponse struct { Header SignedInitServiceResponseHeader } -// Serialize structure to bytes. -func (m *ResourceMeta) Serialize() []byte { - if m == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, m.Node) - binary.Write(buf, binary.LittleEndian, m.Space) - binary.Write(buf, binary.LittleEndian, m.Memory) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (i *ServiceInstance) Serialize() []byte { - if i == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.WriteString(string(i.DatabaseID)) - buf.Write(i.Peers.Serialize()) - buf.Write(i.ResourceMeta.Serialize()) - if i.GenesisBlock != nil { - genesisBlock, _ := utils.EncodeMsgPack(i.GenesisBlock) - buf.Write(genesisBlock.Bytes()) - } else { - buf.Write([]byte{'\000'}) - } - - return buf.Bytes() -} - -// Serialize structure to bytes. 
-func (h *InitServiceResponseHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, uint64(len(h.Instances))) - for _, instance := range h.Instances { - buf.Write(instance.Serialize()) - } - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedInitServiceResponseHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.InitServiceResponseHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in init service response header. func (sh *SignedInitServiceResponseHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.InitServiceResponseHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.InitServiceResponseHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -162,24 +81,17 @@ func (sh *SignedInitServiceResponseHeader) Verify() (err error) { // Sign the request. func (sh *SignedInitServiceResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.InitServiceResponseHeader, &sh.HeaderHash) + if err = buildHash(&sh.InitServiceResponseHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (rs *InitServiceResponse) Serialize() []byte { - if rs == nil { - return []byte{'\000'} - } - - return rs.Header.Serialize() -} - // Verify checks hash and signature in init service response header. func (rs *InitServiceResponse) Verify() error { return rs.Header.Verify() diff --git a/worker/types/init_service_type_gen.go b/worker/otypes/init_service_type_gen.go similarity index 98% rename from worker/types/init_service_type_gen.go rename to worker/otypes/init_service_type_gen.go index d250cd883..a538613bb 100644 --- a/worker/types/init_service_type_gen.go +++ b/worker/otypes/init_service_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. @@ -188,7 +188,7 @@ func (z *SignedInitServiceResponseHeader) MarshalHash() (o []byte, err error) { } } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -214,6 +214,6 @@ func (z *SignedInitServiceResponseHeader) Msgsize() (s int) { for za0001 := range z.InitServiceResponseHeader.Instances { s += z.InitServiceResponseHeader.Instances[za0001].Msgsize() } - s += 11 + z.HeaderHash.Msgsize() + s += 5 + z.Hash.Msgsize() return } diff --git a/worker/otypes/init_service_type_gen_test.go b/worker/otypes/init_service_type_gen_test.go new file mode 100644 index 000000000..ee88b0eb6 --- /dev/null +++ b/worker/otypes/init_service_type_gen_test.go @@ -0,0 +1,232 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
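// The init service hunks above drop the hand-written Serialize methods in
// favor of generated MarshalHash, and ServiceInstance now carries *proto.Peers.
// A minimal construction sketch from fields kept by the diff; newInstance is a
// hypothetical helper, not part of the diff:
func newInstance(dbID proto.DatabaseID, peers *proto.Peers, genesis *ct.Block) ServiceInstance {
	return ServiceInstance{
		DatabaseID:   dbID,
		Peers:        peers,          // previously *kayak.Peers
		ResourceMeta: ResourceMeta{}, // Node/Space/Memory left at defaults
		GenesisBlock: genesis,
	}
}
// The generated stability tests for these types follow.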
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashInitService(t *testing.T) { + v := InitService{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashInitService(b *testing.B) { + v := InitService{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgInitService(b *testing.B) { + v := InitService{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashInitServiceResponse(t *testing.T) { + v := InitServiceResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashInitServiceResponse(b *testing.B) { + v := InitServiceResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgInitServiceResponse(b *testing.B) { + v := InitServiceResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashInitServiceResponseHeader(t *testing.T) { + v := InitServiceResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashInitServiceResponseHeader(b *testing.B) { + v := InitServiceResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgInitServiceResponseHeader(b *testing.B) { + v := InitServiceResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashResourceMeta(t *testing.T) { + v := ResourceMeta{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResourceMeta(b *testing.B) { + v := ResourceMeta{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResourceMeta(b *testing.B) { + v := ResourceMeta{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashServiceInstance(t *testing.T) { + v := ServiceInstance{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + 
t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashServiceInstance(b *testing.B) { + v := ServiceInstance{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgServiceInstance(b *testing.B) { + v := ServiceInstance{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedInitServiceResponseHeader(t *testing.T) { + v := SignedInitServiceResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedInitServiceResponseHeader(b *testing.B) { + v := SignedInitServiceResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedInitServiceResponseHeader(b *testing.B) { + v := SignedInitServiceResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/no_ack_report_type.go b/worker/otypes/no_ack_report_type.go similarity index 56% rename from worker/types/no_ack_report_type.go rename to worker/otypes/no_ack_report_type.go index aa163176e..24b0fbfc4 100644 --- a/worker/types/no_ack_report_type.go +++ b/worker/otypes/no_ack_report_type.go @@ -14,16 +14,13 @@ * limitations under the License. */ -package types +package otypes import ( - "bytes" - "encoding/binary" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -39,9 +36,9 @@ type NoAckReportHeader struct { // SignedNoAckReportHeader defines worker worker issued/signed client no ack report. type SignedNoAckReportHeader struct { NoAckReportHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // NoAckReport defines whole worker no client ack report. @@ -55,15 +52,15 @@ type AggrNoAckReportHeader struct { NodeID proto.NodeID // aggregated report node id Timestamp time.Time // time in UTC zone Reports []SignedNoAckReportHeader // no-ack reports - Peers *kayak.Peers // serving peers during report + Peers *proto.Peers // serving peers during report } // SignedAggrNoAckReportHeader defines worker leader aggregated/signed client no ack report. type SignedAggrNoAckReportHeader struct { AggrNoAckReportHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // AggrNoAckReport defines whole worker leader no client ack report. @@ -72,46 +69,6 @@ type AggrNoAckReport struct { Header SignedAggrNoAckReportHeader } -// Serialize structure to bytes. 
-func (h *NoAckReportHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) - buf.Write(h.Response.Serialize()) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedNoAckReportHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.NoAckReportHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in signed no ack report header. func (sh *SignedNoAckReportHeader) Verify() (err error) { // verify original response @@ -119,11 +76,11 @@ func (sh *SignedNoAckReportHeader) Verify() (err error) { return } // verify hash - if err = verifyHash(&sh.NoAckReportHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.NoAckReportHeader, &sh.Hash); err != nil { return } // validate signature - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -137,24 +94,17 @@ func (sh *SignedNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err erro } // build hash - buildHash(&sh.NoAckReportHeader, &sh.HeaderHash) + if err = buildHash(&sh.NoAckReportHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (r *NoAckReport) Serialize() []byte { - if r == nil { - return []byte{'\000'} - } - - return r.Header.Serialize() -} - // Verify checks hash and signature in whole no ack report. func (r *NoAckReport) Verify() error { return r.Header.Verify() @@ -165,50 +115,6 @@ func (r *NoAckReport) Sign(signer *asymmetric.PrivateKey) error { return r.Header.Sign(signer) } -// Serialize structure to bytes. -func (h *AggrNoAckReportHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) - binary.Write(buf, binary.LittleEndian, uint64(len(h.Reports))) - for _, r := range h.Reports { - buf.Write(r.Serialize()) - } - buf.Write(h.Peers.Serialize()) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedAggrNoAckReportHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.AggrNoAckReportHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in aggregated no ack report. 
func (sh *SignedAggrNoAckReportHeader) Verify() (err error) { // verify original reports @@ -218,11 +124,11 @@ func (sh *SignedAggrNoAckReportHeader) Verify() (err error) { } } // verify hash - if err = verifyHash(&sh.AggrNoAckReportHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.AggrNoAckReportHeader, &sh.Hash); err != nil { return } // verify signature - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -237,24 +143,17 @@ func (sh *SignedAggrNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err } // verify hash - buildHash(&sh.AggrNoAckReportHeader, &sh.HeaderHash) + if err = buildHash(&sh.AggrNoAckReportHeader, &sh.Hash); err != nil { + return + } // verify signature - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (r *AggrNoAckReport) Serialize() []byte { - if r == nil { - return []byte{'\000'} - } - - return r.Header.Serialize() -} - // Verify the whole aggregation no ack report. func (r *AggrNoAckReport) Verify() (err error) { return r.Header.Verify() diff --git a/worker/types/no_ack_report_type_gen.go b/worker/otypes/no_ack_report_type_gen.go similarity index 95% rename from worker/types/no_ack_report_type_gen.go rename to worker/otypes/no_ack_report_type_gen.go index f8c660233..d2a3408b7 100644 --- a/worker/types/no_ack_report_type_gen.go +++ b/worker/otypes/no_ack_report_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. @@ -169,7 +169,7 @@ func (z *SignedAggrNoAckReportHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -191,7 +191,7 @@ func (z *SignedAggrNoAckReportHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 22 + z.AggrNoAckReportHeader.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 22 + z.AggrNoAckReportHeader.Msgsize() + 5 + z.Hash.Msgsize() return } @@ -236,7 +236,7 @@ func (z *SignedNoAckReportHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -258,6 +258,6 @@ func (z *SignedNoAckReportHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() + 5 + z.Hash.Msgsize() return } diff --git a/worker/otypes/no_ack_report_type_gen_test.go b/worker/otypes/no_ack_report_type_gen_test.go new file mode 100644 index 000000000..bf3e1bb8b --- /dev/null +++ b/worker/otypes/no_ack_report_type_gen_test.go @@ -0,0 +1,232 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashAggrNoAckReport(t *testing.T) { + v := AggrNoAckReport{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashAggrNoAckReport(b *testing.B) { + v := AggrNoAckReport{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgAggrNoAckReport(b *testing.B) { + v := AggrNoAckReport{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashAggrNoAckReportHeader(t *testing.T) { + v := AggrNoAckReportHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashAggrNoAckReportHeader(b *testing.B) { + v := AggrNoAckReportHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgAggrNoAckReportHeader(b *testing.B) { + v := AggrNoAckReportHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashNoAckReport(t *testing.T) { + v := NoAckReport{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashNoAckReport(b *testing.B) { + v := NoAckReport{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgNoAckReport(b *testing.B) { + v := NoAckReport{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashNoAckReportHeader(t *testing.T) { + v := NoAckReportHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashNoAckReportHeader(b *testing.B) { + v := NoAckReportHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgNoAckReportHeader(b *testing.B) { + v := NoAckReportHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedAggrNoAckReportHeader(t *testing.T) { + v := SignedAggrNoAckReportHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, 
bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedAggrNoAckReportHeader(b *testing.B) { + v := SignedAggrNoAckReportHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedAggrNoAckReportHeader(b *testing.B) { + v := SignedAggrNoAckReportHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedNoAckReportHeader(t *testing.T) { + v := SignedNoAckReportHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedNoAckReportHeader(b *testing.B) { + v := SignedNoAckReportHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedNoAckReportHeader(b *testing.B) { + v := SignedNoAckReportHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/request_type.go b/worker/otypes/request_type.go similarity index 62% rename from worker/types/request_type.go rename to worker/otypes/request_type.go index 26101475b..768c3165a 100644 --- a/worker/types/request_type.go +++ b/worker/otypes/request_type.go @@ -14,22 +14,17 @@ * limitations under the License. */ -package types +package otypes import ( - "bytes" - "database/sql" - "encoding/binary" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils" ) //go:generate hsp -//hsp:ignore Query Queries Payload RequestPayload Request // QueryType enumerates available query type, currently read/write. type QueryType int32 @@ -41,21 +36,16 @@ const ( WriteQuery ) +// NamedArg defines the named argument structure for database. +type NamedArg struct { + Name string + Value interface{} +} + // Query defines single query. type Query struct { Pattern string - Args []sql.NamedArg -} - -func (t QueryType) String() string { - switch t { - case ReadQuery: - return "read" - case WriteQuery: - return "write" - default: - return "unknown" - } + Args []NamedArg } // RequestPayload defines a queries payload. @@ -85,9 +75,9 @@ type QueryKey struct { // SignedRequestHeader defines a signed query request header. type SignedRequestHeader struct { RequestHeader - HeaderHash hash.Hash `json:"hh"` - Signee *asymmetric.PublicKey `json:"e"` - Signature *asymmetric.Signature `json:"s"` + Hash hash.Hash `json:"hh"` + Signee *asymmetric.PublicKey `json:"e"` + Signature *asymmetric.Signature `json:"s"` } // Request defines a complete query request. @@ -97,67 +87,25 @@ type Request struct { Payload RequestPayload `json:"p"` } -// Serialize returns byte based binary form of struct. -func (p *RequestPayload) Serialize() []byte { - // HACK(xq262144): currently use idiomatic serialization for hash generation - buf, _ := utils.EncodeMsgPack(p) - - return buf.Bytes() -} - -// Serialize returns bytes based binary form of struct. 
-func (h *RequestHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, h.QueryType) - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - buf.WriteString(string(h.DatabaseID)) - binary.Write(buf, binary.LittleEndian, h.ConnectionID) - binary.Write(buf, binary.LittleEndian, h.SeqNo) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) // use nanoseconds unix epoch - binary.Write(buf, binary.LittleEndian, h.BatchCount) - buf.Write(h.QueriesHash[:]) - - return buf.Bytes() -} - -// Serialize returns bytes based binary form of struct. -func (sh *SignedRequestHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.RequestHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') +func (t QueryType) String() string { + switch t { + case ReadQuery: + return "read" + case WriteQuery: + return "write" + default: + return "unknown" } - - return buf.Bytes() } // Verify checks hash and signature in request header. func (sh *SignedRequestHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.RequestHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.RequestHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return nil @@ -166,33 +114,21 @@ func (sh *SignedRequestHeader) Verify() (err error) { // Sign the request. func (sh *SignedRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // compute hash - buildHash(&sh.RequestHeader, &sh.HeaderHash) + if err = buildHash(&sh.RequestHeader, &sh.Hash); err != nil { + return + } if signer == nil { return ErrSignRequest } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize returns bytes based binary form of struct. -func (r *Request) Serialize() []byte { - if r == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(r.Header.Serialize()) - buf.Write(r.Payload.Serialize()) - - return buf.Bytes() -} - // Verify checks hash and signature in whole request. func (r *Request) Verify() (err error) { // verify payload hash in signed header @@ -209,7 +145,9 @@ func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { r.Header.BatchCount = uint64(len(r.Payload.Queries)) // compute payload hash - buildHash(&r.Payload, &r.Header.QueriesHash) + if err = buildHash(&r.Payload, &r.Header.QueriesHash); err != nil { + return + } return r.Header.Sign(signer) } diff --git a/worker/otypes/request_type_gen.go b/worker/otypes/request_type_gen.go new file mode 100644 index 000000000..0dd0e6375 --- /dev/null +++ b/worker/otypes/request_type_gen.go @@ -0,0 +1,277 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z NamedArg) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o, err = hsp.AppendIntf(o, z.Value) + if err != nil { + return + } + o = append(o, 0x82) + o = hsp.AppendString(o, z.Name) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z NamedArg) Msgsize() (s int) { + s = 1 + 6 + hsp.GuessSize(z.Value) + 5 + hsp.StringPrefixSize + len(z.Name) + return +} + +// MarshalHash marshals for hash +func (z *Query) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Args))) + for za0001 := range z.Args { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendString(o, z.Args[za0001].Name) + o = append(o, 0x82) + o, err = hsp.AppendIntf(o, z.Args[za0001].Value) + if err != nil { + return + } + } + o = append(o, 0x82) + o = hsp.AppendString(o, z.Pattern) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Query) Msgsize() (s int) { + s = 1 + 5 + hsp.ArrayHeaderSize + for za0001 := range z.Args { + s += 1 + 5 + hsp.StringPrefixSize + len(z.Args[za0001].Name) + 6 + hsp.GuessSize(z.Args[za0001].Value) + } + s += 8 + hsp.StringPrefixSize + len(z.Pattern) + return +} + +// MarshalHash marshals for hash +func (z *QueryKey) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.ConnectionID) + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.SeqNo) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *QueryKey) Msgsize() (s int) { + s = 1 + 7 + z.NodeID.Msgsize() + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z QueryType) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendInt32(o, int32(z)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z QueryType) Msgsize() (s int) { + s = hsp.Int32Size + return +} + +// MarshalHash marshals for hash +func (z *Request) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + // map header, size 1 + o = append(o, 0x83, 0x83, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries))) + for za0001 := range z.Payload.Queries { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendString(o, z.Payload.Queries[za0001].Pattern) + o = append(o, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries[za0001].Args))) + for za0002 := range z.Payload.Queries[za0001].Args { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendString(o, z.Payload.Queries[za0001].Args[za0002].Name) + o = append(o, 0x82) + o, err = hsp.AppendIntf(o, z.Payload.Queries[za0001].Args[za0002].Value) + if err != nil { + return + } + } + } + o = append(o, 0x83) + if oTemp, err := z.Header.MarshalHash(); err != nil { + 
return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Request) Msgsize() (s int) { + s = 1 + 8 + 1 + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Payload.Queries { + s += 1 + 8 + hsp.StringPrefixSize + len(z.Payload.Queries[za0001].Pattern) + 5 + hsp.ArrayHeaderSize + for za0002 := range z.Payload.Queries[za0001].Args { + s += 1 + 5 + hsp.StringPrefixSize + len(z.Payload.Queries[za0001].Args[za0002].Name) + 6 + hsp.GuessSize(z.Payload.Queries[za0001].Args[za0002].Value) + } + } + s += 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *RequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 8 + o = append(o, 0x88, 0x88) + o = hsp.AppendInt32(o, int32(z.QueryType)) + o = append(o, 0x88) + if oTemp, err := z.QueriesHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + o = hsp.AppendTime(o, z.Timestamp) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.ConnectionID) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.SeqNo) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.BatchCount) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RequestHeader) Msgsize() (s int) { + s = 1 + 10 + hsp.Int32Size + 12 + z.QueriesHash.Msgsize() + 11 + z.DatabaseID.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + 11 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z *RequestPayload) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Queries))) + for za0001 := range z.Queries { + if oTemp, err := z.Queries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RequestPayload) Msgsize() (s int) { + s = 1 + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Queries { + s += z.Queries[za0001].Msgsize() + } + return +} + +// MarshalHash marshals for hash +func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Signee == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if oTemp, err := z.RequestHeader.MarshalHash(); err != nil { + return nil, err + } else { + o 
= hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.Hash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedRequestHeader) Msgsize() (s int) { + s = 1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 14 + z.RequestHeader.Msgsize() + 5 + z.Hash.Msgsize() + return +} diff --git a/worker/otypes/request_type_gen_test.go b/worker/otypes/request_type_gen_test.go new file mode 100644 index 000000000..c1371bf13 --- /dev/null +++ b/worker/otypes/request_type_gen_test.go @@ -0,0 +1,269 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashNamedArg(t *testing.T) { + v := NamedArg{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashNamedArg(b *testing.B) { + v := NamedArg{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgNamedArg(b *testing.B) { + v := NamedArg{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQuery(t *testing.T) { + v := Query{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQuery(b *testing.B) { + v := Query{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQuery(b *testing.B) { + v := Query{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQueryKey(t *testing.T) { + v := QueryKey{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQueryKey(b *testing.B) { + v := QueryKey{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQueryKey(b *testing.B) { + v := QueryKey{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequest(t *testing.T) { + v := Request{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequest(b 
*testing.B) { + v := Request{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequest(b *testing.B) { + v := Request{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequestHeader(t *testing.T) { + v := RequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequestHeader(b *testing.B) { + v := RequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequestHeader(b *testing.B) { + v := RequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequestPayload(t *testing.T) { + v := RequestPayload{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequestPayload(b *testing.B) { + v := RequestPayload{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequestPayload(b *testing.B) { + v := RequestPayload{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedRequestHeader(t *testing.T) { + v := SignedRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedRequestHeader(b *testing.B) { + v := SignedRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedRequestHeader(b *testing.B) { + v := SignedRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/otypes/response_type.go b/worker/otypes/response_type.go new file mode 100644 index 000000000..931eaa06d --- /dev/null +++ b/worker/otypes/response_type.go @@ -0,0 +1,128 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package otypes
+
+import (
+	"time"
+
+	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
+	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
+	"github.com/CovenantSQL/CovenantSQL/proto"
+	"github.com/pkg/errors"
+)
+
+//go:generate hsp
+
+// ResponseRow defines single row of query response.
+type ResponseRow struct {
+	Values []interface{}
+}
+
+// ResponsePayload defines column names and rows of query response.
+type ResponsePayload struct {
+	Columns   []string      `json:"c"`
+	DeclTypes []string      `json:"t"`
+	Rows      []ResponseRow `json:"r"`
+}
+
+// ResponseHeader defines a query response header.
+type ResponseHeader struct {
+	Request      SignedRequestHeader `json:"r"`
+	NodeID       proto.NodeID        `json:"id"` // response node id
+	Timestamp    time.Time           `json:"t"`  // time in UTC zone
+	RowCount     uint64              `json:"c"`  // response row count of payload
+	LogOffset    uint64              `json:"o"`  // request log offset
+	LastInsertID int64               `json:"l"`  // last insert id
+	AffectedRows int64               `json:"a"`  // affected rows
+	DataHash     hash.Hash           `json:"dh"` // hash of query response
+}
+
+// SignedResponseHeader defines a signed query response header.
+type SignedResponseHeader struct {
+	ResponseHeader
+	Hash      hash.Hash             `json:"h"`
+	Signee    *asymmetric.PublicKey `json:"e"`
+	Signature *asymmetric.Signature `json:"s"`
+}
+
+// Response defines a complete query response.
+type Response struct {
+	Header  SignedResponseHeader `json:"h"`
+	Payload ResponsePayload      `json:"p"`
+}
+
+// Verify checks hash and signature in response header.
+func (sh *SignedResponseHeader) Verify() (err error) {
+	// verify original request header
+	if err = sh.Request.Verify(); err != nil {
+		return
+	}
+	// verify hash
+	if err = verifyHash(&sh.ResponseHeader, &sh.Hash); err != nil {
+		return
+	}
+	// verify signature
+	if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) {
+		return ErrSignVerification
+	}
+
+	return nil
+}
+
+// Sign the response header.
+func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) {
+	// make sure the original request header is signed
+	if err = sh.Request.Verify(); err != nil {
+		err = errors.Wrapf(err, "SignedResponseHeader %v", sh)
+		return
+	}
+
+	// build our hash
+	if err = buildHash(&sh.ResponseHeader, &sh.Hash); err != nil {
+		return
+	}
+
+	// sign
+	sh.Signature, err = signer.Sign(sh.Hash[:])
+	sh.Signee = signer.PubKey()
+
+	return
+}
+
+// Verify checks hash and signature in whole response.
+func (sh *Response) Verify() (err error) {
+	// verify data hash in header
+	if err = verifyHash(&sh.Payload, &sh.Header.DataHash); err != nil {
+		return
+	}
+
+	return sh.Header.Verify()
+}
+
+// Sign the response.
+func (sh *Response) Sign(signer *asymmetric.PrivateKey) (err error) {
+	// set rows count
+	sh.Header.RowCount = uint64(len(sh.Payload.Rows))
+
+	// build hash in header
+	if err = buildHash(&sh.Payload, &sh.Header.DataHash); err != nil {
+		return
+	}
+
+	// sign the header
+	return sh.Header.Sign(signer)
+}
diff --git a/worker/types/response_type_gen.go b/worker/otypes/response_type_gen.go
similarity index 89%
rename from worker/types/response_type_gen.go
rename to worker/otypes/response_type_gen.go
index 8ce0f4c36..b3d6e68ce 100644
--- a/worker/types/response_type_gen.go
+++ b/worker/otypes/response_type_gen.go
@@ -1,4 +1,4 @@
-package types
+package otypes
 
 // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT.
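The constants that change in the generated hunk below are mechanical. HashStablePack's map header byte follows the msgpack fixmap encoding (0x80 OR'd with the field count), so adding LastInsertID and AffectedRows to ResponseHeader moves the header from 0x86 (six fields) to 0x88 (eight). Likewise, each literal in Msgsize appears to be the encoded key size, one fixstr header byte plus the field name, which is why the HeaderHash to Hash rename shrinks 11 to 5. A minimal sketch of that arithmetic (plain Go, illustrative only, not part of the patch):

package main

import "fmt"

// fixmapHeader is the msgpack fixmap header byte for a map of n entries
// (valid while n < 16): high nibble 0x8, low nibble the entry count.
func fixmapHeader(n int) byte { return 0x80 | byte(n) }

// keySize estimates a map key's contribution to Msgsize: one fixstr
// header byte plus the field name itself.
func keySize(name string) int { return 1 + len(name) }

func main() {
	fmt.Printf("%#x %#x\n", fixmapHeader(6), fixmapHeader(8))     // 0x86 0x88
	fmt.Println(keySize("LastInsertID"), keySize("AffectedRows")) // 13 13
	fmt.Println(keySize("HeaderHash"), keySize("Hash"))           // 11 5
}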
@@ -36,37 +36,41 @@ func (z *Response) Msgsize() (s int) { func (z *ResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 6 - o = append(o, 0x86, 0x86) + // map header, size 8 + o = append(o, 0x88, 0x88) if oTemp, err := z.Request.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) + o = append(o, 0x88) if oTemp, err := z.DataHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) + o = append(o, 0x88) + o = hsp.AppendInt64(o, z.LastInsertID) + o = append(o, 0x88) + o = hsp.AppendInt64(o, z.AffectedRows) + o = append(o, 0x88) if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) + o = append(o, 0x88) o = hsp.AppendTime(o, z.Timestamp) - o = append(o, 0x86) + o = append(o, 0x88) o = hsp.AppendUint64(o, z.RowCount) - o = append(o, 0x86) + o = append(o, 0x88) o = hsp.AppendUint64(o, z.LogOffset) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResponseHeader) Msgsize() (s int) { - s = 1 + 8 + z.Request.Msgsize() + 9 + z.DataHash.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + hsp.Uint64Size + 10 + hsp.Uint64Size + s = 1 + 8 + z.Request.Msgsize() + 9 + z.DataHash.Msgsize() + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + hsp.Uint64Size + 10 + hsp.Uint64Size return } @@ -178,7 +182,7 @@ func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -200,6 +204,6 @@ func (z *SignedResponseHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 15 + z.ResponseHeader.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 15 + z.ResponseHeader.Msgsize() + 5 + z.Hash.Msgsize() return } diff --git a/worker/otypes/response_type_gen_test.go b/worker/otypes/response_type_gen_test.go new file mode 100644 index 000000000..3b263b1ed --- /dev/null +++ b/worker/otypes/response_type_gen_test.go @@ -0,0 +1,195 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashResponse(t *testing.T) { + v := Response{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResponse(b *testing.B) { + v := Response{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResponse(b *testing.B) { + v := Response{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashResponseHeader(t *testing.T) { + v := ResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResponseHeader(b *testing.B) { + v := ResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResponseHeader(b *testing.B) { + v := ResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashResponsePayload(t *testing.T) { + v := ResponsePayload{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResponsePayload(b *testing.B) { + v := ResponsePayload{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResponsePayload(b *testing.B) { + v := ResponsePayload{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashResponseRow(t *testing.T) { + v := ResponseRow{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResponseRow(b *testing.B) { + v := ResponseRow{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResponseRow(b *testing.B) { + v := ResponseRow{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedResponseHeader(t *testing.T) { + v := SignedResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedResponseHeader(b *testing.B) { + v 
:= SignedResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedResponseHeader(b *testing.B) { + v := SignedResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/types_test.go b/worker/otypes/types_test.go similarity index 69% rename from worker/types/types_test.go rename to worker/otypes/types_test.go index 9df73ff44..aa498a453 100644 --- a/worker/types/types_test.go +++ b/worker/otypes/types_test.go @@ -14,17 +14,13 @@ * limitations under the License. */ -package types +package otypes import ( - "bytes" - "database/sql" "testing" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/pkg/errors" @@ -41,35 +37,6 @@ func getCommKeys() (*asymmetric.PrivateKey, *asymmetric.PublicKey) { return asymmetric.PrivKeyFromBytes(testPriv) } -type myTestBytes []byte - -func (bytes myTestBytes) Serialize() (res []byte) { - res = make([]byte, len(bytes)) - copy(res, bytes[:]) - return -} - -func Test_buildHash(t *testing.T) { - Convey("build", t, func() { - var a, b hash.Hash - var tb myTestBytes = []byte("test") - buildHash(tb, &a) - b = hash.THashH([]byte("test")) - So(a, ShouldResemble, b) - }) - - Convey("test verify", t, func() { - var a, b hash.Hash - var tb myTestBytes = []byte("test") - var err error - buildHash(tb, &a) - err = verifyHash(tb, &a) - So(err, ShouldBeNil) - err = verifyHash(tb, &b) - So(err, ShouldNotBeNil) - }) -} - func TestSignedRequestHeader_Sign(t *testing.T) { privKey, _ := getCommKeys() @@ -126,7 +93,7 @@ func TestRequest_Sign(t *testing.T) { Queries: []Query{ { Pattern: "INSERT INTO test VALUES(?)", - Args: []sql.NamedArg{ + Args: []NamedArg{ { Value: 1, }, @@ -134,7 +101,7 @@ func TestRequest_Sign(t *testing.T) { }, { Pattern: "INSERT INTO test VALUES(?)", - Args: []sql.NamedArg{ + Args: []NamedArg{ { Value: "happy", }, @@ -155,28 +122,6 @@ func TestRequest_Sign(t *testing.T) { err = verifyHash(&req.Payload, &req.Header.QueriesHash) So(err, ShouldBeNil) - Convey("serialize", func() { - So(req.Serialize(), ShouldNotBeEmpty) - So((*Request)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*RequestHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*RequestPayload)(nil).Serialize(), ShouldNotBeEmpty) - So((*SignedRequestHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := req.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - req.Header.Signee = nil - req.Header.Signature = nil - - s, err = req.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(req.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = req.Verify() So(err, ShouldBeNil) @@ -192,7 +137,7 @@ func TestRequest_Sign(t *testing.T) { Convey("header change without signing", func() { req.Header.Timestamp = req.Header.Timestamp.Add(time.Second) - buildHash(&req.Header.RequestHeader, &req.Header.HeaderHash) + buildHash(&req.Header.RequestHeader, &req.Header.Hash) err = req.Verify() So(err, ShouldNotBeNil) }) @@ -242,6 +187,7 @@ func TestResponse_Sign(t *testing.T) { "test_float", "test_binary_string", "test_string", + "test_empty_time", }, DeclTypes: []string{ 
"INTEGER", @@ -251,6 +197,7 @@ func TestResponse_Sign(t *testing.T) { "FLOAT", "BLOB", "TEXT", + "DATETIME", }, Rows: []ResponseRow{ { @@ -262,15 +209,14 @@ func TestResponse_Sign(t *testing.T) { float64(1.0001), "11111\0001111111", "11111111111111", + time.Time{}, }, }, }, }, } - var data *bytes.Buffer var err error - var rres Response // sign directly, embedded original request is not filled err = res.Sign(privKey) @@ -292,40 +238,20 @@ func TestResponse_Sign(t *testing.T) { err = verifyHash(&res.Payload, &res.Header.DataHash) So(err, ShouldBeNil) - Convey("serialize", func() { - So(res.Serialize(), ShouldNotBeEmpty) - So((*Response)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*ResponseRow)(nil).Serialize(), ShouldNotBeEmpty) - So((*ResponseHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*ResponsePayload)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedResponseHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - data, err = utils.EncodeMsgPack(res.Header) - So(err, ShouldBeNil) - err = utils.DecodeMsgPack(data.Bytes(), &rres.Header) - So(err, ShouldBeNil) - So(&res.Header, ShouldResemble, &rres.Header) - - s, err := res.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - res.Header.Signee = nil - res.Header.Signature = nil - - s, err = res.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(res.Serialize(), ShouldNotBeEmpty) - }) - // verify Convey("verify", func() { err = res.Verify() So(err, ShouldBeNil) + Convey("encode/decode verify", func() { + buf, err := utils.EncodeMsgPack(res) + So(err, ShouldBeNil) + var r *Response + err = utils.DecodeMsgPack(buf.Bytes(), &r) + So(err, ShouldBeNil) + err = r.Verify() + So(err, ShouldBeNil) + }) Convey("request change", func() { res.Header.Request.BatchCount = 200 @@ -346,7 +272,7 @@ func TestResponse_Sign(t *testing.T) { }) Convey("header change without signing", func() { res.Header.Timestamp = res.Header.Timestamp.Add(time.Second) - buildHash(&res.Header.ResponseHeader, &res.Header.HeaderHash) + buildHash(&res.Header.ResponseHeader, &res.Header.Hash) err = res.Verify() So(err, ShouldNotBeNil) @@ -385,9 +311,7 @@ func TestAck_Sign(t *testing.T) { }, } - var data *bytes.Buffer var err error - var rack Ack Convey("get query key", func() { key := ack.Header.SignedRequestHeader().GetQueryKey() @@ -416,33 +340,6 @@ func TestAck_Sign(t *testing.T) { err = ack.Sign(privKey, true) So(err, ShouldBeNil) - Convey("serialize", func() { - So(ack.Serialize(), ShouldNotBeEmpty) - So((*Ack)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*AckHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedAckHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - data, err = utils.EncodeMsgPack(ack.Header) - So(err, ShouldBeNil) - err = utils.DecodeMsgPack(data.Bytes(), &rack.Header) - So(err, ShouldBeNil) - So(&ack.Header, ShouldResemble, &rack.Header) - - s, err := ack.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - ack.Header.Signee = nil - ack.Header.Signature = nil - - s, err = ack.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(ack.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = ack.Verify() So(err, ShouldBeNil) @@ -468,7 +365,7 @@ func TestAck_Sign(t *testing.T) { Convey("header change without signing", func() { ack.Header.Timestamp = ack.Header.Timestamp.Add(time.Second) - buildHash(&ack.Header.AckHeader, &ack.Header.HeaderHash) + 
buildHash(&ack.Header.AckHeader, &ack.Header.Hash) err = ack.Verify() So(err, ShouldNotBeNil) @@ -525,27 +422,6 @@ func TestNoAckReport_Sign(t *testing.T) { err = noAck.Sign(privKey) So(err, ShouldBeNil) - Convey("serialize", func() { - So(noAck.Serialize(), ShouldNotBeEmpty) - So((*NoAckReport)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*NoAckReportHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedNoAckReportHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := noAck.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - noAck.Header.Signee = nil - noAck.Header.Signature = nil - - s, err = noAck.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(noAck.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = noAck.Verify() So(err, ShouldBeNil) @@ -574,7 +450,7 @@ func TestNoAckReport_Sign(t *testing.T) { Convey("header change without signing", func() { noAck.Header.Timestamp = noAck.Header.Timestamp.Add(time.Second) - buildHash(&noAck.Header.NoAckReportHeader, &noAck.Header.HeaderHash) + buildHash(&noAck.Header.NoAckReportHeader, &noAck.Header.Hash) err = noAck.Verify() So(err, ShouldNotBeNil) @@ -640,20 +516,13 @@ func TestAggrNoAckReport_Sign(t *testing.T) { }, }, }, - Peers: &kayak.Peers{ - Term: uint64(1), - Leader: &kayak.Server{ - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - { - Role: proto.Follower, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), }, }, }, @@ -687,27 +556,6 @@ func TestAggrNoAckReport_Sign(t *testing.T) { err = aggrNoAck.Sign(privKey) So(err, ShouldBeNil) - Convey("serialize", func() { - So(aggrNoAck.Serialize(), ShouldNotBeEmpty) - So((*AggrNoAckReport)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*AggrNoAckReportHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedAggrNoAckReportHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := aggrNoAck.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - aggrNoAck.Header.Signee = nil - aggrNoAck.Header.Signature = nil - - s, err = aggrNoAck.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(aggrNoAck.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = aggrNoAck.Verify() So(err, ShouldBeNil) @@ -743,7 +591,7 @@ func TestAggrNoAckReport_Sign(t *testing.T) { Convey("header change without signing", func() { aggrNoAck.Header.Timestamp = aggrNoAck.Header.Timestamp.Add(time.Second) - buildHash(&aggrNoAck.Header.AggrNoAckReportHeader, &aggrNoAck.Header.HeaderHash) + buildHash(&aggrNoAck.Header.AggrNoAckReportHeader, &aggrNoAck.Header.Hash) err = aggrNoAck.Verify() So(err, ShouldNotBeNil) @@ -753,7 +601,7 @@ func TestAggrNoAckReport_Sign(t *testing.T) { } func TestInitServiceResponse_Sign(t *testing.T) { - privKey, pubKey := getCommKeys() + privKey, _ := getCommKeys() Convey("sign", t, func() { var err 
error @@ -764,24 +612,15 @@ func TestInitServiceResponse_Sign(t *testing.T) { Instances: []ServiceInstance{ { DatabaseID: proto.DatabaseID("db1"), - Peers: &kayak.Peers{ - Term: uint64(1), - Leader: &kayak.Server{ - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - { - Role: proto.Follower, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), }, }, - PubKey: pubKey, - Signature: nil, }, // TODO(xq262144), should integrated with genesis block serialization test GenesisBlock: nil, @@ -794,28 +633,6 @@ func TestInitServiceResponse_Sign(t *testing.T) { // sign err = initServiceResponse.Sign(privKey) - Convey("serialize", func() { - So(initServiceResponse.Serialize(), ShouldNotBeEmpty) - So((*ServiceInstance)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*InitServiceResponse)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*InitServiceResponseHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedInitServiceResponseHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := initServiceResponse.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - initServiceResponse.Header.Signee = nil - initServiceResponse.Header.Signature = nil - - s, err = initServiceResponse.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(initServiceResponse.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = initServiceResponse.Verify() So(err, ShouldBeNil) @@ -830,7 +647,7 @@ func TestInitServiceResponse_Sign(t *testing.T) { Convey("header change without signing", func() { initServiceResponse.Header.Instances[0].DatabaseID = proto.DatabaseID("db2") - buildHash(&initServiceResponse.Header.InitServiceResponseHeader, &initServiceResponse.Header.HeaderHash) + buildHash(&initServiceResponse.Header.InitServiceResponseHeader, &initServiceResponse.Header.Hash) s, err := initServiceResponse.Header.InitServiceResponseHeader.MarshalHash() So(err, ShouldBeNil) @@ -844,7 +661,7 @@ func TestInitServiceResponse_Sign(t *testing.T) { } func TestUpdateService_Sign(t *testing.T) { - privKey, pubKey := getCommKeys() + privKey, _ := getCommKeys() Convey("sign", t, func() { var err error @@ -855,24 +672,15 @@ func TestUpdateService_Sign(t *testing.T) { Op: CreateDB, Instance: ServiceInstance{ DatabaseID: proto.DatabaseID("db1"), - Peers: &kayak.Peers{ - Term: uint64(1), - Leader: &kayak.Server{ - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - { - Role: proto.Follower, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + 
Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), }, }, - PubKey: pubKey, - Signature: nil, }, // TODO(xq262144), should integrated with genesis block serialization test GenesisBlock: nil, @@ -884,26 +692,6 @@ func TestUpdateService_Sign(t *testing.T) { // sign err = updateServiceReq.Sign(privKey) - Convey("serialize", func() { - So(updateServiceReq.Serialize(), ShouldNotBeEmpty) - So((*UpdateService)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*UpdateServiceHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedUpdateServiceHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := updateServiceReq.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - updateServiceReq.Header.Signee = nil - updateServiceReq.Header.Signature = nil - - s, err = updateServiceReq.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(updateServiceReq.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = updateServiceReq.Verify() So(err, ShouldBeNil) @@ -917,7 +705,7 @@ func TestUpdateService_Sign(t *testing.T) { Convey("header change without signing", func() { updateServiceReq.Header.Instance.DatabaseID = proto.DatabaseID("db2") - buildHash(&updateServiceReq.Header.UpdateServiceHeader, &updateServiceReq.Header.HeaderHash) + buildHash(&updateServiceReq.Header.UpdateServiceHeader, &updateServiceReq.Header.Hash) err = updateServiceReq.Verify() So(err, ShouldNotBeNil) diff --git a/worker/types/update_service_type.go b/worker/otypes/update_service_type.go similarity index 64% rename from worker/types/update_service_type.go rename to worker/otypes/update_service_type.go index ccea45e27..24c83c49b 100644 --- a/worker/types/update_service_type.go +++ b/worker/otypes/update_service_type.go @@ -14,12 +14,9 @@ * limitations under the License. */ -package types +package otypes import ( - "bytes" - "encoding/binary" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" @@ -48,9 +45,9 @@ type UpdateServiceHeader struct { // SignedUpdateServiceHeader defines signed service update header. type SignedUpdateServiceHeader struct { UpdateServiceHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // UpdateService defines service update type. @@ -62,52 +59,14 @@ type UpdateService struct { // UpdateServiceResponse defines empty response entity. type UpdateServiceResponse struct{} -// Serialize structure to bytes. -func (h *UpdateServiceHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, int32(h.Op)) - buf.Write(h.Instance.Serialize()) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedUpdateServiceHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.UpdateServiceHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in update service header. 
func (sh *SignedUpdateServiceHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.UpdateServiceHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.UpdateServiceHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -116,24 +75,17 @@ func (sh *SignedUpdateServiceHeader) Verify() (err error) { // Sign the request. func (sh *SignedUpdateServiceHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.UpdateServiceHeader, &sh.HeaderHash) + if err = buildHash(&sh.UpdateServiceHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (s *UpdateService) Serialize() []byte { - if s == nil { - return []byte{'\000'} - } - - return s.Header.Serialize() -} - // Verify checks hash and signature in update service. func (s *UpdateService) Verify() error { return s.Header.Verify() diff --git a/worker/types/update_service_type_gen.go b/worker/otypes/update_service_type_gen.go similarity index 96% rename from worker/types/update_service_type_gen.go rename to worker/otypes/update_service_type_gen.go index 1d262f53a..e48777622 100644 --- a/worker/types/update_service_type_gen.go +++ b/worker/otypes/update_service_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. @@ -41,7 +41,7 @@ func (z *SignedUpdateServiceHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -63,7 +63,7 @@ func (z *SignedUpdateServiceHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() + 5 + z.Hash.Msgsize() return } diff --git a/worker/otypes/update_service_type_gen_test.go b/worker/otypes/update_service_type_gen_test.go new file mode 100644 index 000000000..d78a63158 --- /dev/null +++ b/worker/otypes/update_service_type_gen_test.go @@ -0,0 +1,158 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashSignedUpdateServiceHeader(t *testing.T) { + v := SignedUpdateServiceHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedUpdateServiceHeader(b *testing.B) { + v := SignedUpdateServiceHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedUpdateServiceHeader(b *testing.B) { + v := SignedUpdateServiceHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashUpdateService(t *testing.T) { + v := UpdateService{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUpdateService(b *testing.B) { + v := UpdateService{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUpdateService(b *testing.B) { + v := UpdateService{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashUpdateServiceHeader(t *testing.T) { + v := UpdateServiceHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUpdateServiceHeader(b *testing.B) { + v := UpdateServiceHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUpdateServiceHeader(b *testing.B) { + v := UpdateServiceHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashUpdateServiceResponse(t *testing.T) { + v := UpdateServiceResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUpdateServiceResponse(b *testing.B) { + v := UpdateServiceResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUpdateServiceResponse(b *testing.B) { + v := UpdateServiceResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/util.go b/worker/otypes/util.go similarity index 66% rename from worker/types/util.go rename to worker/otypes/util.go index fb6bbd152..0108aec1e 100644 --- a/worker/types/util.go +++ b/worker/otypes/util.go 
@@ -14,26 +14,33 @@ * limitations under the License. */ -package types +package otypes import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" ) -type canSerialize interface { - Serialize() []byte +type canMarshalHash interface { + MarshalHash() ([]byte, error) } -func verifyHash(data canSerialize, h *hash.Hash) (err error) { +func verifyHash(data canMarshalHash, h *hash.Hash) (err error) { var newHash hash.Hash - buildHash(data, &newHash) + if err = buildHash(data, &newHash); err != nil { + return + } if !newHash.IsEqual(h) { return ErrHashVerification } return } -func buildHash(data canSerialize, h *hash.Hash) { - newHash := hash.THashH(data.Serialize()) +func buildHash(data canMarshalHash, h *hash.Hash) (err error) { + var hashBytes []byte + if hashBytes, err = data.MarshalHash(); err != nil { + return + } + newHash := hash.THashH(hashBytes) copy(h[:], newHash[:]) + return } diff --git a/worker/types/request_type_gen.go b/worker/types/request_type_gen.go deleted file mode 100644 index 542809bd5..000000000 --- a/worker/types/request_type_gen.go +++ /dev/null @@ -1,145 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. - -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *QueryKey) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - o = hsp.AppendUint64(o, z.ConnectionID) - o = append(o, 0x83) - o = hsp.AppendUint64(o, z.SeqNo) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *QueryKey) Msgsize() (s int) { - s = 1 + 7 + z.NodeID.Msgsize() + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size - return -} - -// MarshalHash marshals for hash -func (z QueryType) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - o = hsp.AppendInt32(o, int32(z)) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z QueryType) Msgsize() (s int) { - s = hsp.Int32Size - return -} - -// MarshalHash marshals for hash -func (z *RequestHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 8 - o = append(o, 0x88, 0x88) - o = hsp.AppendInt32(o, int32(z.QueryType)) - o = append(o, 0x88) - if oTemp, err := z.QueriesHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x88) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x88) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x88) - o = hsp.AppendTime(o, z.Timestamp) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.ConnectionID) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.SeqNo) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.BatchCount) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *RequestHeader) Msgsize() (s int) { - s = 1 + 10 + hsp.Int32Size + 12 + z.QueriesHash.Msgsize() + 11 + z.DatabaseID.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 13 + hsp.Uint64Size + 6 
+ hsp.Uint64Size + 11 + hsp.Uint64Size - return -} - -// MarshalHash marshals for hash -func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if oTemp, err := z.RequestHeader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *SignedRequestHeader) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 14 + z.RequestHeader.Msgsize() + 11 + z.HeaderHash.Msgsize() - return -} diff --git a/worker/types/response_type.go b/worker/types/response_type.go deleted file mode 100644 index 1f0d3ee77..000000000 --- a/worker/types/response_type.go +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - "bytes" - "encoding/binary" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/pkg/errors" -) - -//go:generate hsp - -// ResponseRow defines single row of query response. -type ResponseRow struct { - Values []interface{} -} - -// ResponsePayload defines column names and rows of query response. -type ResponsePayload struct { - Columns []string `json:"c"` - DeclTypes []string `json:"t"` - Rows []ResponseRow `json:"r"` -} - -// ResponseHeader defines a query response header. -type ResponseHeader struct { - Request SignedRequestHeader `json:"r"` - NodeID proto.NodeID `json:"id"` // response node id - Timestamp time.Time `json:"t"` // time in UTC zone - RowCount uint64 `json:"c"` // response row count of payload - LogOffset uint64 `json:"o"` // request log offset - DataHash hash.Hash `json:"dh"` // hash of query response -} - -// SignedResponseHeader defines a signed query response header. -type SignedResponseHeader struct { - ResponseHeader - HeaderHash hash.Hash `json:"h"` - Signee *asymmetric.PublicKey `json:"e"` - Signature *asymmetric.Signature `json:"s"` -} - -// Response defines a complete query response. 
-type Response struct { - Header SignedResponseHeader `json:"h"` - Payload ResponsePayload `json:"p"` -} - -// Serialize structure to bytes. -func (r *ResponseRow) Serialize() []byte { - // HACK(xq262144), currently use idiomatic serialization for hash generation - buf, _ := utils.EncodeMsgPack(r) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (r *ResponsePayload) Serialize() []byte { - if r == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, uint64(len(r.Columns))) - for _, c := range r.Columns { - buf.WriteString(c) - } - - binary.Write(buf, binary.LittleEndian, uint64(len(r.DeclTypes))) - for _, t := range r.DeclTypes { - buf.WriteString(t) - } - - binary.Write(buf, binary.LittleEndian, uint64(len(r.Rows))) - for _, row := range r.Rows { - buf.Write(row.Serialize()) - } - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (h *ResponseHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(h.Request.Serialize()) - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) - binary.Write(buf, binary.LittleEndian, h.RowCount) - binary.Write(buf, binary.LittleEndian, h.LogOffset) - buf.Write(h.DataHash[:]) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedResponseHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.ResponseHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - -// Verify checks hash and signature in response header. -func (sh *SignedResponseHeader) Verify() (err error) { - // verify original request header - if err = sh.Request.Verify(); err != nil { - return - } - // verify hash - if err = verifyHash(&sh.ResponseHeader, &sh.HeaderHash); err != nil { - return - } - // verify signature - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { - return ErrSignVerification - } - - return nil -} - -// Sign the request. -func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // make sure original header is signed - if err = sh.Request.Verify(); err != nil { - err = errors.Wrapf(err, "SignedResponseHeader %v", sh) - return - } - - // build our hash - buildHash(&sh.ResponseHeader, &sh.HeaderHash) - - // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) - sh.Signee = signer.PubKey() - - return -} - -// Serialize structure to bytes. -func (sh *Response) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.Header.Serialize()) - buf.Write(sh.Payload.Serialize()) - - return buf.Bytes() -} - -// Verify checks hash and signature in whole response. -func (sh *Response) Verify() (err error) { - // verify data hash in header - if err = verifyHash(&sh.Payload, &sh.Header.DataHash); err != nil { - return - } - - return sh.Header.Verify() -} - -// Sign the request. 
-func (sh *Response) Sign(signer *asymmetric.PrivateKey) (err error) {
-	// set rows count
-	sh.Header.RowCount = uint64(len(sh.Payload.Rows))
-
-	// build hash in header
-	buildHash(&sh.Payload, &sh.Header.DataHash)
-
-	// sign the request
-	return sh.Header.Sign(signer)
-}
diff --git a/xenomint/chain.go b/xenomint/chain.go
new file mode 100644
index 000000000..5b24c5956
--- /dev/null
+++ b/xenomint/chain.go
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xenomint
+
+import (
+	ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
+	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
+	"github.com/CovenantSQL/CovenantSQL/crypto/kms"
+	"github.com/CovenantSQL/CovenantSQL/proto"
+	"github.com/CovenantSQL/CovenantSQL/types"
+	xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces"
+	xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite"
+	xt "github.com/CovenantSQL/CovenantSQL/xenomint/types"
+)
+
+const (
+	inCommandBufferLength  = 100000
+	outCommandBufferLength = 100000
+)
+
+type applyRequest struct {
+	request  *types.Request
+	response *types.Response
+}
+
+type blockNode struct {
+	parent *blockNode
+	// Cached block fields
+	hash   hash.Hash
+	count  int32
+	height int32
+	// Cached block object, may be nil
+	block *xt.Block
+}
+
+// Chain defines the xenomint chain structure.
+type Chain struct {
+	state *State
+	// Cached fields
+	priv *ca.PrivateKey
+}
+
+// NewChain returns a new chain instance.
+func NewChain(filename string) (c *Chain, err error) {
+	var (
+		strg  xi.Storage
+		state *State
+		priv  *ca.PrivateKey
+	)
+	// generate an empty node ID
+	nodeID := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000")
+
+	// TODO(leventeliu): add multiple storage engine support.
+	if strg, err = xs.NewSqlite(filename); err != nil {
+		return
+	}
+	if state, err = NewState(nodeID, strg); err != nil {
+		return
+	}
+	if priv, err = kms.GetLocalPrivateKey(); err != nil {
+		return
+	}
+	c = &Chain{
+		state: state,
+		priv:  priv,
+	}
+	return
+}
+
+// Query queries req from the local chain state and returns the query results in resp.
+func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) {
+	var ref *QueryTracker
+	if ref, resp, err = c.state.Query(req); err != nil {
+		return
+	}
+	if err = resp.Sign(c.priv); err != nil {
+		return
+	}
+	ref.UpdateResp(resp)
+	return
+}
+
+// Stop stops chain workers and RPC service.
+func (c *Chain) Stop() (err error) {
+	// Close all opened resources
+	return c.state.Close(true)
+}
diff --git a/xenomint/chain_test.go b/xenomint/chain_test.go
new file mode 100644
index 000000000..a20fa4fa1
--- /dev/null
+++ b/xenomint/chain_test.go
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "database/sql" + "fmt" + "math/rand" + "os" + "path" + "testing" + + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/types" +) + +func setupBenchmarkChain(b *testing.B) (c *Chain, n int, r []*types.Request) { + // Setup chain state + var ( + fl = path.Join(testingDataDir, b.Name()) + err error + stmt *sql.Stmt + ) + if c, err = NewChain(fmt.Sprint("file:", fl)); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if _, err = c.state.strg.Writer().Exec( + `CREATE TABLE "bench" ("k" INT, "v1" TEXT, "v2" TEXT, "v3" TEXT, PRIMARY KEY("k"))`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if stmt, err = c.state.strg.Writer().Prepare( + `INSERT INTO "bench" VALUES (?, ?, ?, ?)`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + for i := 0; i < benchmarkKeySpace; i++ { + var ( + vals [benchmarkVNum][benchmarkVLen]byte + args [benchmarkVNum + 1]interface{} + ) + args[0] = i + for i := range vals { + rand.Read(vals[i][:]) + args[i+1] = string(vals[i][:]) + } + if _, err = stmt.Exec(args[:]...); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + } + n = benchmarkKeySpace + // Setup query requests + var ( + sel = `SELECT "v1", "v2", "v3" FROM "bench" WHERE "k"=?` + ins = `INSERT INTO "bench" VALUES (?, ?, ?, ?) 
+ ON CONFLICT("k") DO UPDATE SET + "v1"="excluded"."v1", + "v2"="excluded"."v2", + "v3"="excluded"."v3" +` + priv *ca.PrivateKey + src = make([][]interface{}, benchmarkKeySpace) + ) + if priv, err = kms.GetLocalPrivateKey(); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + r = make([]*types.Request, 2*benchmarkKeySpace) + // Read query key space [0, n-1] + for i := 0; i < benchmarkKeySpace; i++ { + r[i] = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(sel, i), + }) + if err = r[i].Sign(priv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + } + // Write query key space [n, 2n-1] + for i := range src { + var vals [benchmarkVNum][benchmarkVLen]byte + src[i] = make([]interface{}, benchmarkVNum+1) + src[i][0] = i + benchmarkKeySpace + for j := range vals { + rand.Read(vals[j][:]) + src[i][j+1] = string(vals[j][:]) + } + } + for i := 0; i < benchmarkKeySpace; i++ { + r[benchmarkKeySpace+i] = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(ins, src[i]...), + }) + if err = r[i+benchmarkKeySpace].Sign(priv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + } + + b.ResetTimer() + return +} + +func teardownBenchmarkChain(b *testing.B, c *Chain) { + b.StopTimer() + + var ( + fl = path.Join(testingDataDir, b.Name()) + err error + ) + if err = c.Stop(); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fl); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-shm")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-wal")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } +} + +func BenchmarkChainParallelWrite(b *testing.B) { + var c, n, r = setupBenchmarkChain(b) + b.RunParallel(func(pb *testing.PB) { + var err error + for i := 0; pb.Next(); i++ { + if _, err = c.Query(r[n+rand.Intn(n)]); err != nil { + b.Fatalf("Failed to execute: %v", err) + } + if (i+1)%benchmarkQueriesPerBlock == 0 { + if err = c.state.commit(); err != nil { + b.Fatalf("Failed to commit block: %v", err) + } + } + } + }) + teardownBenchmarkChain(b, c) +} + +func BenchmarkChainParallelMixRW(b *testing.B) { + var c, n, r = setupBenchmarkChain(b) + b.RunParallel(func(pb *testing.PB) { + var err error + for i := 0; pb.Next(); i++ { + if _, err = c.Query(r[rand.Intn(2*n)]); err != nil { + b.Fatalf("Failed to execute: %v", err) + } + if (i+1)%benchmarkQueriesPerBlock == 0 { + if err = c.state.commit(); err != nil { + b.Fatalf("Failed to commit block: %v", err) + } + } + } + }) + teardownBenchmarkChain(b, c) +} diff --git a/xenomint/doc.go b/xenomint/doc.go new file mode 100644 index 000000000..3ee1ded09 --- /dev/null +++ b/xenomint/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+// Package xenomint provides an eventually consistent implementation of the blockchain database.
+package xenomint
diff --git a/xenomint/errors.go b/xenomint/errors.go
new file mode 100644
index 000000000..725b140cb
--- /dev/null
+++ b/xenomint/errors.go
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xenomint
+
+import (
+	"errors"
+)
+
+var (
+	// ErrMissingParent indicates the parent of the current query attempt is missing.
+	ErrMissingParent = errors.New("query missing parent")
+	// ErrInvalidRequest indicates the query is invalid.
+	ErrInvalidRequest = errors.New("invalid request")
+	// ErrQueryExists indicates the query already exists in the pool.
+	ErrQueryExists = errors.New("query already exists")
+	// ErrStateClosed indicates the state is closed.
+	ErrStateClosed = errors.New("state is closed")
+	// ErrQueryConflict indicates that there is a conflict on query replay.
+	ErrQueryConflict = errors.New("query conflict")
+	// ErrLocalBehindRemote indicates the local state is behind the remote.
+	ErrLocalBehindRemote = errors.New("local state is behind the remote")
+	// ErrMuxServiceNotFound indicates that the multiplexing service endpoint is not found.
+	ErrMuxServiceNotFound = errors.New("mux service not found")
+)
diff --git a/chain/interfaces/doc.go b/xenomint/interfaces/doc.go
similarity index 88%
rename from chain/interfaces/doc.go
rename to xenomint/interfaces/doc.go
index 222335fed..c873b128c 100644
--- a/chain/interfaces/doc.go
+++ b/xenomint/interfaces/doc.go
@@ -14,5 +14,5 @@
  * limitations under the License.
  */
 
-// Package interfaces defines commonly used interfaces for block chain.
+// Package interfaces defines commonly used interfaces of the xenomint package.
 package interfaces
diff --git a/kayak/util.go b/xenomint/interfaces/interfaces.go
similarity index 68%
rename from kayak/util.go
rename to xenomint/interfaces/interfaces.go
index ab53256c7..17427cde9 100644
--- a/kayak/util.go
+++ b/xenomint/interfaces/interfaces.go
@@ -14,20 +14,17 @@
  * limitations under the License.
  */
 
-package kayak
+package interfaces
 
 import (
-	"encoding/binary"
+	"database/sql"
 )
 
-// Converts bytes to an integer.
-func bytesToUint64(b []byte) uint64 {
-	return binary.BigEndian.Uint64(b)
-}
-
-// Converts a uint to a byte slice.
-func uint64ToBytes(u uint64) []byte {
-	buf := make([]byte, 8)
-	binary.BigEndian.PutUint64(buf, u)
-	return buf
+// Storage is the interface implemented by an object that returns standard *sql.DB as DirtyReader,
+// Reader, or Writer and can be closed by Close.
+type Storage interface {
+	DirtyReader() *sql.DB
+	Reader() *sql.DB
+	Writer() *sql.DB
+	Close() error
 }
diff --git a/xenomint/mux.go b/xenomint/mux.go
new file mode 100644
index 000000000..b1bd308ee
--- /dev/null
+++ b/xenomint/mux.go
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xenomint
+
+import (
+	//"context"
+	//"runtime/trace"
+	"sync"
+
+	"github.com/CovenantSQL/CovenantSQL/proto"
+	"github.com/CovenantSQL/CovenantSQL/rpc"
+	"github.com/CovenantSQL/CovenantSQL/types"
+)
+
+// MuxService defines the multiplexing service of the xenomint chain.
+type MuxService struct {
+	ServiceName string
+	// serviceMap maps DatabaseID to *Chain.
+	serviceMap sync.Map
+}
+
+// NewMuxService returns a new MuxService instance and registers it to the given server.
+func NewMuxService(name string, server *rpc.Server) (service *MuxService, err error) {
+	var s = &MuxService{
+		ServiceName: name,
+	}
+	if err = server.RegisterService(name, s); err != nil {
+		return
+	}
+	service = s
+	return
+}
+
+func (s *MuxService) register(id proto.DatabaseID, c *Chain) {
+	s.serviceMap.Store(id, c)
+}
+
+func (s *MuxService) unregister(id proto.DatabaseID) {
+	s.serviceMap.Delete(id)
+}
+
+func (s *MuxService) route(id proto.DatabaseID) (c *Chain, err error) {
+	var (
+		i  interface{}
+		ok bool
+	)
+	if i, ok = s.serviceMap.Load(id); !ok {
+		err = ErrMuxServiceNotFound
+		return
+	}
+	if c, ok = i.(*Chain); !ok {
+		err = ErrMuxServiceNotFound
+		return
+	}
+	return
+}
+
+// MuxQueryRequest defines a request of the Query RPC method.
+type MuxQueryRequest struct {
+	proto.DatabaseID
+	proto.Envelope
+	Request *types.Request
+}
+
+// MuxQueryResponse defines a response of the Query RPC method.
+type MuxQueryResponse struct {
+	proto.DatabaseID
+	proto.Envelope
+	Response *types.Response
+}
+
+// Query is the RPC method to process a database query on the mux service.
+func (s *MuxService) Query(req *MuxQueryRequest, resp *MuxQueryResponse) (err error) {
+	//var ctx, task = trace.NewTask(context.Background(), "MuxService.Query")
+	//defer task.End()
+	//defer trace.StartRegion(ctx, "Total").End()
+	var (
+		c *Chain
+		r *types.Response
+	)
+	if c, err = s.route(req.DatabaseID); err != nil {
+		return
+	}
+	if r, err = c.Query(req.Request); err != nil {
+		return
+	}
+	*resp = MuxQueryResponse{
+		Envelope:   req.Envelope,
+		DatabaseID: req.DatabaseID,
+		Response:   r,
+	}
+	return
+}
+
+// MuxLeaderCommitRequest defines a request of the leader commit RPC method.
+type MuxLeaderCommitRequest struct {
+	proto.DatabaseID
+	proto.Envelope
+	// Height is the expected block height of this commit.
+	Height int32
+}
+
+// MuxLeaderCommitResponse defines a response of the leader commit RPC method.
+type MuxLeaderCommitResponse struct {
+	proto.DatabaseID
+	proto.Envelope
+	// Height is the expected block height of this commit.
+	Height int32
+	Offset uint64
+}
diff --git a/xenomint/mux_test.go b/xenomint/mux_test.go
new file mode 100644
index 000000000..e4385ac5a
--- /dev/null
+++ b/xenomint/mux_test.go
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "database/sql" + "fmt" + "math/rand" + "os" + "path" + "strings" + "testing" + + "github.com/CovenantSQL/CovenantSQL/conf" + con "github.com/CovenantSQL/CovenantSQL/consistent" + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" +) + +type nodeRPCInfo struct { + node proto.Node + server *rpc.Server +} + +func setupBenchmarkMuxParallel(b *testing.B) ( + bp, miner *nodeRPCInfo, ms *MuxService, r []*MuxQueryRequest, +) { + var ( + priv *ca.PrivateKey + nis []proto.Node + dht *route.DHTService + bpSv, mnSv *rpc.Server + err error + ) + // Use testing private key to create several nodes + if priv, err = kms.GetLocalPrivateKey(); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if nis, err = createNodesWithPublicKey(priv.PubKey(), testingNonceDifficulty, 3); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } else if l := len(nis); l != 3 { + b.Fatalf("Failed to setup bench environment: unexpected length %d", l) + } + // Setup block producer RPC and register server address + bpSv = rpc.NewServer() + if err = bpSv.InitRPCServer( + "localhost:0", testingPrivateKeyFile, testingMasterKey, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + nis[0].Addr = bpSv.Listener.Addr().String() + nis[0].Role = proto.Leader + // Setup miner RPC and register server address + mnSv = rpc.NewServer() + if err = mnSv.InitRPCServer( + "localhost:0", testingPrivateKeyFile, testingMasterKey, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + nis[1].Addr = mnSv.Listener.Addr().String() + nis[1].Role = proto.Miner + // Setup client + nis[2].Role = proto.Client + // Setup global config + conf.GConf = &conf.Config{ + IsTestMode: true, + GenerateKeyPair: false, + MinNodeIDDifficulty: testingNonceDifficulty, + BP: &conf.BPInfo{ + PublicKey: priv.PubKey(), + NodeID: nis[0].ID, + Nonce: nis[0].Nonce, + }, + KnownNodes: nis, + } + // Register DHT service, this will also initialize the public key store + if dht, err = route.NewDHTService( + testingPublicKeyStoreFile, &con.KMSStorage{}, true, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } else if err = bpSv.RegisterService(route.DHTRPCName, dht); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + kms.SetLocalNodeIDNonce(nis[2].ID.ToRawNodeID().CloneBytes(), &nis[2].Nonce) + for i := range nis { + route.SetNodeAddrCache(nis[i].ID.ToRawNodeID(), nis[i].Addr) + kms.SetNode(&nis[i]) + } + // Register mux service + if ms, err = NewMuxService(benchmarkRPCName, mnSv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + + // Setup query requests + var ( + sel = `SELECT "v1", "v2", "v3" FROM "bench" WHERE "k"=?` + ins = `INSERT INTO "bench" VALUES (?, ?, ?, ?) 
+ ON CONFLICT("k") DO UPDATE SET + "v1"="excluded"."v1", + "v2"="excluded"."v2", + "v3"="excluded"."v3" +` + src = make([][]interface{}, benchmarkKeySpace) + ) + r = make([]*MuxQueryRequest, 2*benchmarkKeySpace) + // Read query key space [0, n-1] + for i := 0; i < benchmarkKeySpace; i++ { + var req = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(sel, i), + }) + if err = req.Sign(priv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + r[i] = &MuxQueryRequest{ + DatabaseID: benchmarkDatabaseID, + Request: req, + } + } + // Write query key space [n, 2n-1] + for i := range src { + var vals [benchmarkVNum][benchmarkVLen]byte + src[i] = make([]interface{}, benchmarkVNum+1) + src[i][0] = i + benchmarkKeySpace + for j := range vals { + rand.Read(vals[j][:]) + src[i][j+1] = string(vals[j][:]) + } + } + for i := 0; i < benchmarkKeySpace; i++ { + var req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(ins, src[i]...), + }) + if err = req.Sign(priv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + r[benchmarkKeySpace+i] = &MuxQueryRequest{ + DatabaseID: benchmarkDatabaseID, + Request: req, + } + } + + bp = &nodeRPCInfo{ + node: nis[0], + server: bpSv, + } + miner = &nodeRPCInfo{ + node: nis[1], + server: mnSv, + } + + go bpSv.Serve() + go mnSv.Serve() + //ca.BypassSignature = true + return +} + +func teardownBenchmarkMuxParallel(b *testing.B, bpSv, mnSv *rpc.Server) { + //ca.BypassSignature = false + mnSv.Stop() + bpSv.Stop() +} + +func setupSubBenchmarkMuxParallel(b *testing.B, ms *MuxService) (c *Chain) { + // Setup chain state + var ( + fl = path.Join(testingDataDir, strings.Replace(b.Name(), "/", "-", -1)) + err error + stmt *sql.Stmt + ) + if c, err = NewChain(fmt.Sprint("file:", fl)); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if _, err = c.state.strg.Writer().Exec( + `CREATE TABLE "bench" ("k" INT, "v1" TEXT, "v2" TEXT, "v3" TEXT, PRIMARY KEY("k"))`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if stmt, err = c.state.strg.Writer().Prepare( + `INSERT INTO "bench" VALUES (?, ?, ?, ?)`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + for i := 0; i < benchmarkKeySpace; i++ { + var ( + vals [benchmarkVNum][benchmarkVLen]byte + args [benchmarkVNum + 1]interface{} + ) + args[0] = i + for i := range vals { + rand.Read(vals[i][:]) + args[i+1] = string(vals[i][:]) + } + if _, err = stmt.Exec(args[:]...); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + } + ms.register(benchmarkDatabaseID, c) + + b.ResetTimer() + return +} + +func teardownSubBenchmarkMuxParallel(b *testing.B, ms *MuxService) { + b.StopTimer() + + var ( + fl = path.Join(testingDataDir, strings.Replace(b.Name(), "/", "-", -1)) + err error + c *Chain + ) + // Stop RPC server + if c, err = ms.route(benchmarkDatabaseID); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + ms.unregister(benchmarkDatabaseID) + // Close chain + if err = c.Stop(); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fl); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-shm")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-wal")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } +} + 
+func BenchmarkMuxParallel(b *testing.B) {
+	var bp, s, ms, r = setupBenchmarkMuxParallel(b)
+	defer teardownBenchmarkMuxParallel(b, bp.server, s.server)
+	var benchmarks = []struct {
+		name    string
+		randkey func(n int) int // Returns a random key from the given key space
+	}{
+		{
+			name:    "Write",
+			randkey: func(n int) int { return n + rand.Intn(n) },
+		}, {
+			name:    "MixRW",
+			randkey: func(n int) int { return rand.Intn(2 * n) },
+		},
+	}
+	for _, bm := range benchmarks {
+		b.Run(bm.name, func(b *testing.B) {
+			var c = setupSubBenchmarkMuxParallel(b, ms)
+			defer teardownSubBenchmarkMuxParallel(b, ms)
+			b.RunParallel(func(pb *testing.PB) {
+				var (
+					err    error
+					method = fmt.Sprintf("%s.%s", benchmarkRPCName, "Query")
+					caller = rpc.NewPersistentCaller(s.node.ID)
+				)
+				for i := 0; pb.Next(); i++ {
+					if err = caller.Call(
+						method, &r[bm.randkey(benchmarkKeySpace)], &MuxQueryResponse{},
+					); err != nil {
+						b.Fatalf("Failed to execute: %v", err)
+					}
+					if (i+1)%benchmarkQueriesPerBlock == 0 {
+						if err = c.state.commit(); err != nil {
+							b.Fatalf("Failed to commit block: %v", err)
+						}
+					}
+				}
+			})
+		})
+	}
+}
diff --git a/xenomint/pool.go b/xenomint/pool.go
new file mode 100644
index 000000000..38a1f6511
--- /dev/null
+++ b/xenomint/pool.go
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xenomint
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
+	"github.com/CovenantSQL/CovenantSQL/types"
+)
+
+// QueryTracker defines an object to track a query as a request-response pair.
+type QueryTracker struct {
+	sync.RWMutex
+	Req  *types.Request
+	Resp *types.Response
+}
+
+// UpdateResp updates the response of the QueryTracker within the locking scope.
+func (q *QueryTracker) UpdateResp(resp *types.Response) {
+	q.Lock()
+	defer q.Unlock()
+	q.Resp = resp
+}
+
+// Ready reports whether the query is ready for block producing. It is assumed that all objects
+// should be ready shortly.
+func (q *QueryTracker) Ready() bool { + q.RLock() + defer q.RUnlock() + return q.Resp != nil +} + +type pool struct { + // Failed queries: hash => Request + failed map[hash.Hash]*types.Request + // Succeeded queries and their index + queries []*QueryTracker + index map[uint64]int + // Atomic counters for stats + failedRequestCount int32 + trackerCount int32 +} + +func newPool() *pool { + return &pool{ + failed: make(map[hash.Hash]*types.Request), + queries: make([]*QueryTracker, 0), + index: make(map[uint64]int), + } +} + +func (p *pool) enqueue(sp uint64, q *QueryTracker) { + var pos = len(p.queries) + p.queries = append(p.queries, q) + p.index[sp] = pos + atomic.StoreInt32(&p.trackerCount, int32(len(p.queries))) + return +} + +func (p *pool) setFailed(req *types.Request) { + p.failed[req.Header.Hash()] = req + atomic.StoreInt32(&p.failedRequestCount, int32(len(p.failed))) +} + +func (p *pool) failedList() (reqs []*types.Request) { + reqs = make([]*types.Request, 0, len(p.failed)) + for _, v := range p.failed { + reqs = append(reqs, v) + } + return +} + +func (p *pool) removeFailed(req *types.Request) { + delete(p.failed, req.Header.Hash()) + atomic.StoreInt32(&p.failedRequestCount, int32(len(p.failed))) +} + +func (p *pool) match(sp uint64, req *types.Request) bool { + var ( + pos int + ok bool + ) + if pos, ok = p.index[sp]; !ok { + return false + } + if p.queries[pos].Req.Header.Hash() != req.Header.Hash() { + return false + } + return true +} + +func (p *pool) matchLast(sp uint64) bool { + var ( + pos int + ok bool + ) + if pos, ok = p.index[sp]; !ok { + return false + } + if pos != len(p.queries)-1 { + return false + } + return true +} + +func (p *pool) truncate(sp uint64) { + var ( + pos int + ok bool + ni map[uint64]int + ) + if pos, ok = p.index[sp]; !ok { + return + } + // Rebuild index + ni = make(map[uint64]int) + for k, v := range p.index { + if k > sp { + ni[k] = v - (pos + 1) + } + } + p.index = ni + p.queries = p.queries[pos+1:] + atomic.StoreInt32(&p.trackerCount, int32(len(p.queries))) +} diff --git a/xenomint/pool_test.go b/xenomint/pool_test.go new file mode 100644 index 000000000..f68ede27a --- /dev/null +++ b/xenomint/pool_test.go @@ -0,0 +1,17 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint diff --git a/xenomint/sqlite/doc.go b/xenomint/sqlite/doc.go new file mode 100644 index 000000000..bd513d369 --- /dev/null +++ b/xenomint/sqlite/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package sqlite introduces a sqlite3 implementation of the xenomint/interfaces.Storage interface. +package sqlite diff --git a/xenomint/sqlite/sqlite.go b/xenomint/sqlite/sqlite.go new file mode 100644 index 000000000..0ea8bf846 --- /dev/null +++ b/xenomint/sqlite/sqlite.go @@ -0,0 +1,120 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sqlite + +import ( + "database/sql" + + "github.com/CovenantSQL/CovenantSQL/storage" + "github.com/CovenantSQL/go-sqlite3-encrypt" +) + +const ( + serializableDriver = "sqlite3" + dirtyReadDriver = "sqlite3-dirty-reader" +) + +func init() { + sql.Register(dirtyReadDriver, &sqlite3.SQLiteDriver{ + ConnectHook: func(c *sqlite3.SQLiteConn) (err error) { + if _, err = c.Exec("PRAGMA read_uncommitted=1", nil); err != nil { + return + } + return + }, + }) +} + +// SQLite3 is the sqlite3 implementation of the xenomint/interfaces.Storage interface. +type SQLite3 struct { + filename string + dirtyReader *sql.DB + reader *sql.DB + writer *sql.DB +} + +// NewSqlite returns a new SQLite3 instance attached to filename. +func NewSqlite(filename string) (s *SQLite3, err error) { + var ( + instance = &SQLite3{filename: filename} + shmRODSN string + privRODSN string + shmRWDSN string + dsn *storage.DSN + ) + + if dsn, err = storage.NewDSN(filename); err != nil { + return + } + + dsnRO := dsn.Clone() + dsnRO.AddParam("_journal_mode", "WAL") + dsnRO.AddParam("_query_only", "on") + dsnRO.AddParam("cache", "shared") + shmRODSN = dsnRO.Format() + + dsnPrivRO := dsn.Clone() + dsnPrivRO.AddParam("_journal_mode", "WAL") + dsnPrivRO.AddParam("_query_only", "on") + privRODSN = dsnPrivRO.Format() + + dsnSHMRW := dsn.Clone() + dsnSHMRW.AddParam("_journal_mode", "WAL") + dsnSHMRW.AddParam("cache", "shared") + shmRWDSN = dsnSHMRW.Format() + + if instance.dirtyReader, err = sql.Open(dirtyReadDriver, shmRODSN); err != nil { + return + } + if instance.reader, err = sql.Open(serializableDriver, privRODSN); err != nil { + return + } + if instance.writer, err = sql.Open(serializableDriver, shmRWDSN); err != nil { + return + } + s = instance + return +} + +// DirtyReader implements DirtyReader method of the xenomint/interfaces.Storage interface. +func (s *SQLite3) DirtyReader() *sql.DB { + return s.dirtyReader +} + +// Reader implements Reader method of the xenomint/interfaces.Storage interface. +func (s *SQLite3) Reader() *sql.DB { + return s.reader +} + +// Writer implements Writer method of the xenomint/interfaces.Storage interface. +func (s *SQLite3) Writer() *sql.DB { + return s.writer +} + +// Close implements Close method of the xenomint/interfaces.Storage interface. 
+func (s *SQLite3) Close() (err error) { + if err = s.dirtyReader.Close(); err != nil { + return + } + if err = s.reader.Close(); err != nil { + return + } + if err = s.writer.Close(); err != nil { + return + } + return +} diff --git a/xenomint/sqlite/sqlite_test.go b/xenomint/sqlite/sqlite_test.go new file mode 100644 index 000000000..6288b8f51 --- /dev/null +++ b/xenomint/sqlite/sqlite_test.go @@ -0,0 +1,1072 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sqlite + +import ( + "database/sql" + "fmt" + "math/rand" + "os" + "path" + "sync" + "sync/atomic" + "testing" + "time" + + xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces" + . "github.com/smartystreets/goconvey/convey" +) + +func TestStorage(t *testing.T) { + Convey("Given a sqlite storage implementation", t, func() { + const passes = 1000 + var ( + fl = path.Join(testingDataDir, t.Name()) + st xi.Storage + err error + ) + st, err = NewSqlite(fmt.Sprint("file:", fl)) + So(err, ShouldBeNil) + So(st, ShouldNotBeNil) + Reset(func() { + // Clean database file after each pass + err = st.Close() + So(err, ShouldBeNil) + err = os.Remove(fl) + So(err, ShouldBeNil) + err = os.Remove(fmt.Sprint(fl, "-shm")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + err = os.Remove(fmt.Sprint(fl, "-wal")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + }) + Convey("When a basic KV table is created", func(c C) { + // Create basic table for testing + _, err = st.Writer().Exec(`CREATE TABLE "t1" ("k" INT, "v" TEXT, PRIMARY KEY("k"))`) + So(err, ShouldBeNil) + Convey("When storage is closed", func() { + err = st.Close() + So(err, ShouldBeNil) + Convey("The storage should report error for any incoming query", func() { + err = st.DirtyReader().QueryRow(`SELECT "v" FROM "t1" WHERE "k"=?`, 1).Scan(nil) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "sql: database is closed") + err = st.Reader().QueryRow(`SELECT "v" FROM "t1" WHERE "k"=?`, 1).Scan(nil) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "sql: database is closed") + _, err = st.Writer().Exec(`INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, 1, "v1") + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "sql: database is closed") + }) + }) + Convey("The storage should report error when readers attempt to write", func() { + _, err = st.DirtyReader().Exec(`INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, 1, "v1") + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "attempt to write a readonly database") + _, err = st.Reader().Exec(`INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, 1, "v1") + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "attempt to write a readonly database") + }) + Convey("The storage should work properly under concurrent reading/writing", func(c C) { + var ( + ec = make(chan error, passes) + sc = make(chan struct{}) + wg = &sync.WaitGroup{} + + abortReaders = func() { close(sc) } + ) + for i := 0; i < passes; i++ { + wg.Add(1) + go func(k int) { + var ticker = 
time.NewTicker(1 * time.Millisecond) + defer func() { + ticker.Stop() + wg.Done() + }() + for { + select { + case <-ticker.C: + var ( + err error + v string + ) + if err = st.Reader().QueryRow( + `SELECT "v" FROM "t1" WHERE "k"=?`, k, + ).Scan(&v); err != sql.ErrNoRows { + if err != nil { + ec <- err + } else { + c.Printf("\n Read pair from t1: k=%d v=%s ", k, v) + } + return + } + case <-sc: + return + } + } + }(i) + } + defer func() { + wg.Wait() + close(ec) + var errs = len(ec) + for err = range ec { + Printf("\n Get error from channel: %v ", err) + } + So(errs, ShouldBeZeroValue) + }() + for i := 0; i < passes; i++ { + if _, err = st.Writer().Exec( + `INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, i, fmt.Sprintf("v%d", i), + ); err != nil { + abortReaders() + } + So(err, ShouldBeNil) + c.Printf("\n Write pair to t1: k=%d v=v%d ", i, i) + } + }) + Convey("The storage should see uncommitted changes from dirty reader", func(c C) { + var ( + tx *sql.Tx + ec = make(chan error, passes) + sc = make(chan struct{}) + wg = &sync.WaitGroup{} + + abortReaders = func() { close(sc) } + ) + // Open transaction + tx, err = st.Writer().Begin() + So(err, ShouldBeNil) + So(tx, ShouldNotBeNil) + for i := 0; i < passes; i++ { + wg.Add(1) + go func(k int) { + var ticker = time.NewTicker(1 * time.Millisecond) + defer func() { + ticker.Stop() + wg.Done() + }() + for { + select { + case <-ticker.C: + var ( + err error + v string + ) + if err = st.DirtyReader().QueryRow( + `SELECT "v" FROM "t1" WHERE "k"=?`, k, + ).Scan(&v); err != sql.ErrNoRows { + if err != nil { + ec <- err + } else { + c.Printf("\n Dirty read pair from t1: k=%d v=%s ", + k, v) + } + return + } + case <-sc: + return + } + } + }(i) + } + defer func() { + wg.Wait() + close(ec) + var errs = len(ec) + for err = range ec { + Printf("\n Get error from channel: %v ", err) + } + So(errs, ShouldBeZeroValue) + err = tx.Commit() + So(err, ShouldBeNil) + }() + for i := 0; i < passes; i++ { + var ( + v = fmt.Sprintf("v%d", i) + rv string + ) + if _, err = tx.Exec( + `INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, i, v, + ); err != nil { + abortReaders() + } + So(err, ShouldBeNil) + // No isolation between operations on the same database connection + if err = tx.QueryRow( + `SELECT "v" FROM "t1" WHERE "k"=?`, i, + ).Scan(&rv); err != nil || rv != v { + abortReaders() + } + So(err, ShouldBeNil) + So(rv, ShouldEqual, v) + c.Printf("\n Write pair to t1 in transaction: k=%d v=%s ", i, v) + } + // Reader connection should not see any uncommitted change + for i := 0; i < passes; i++ { + err = st.Reader().QueryRow(`SELECT "v" FROM "t1" WHERE "k"=?`, i).Scan(nil) + So(err, ShouldEqual, sql.ErrNoRows) + } + }) + }) + }) +} + +const ( + benchmarkQueriesPerTx = 100 + benchmarkVNum = 3 + benchmarkVLen = 333 + benchmarkKeySubspaceLength = 1000000 + + benchmarkReservedKeyOffset = iota * benchmarkKeySubspaceLength + benchmarkNewKeyOffset + benchmarkKeySpace +) + +type keygen interface { + next() int + reset() +} + +type randKeygen struct { + offset int + length int +} + +func newRandKeygen(offset, length int) *randKeygen { + return &randKeygen{ + offset: offset, + length: length, + } +} + +func newIndexRandKeygen(length int) *randKeygen { return newRandKeygen(0, length) } + +func (k *randKeygen) next() int { return rand.Intn(k.length) + k.offset } +func (k *randKeygen) reset() {} + +type permKeygen struct { + offset int + length int + perm []int + pos int32 +} + +func newPermKeygen(offset, length int) *permKeygen { + return &permKeygen{ + offset: offset, + length: length, + 
perm:   rand.Perm(length),
+	}
+}
+
+func newIndexPermKeygen(length int) *permKeygen { return newPermKeygen(0, length) }
+
+func (k *permKeygen) next() int {
+	var pos = atomic.AddInt32(&k.pos, 1) - 1
+	if pos >= int32(k.length) {
+		panic("permKeygen: keys have been exhausted")
+	}
+	return k.perm[pos] + k.offset
+}
+
+func (k *permKeygen) reset() { k.pos = 0 }
+
+var (
+	irkg = newIndexRandKeygen(benchmarkKeySubspaceLength)
+	ipkg = newIndexPermKeygen(benchmarkKeySubspaceLength)
+	rrkg = newRandKeygen(benchmarkReservedKeyOffset, benchmarkKeySubspaceLength)
+	nrkg = newRandKeygen(benchmarkNewKeyOffset, benchmarkKeySubspaceLength)
+	trkg = newRandKeygen(0, benchmarkKeySpace)
+)
+
+func setupBenchmarkStorage(b *testing.B) (
+	st xi.Storage,
+	q string, makeDest func() []interface{},
+	e string, src [][]interface{},
+) {
+	// Setup storage
+	var (
+		fl   = path.Join(testingDataDir, b.Name())
+		err  error
+		stmt *sql.Stmt
+	)
+	if st, err = NewSqlite(fmt.Sprint("file:", fl)); err != nil {
+		b.Fatalf("Failed to setup bench environment: %v", err)
+	}
+	if _, err = st.Writer().Exec(
+		`CREATE TABLE "t2" ("k" INT, "v1" TEXT, "v2" TEXT, "v3" TEXT, PRIMARY KEY("k"))`,
+	); err != nil {
+		b.Fatalf("Failed to setup bench environment: %v", err)
+	}
+	if stmt, err = st.Writer().Prepare(
+		`INSERT INTO "t2" VALUES (?, ?, ?, ?)`,
+	); err != nil {
+		b.Fatalf("Failed to setup bench environment: %v", err)
+	}
+	for i := 0; i < benchmarkKeySubspaceLength; i++ {
+		var (
+			vals [benchmarkVNum][benchmarkVLen]byte
+			args [benchmarkVNum + 1]interface{}
+		)
+		args[0] = benchmarkReservedKeyOffset + i
+		for j := range vals {
+			rand.Read(vals[j][:])
+			args[j+1] = string(vals[j][:])
+		}
+		if _, err = stmt.Exec(args[:]...); err != nil {
+			b.Fatalf("Failed to setup bench environment: %v", err)
+		}
+		if i%10000 == 0 {
+			fmt.Printf("Done setting up key at %v\n", i)
+		}
+	}
+	// Setup query string and dest slice
+	q = `SELECT "v1", "v2", "v3" FROM "t2" WHERE "k"=?`
+	makeDest = func() (dest []interface{}) {
+		var outv [benchmarkVNum]string
+		dest = make([]interface{}, benchmarkVNum)
+		for i := range outv {
+			dest[i] = &outv[i]
+		}
+		return
+	}
+	// Setup execute string and src table
+	//
+	// NOTE(leventeliu): both INSERT OR IGNORE and REPLACE have a noticeable impact on the
+	// benchmark result, while UPSERT performs best:
+	//
+	// e = `INSERT OR IGNORE INTO "t2" VALUES (?, ?, ?, ?)`
+	// e = `REPLACE INTO "t2" VALUES (?, ?, ?, ?)`
+	e = `INSERT INTO "t2" VALUES (?, ?, ?, ?)
+ ON CONFLICT("k") DO UPDATE SET + "v1"="excluded"."v1", + "v2"="excluded"."v2", + "v3"="excluded"."v3" +` + src = make([][]interface{}, benchmarkKeySubspaceLength) + for i := range src { + var vals [benchmarkVNum][benchmarkVLen]byte + src[i] = make([]interface{}, benchmarkVNum+1) + src[i][0] = benchmarkNewKeyOffset + i + for j := range vals { + rand.Read(vals[j][:]) + src[i][j+1] = string(vals[j][:]) + } + } + + return +} + +func teardownBenchmarkStorage(b *testing.B, st xi.Storage) { + var ( + fl = path.Join(testingDataDir, b.Name()) + err error + ) + if err = st.Close(); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fl); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-shm")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-wal")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } +} + +func setupSubBenchmarkStorage(b *testing.B, st xi.Storage) { + // Reset key generators + irkg.reset() + ipkg.reset() + rrkg.reset() + nrkg.reset() + trkg.reset() +} + +func teardownSubBenchmarkStorage(b *testing.B, st xi.Storage) { + var ( + d = `DELETE FROM "t2" WHERE "k">=?` + err error + ) + if _, err = st.Writer().Exec(d, benchmarkNewKeyOffset); err != nil { + b.Fatalf("Failed to teardown sub bench environment: %v", err) + } +} + +type benchmarkProfile struct { + name string + parall bool + proc func(*testing.B, int) + pproc func(*testing.PB) + bg func(*testing.B, *sync.WaitGroup, <-chan struct{}) +} + +func BenchmarkStorage(b *testing.B) { + var ( + st, q, dm, e, src = setupBenchmarkStorage(b) + + tx *sql.Tx + dest = dm() + read = func(b *testing.B, conn *sql.DB, dest []interface{}) { + var err error + if err = conn.QueryRow(q, rrkg.next()).Scan(dest...); err != nil { + b.Fatalf("Failed to query values: %v", err) + } + } + readTx = func(b *testing.B, i int, conn *sql.DB, dest []interface{}) { + var err error + if i%benchmarkQueriesPerTx == 0 { + if tx, err = conn.Begin(); err != nil { + b.Fatalf("Failed to begin transaction: %v", err) + } + } + // Query in [n, 2n-1] key space + if err = tx.QueryRow(q, nrkg.next()).Scan(dest...); err != nil && err != sql.ErrNoRows { + b.Fatalf("Failed to query values: %v", err) + } + if (i+1)%benchmarkQueriesPerTx == 0 || i == b.N-1 { + if err = tx.Rollback(); err != nil { + b.Fatalf("Failed to close transaction: %v", err) + } + } + } + write = func(b *testing.B, conn *sql.DB) { + var err error + if _, err = conn.Exec(e, src[ipkg.next()]...); err != nil { + b.Errorf("Failed to execute: %v", err) + } + } + writeTx = func(b *testing.B, i int, conn *sql.DB) { + var err error + if i%benchmarkQueriesPerTx == 0 { + if tx, err = st.Writer().Begin(); err != nil { + b.Errorf("Failed to begin transaction: %v", err) + } + } + if _, err = tx.Exec(e, src[ipkg.next()]...); err != nil { + b.Errorf("Failed to execute: %v", err) + } + if (i+1)%benchmarkQueriesPerTx == 0 || i == b.N-1 { + if err = tx.Commit(); err != nil { + b.Errorf("Failed to commit transaction: %v", err) + } + } + } + mixRW = func(b *testing.B, rconn, wconn *sql.DB, dest []interface{}) { + if rand.Int()%2 == 0 { + read(b, rconn, dest) + } else { + write(b, wconn) + } + } + + bgw = func(b *testing.B, wg *sync.WaitGroup, sc <-chan struct{}) { + busyWrite(b, wg, sc, st, ipkg, e, src) + } + bgbwtx = func(b *testing.B, wg *sync.WaitGroup, sc <-chan struct{}) { + 
busyWriteTx(b, wg, sc, st, ipkg, e, src) + } + bgiwtx = func(b *testing.B, wg *sync.WaitGroup, sc <-chan struct{}) { + idleWriteTx(b, wg, sc, st, ipkg, e, src) + } + + pproc = func(pb *testing.PB, proc func()) { + for pb.Next() { + proc() + } + } + + profiles = [...]benchmarkProfile{ + { + name: "SequentialDirtyRead", + proc: func(b *testing.B, _ int) { read(b, st.DirtyReader(), dest) }, + }, { + name: "SequentialRead", + proc: func(b *testing.B, _ int) { read(b, st.Reader(), dest) }, + }, { + name: "SequentialWrite", + proc: func(b *testing.B, _ int) { write(b, st.Writer()) }, + }, { + name: "SequentialWriteTx", + proc: func(b *testing.B, i int) { writeTx(b, i, st.Writer()) }, + }, { + name: "SequentialMixDRW", + proc: func(b *testing.B, _ int) { mixRW(b, st.DirtyReader(), st.Writer(), dest) }, + }, { + name: "SequentialMixRW", + proc: func(b *testing.B, _ int) { mixRW(b, st.Reader(), st.Writer(), dest) }, + }, { + name: "SequentialDirtyReadWithBackgroundWriter", + proc: func(b *testing.B, _ int) { read(b, st.DirtyReader(), dest) }, + bg: bgw, + }, { + name: "SequentialReadWithBackgroundWriter", + proc: func(b *testing.B, _ int) { read(b, st.Reader(), dest) }, + bg: bgw, + }, { + name: "SequentialDirtyReadWithBackgroundBusyTxWriter", + proc: func(b *testing.B, _ int) { read(b, st.DirtyReader(), dest) }, + bg: bgbwtx, + }, { + name: "SequentialReadWithBackgroundBusyTxWriter", + proc: func(b *testing.B, _ int) { read(b, st.Reader(), dest) }, + bg: bgbwtx, + }, { + name: "SequentialDirtyReadWithBackgroundIdleTxWriter", + proc: func(b *testing.B, _ int) { read(b, st.DirtyReader(), dest) }, + bg: bgiwtx, + }, { + name: "SequentialReadWithBackgroundIdleTxWriter", + proc: func(b *testing.B, _ int) { read(b, st.Reader(), dest) }, + bg: bgiwtx, + }, { + name: "SequentialDirtyReadTxWithBackgroundWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.DirtyReader(), dest) }, + bg: bgw, + }, { + name: "SequentialReadTxWithBackgroundWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.Reader(), dest) }, + bg: bgw, + }, { + name: "SequentialDirtyReadTxWithBackgroundBusyTxWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.DirtyReader(), dest) }, + bg: bgbwtx, + }, { + name: "SequentialReadTxWithBackgroundBusyTxWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.Reader(), dest) }, + bg: bgbwtx, + }, { + name: "SequentialDirtyReadTxWithBackgroundIdleTxWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.DirtyReader(), dest) }, + bg: bgiwtx, + }, { + name: "SequentialReadTxWithBackgroundIdleTxWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.Reader(), dest) }, + bg: bgiwtx, + }, { + name: "ParallelDirtyRead", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { read(b, st.DirtyReader(), dm()) }) + }, + }, { + name: "ParallelRead", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { read(b, st.Reader(), dm()) }) + }, + }, { + name: "ParallelWrite", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { write(b, st.Writer()) }) + }, + }, { + name: "ParallelMixDRW", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { mixRW(b, st.DirtyReader(), st.Writer(), dm()) }) + }, + }, { + name: "ParallelMixRW", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { mixRW(b, st.Reader(), st.Writer(), dm()) }) + }, + }, + } + ) + defer teardownBenchmarkStorage(b, st) + // Run benchmark profiles + for _, v := range profiles { + b.Run(v.name, func(b *testing.B) { + // Setup environment for 
sub-benchmark
+			setupSubBenchmarkStorage(b, st)
+			defer teardownSubBenchmarkStorage(b, st)
+			// Start background goroutine
+			var (
+				wg = &sync.WaitGroup{}
+				sc = make(chan struct{})
+			)
+			if v.bg != nil {
+				wg.Add(1)
+				go v.bg(b, wg, sc)
+			}
+			defer func() {
+				close(sc)
+				wg.Wait()
+			}()
+			// Test body
+			b.ResetTimer()
+			if v.parall {
+				// Run parallel
+				b.RunParallel(v.pproc)
+			} else {
+				// Run sequential
+				for i := 0; i < b.N; i++ {
+					v.proc(b, i)
+				}
+			}
+			b.StopTimer()
+		})
+	}
+}
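+
+// A single profile can be selected through the standard sub-benchmark path syntax,
+// for example:
+//
+//   go test -run '^$' -bench 'BenchmarkStorage/SequentialWrite$' -benchmem ./xenomint/sqlite
+
+// busyWrite, busyWriteTx and idleWriteTx provide the background write loads used by
+// the "WithBackground...Writer" benchmark profiles above: plain autocommit writes,
+// back-to-back transactions, and a transactional trickle at 1ms intervals,
+// respectively.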
+
+func busyWrite(
+	b *testing.B,
+	wg *sync.WaitGroup, sc <-chan struct{},
+	st xi.Storage, kg keygen, e string, src [][]interface{},
+) {
+	defer wg.Done()
+	var err error
+	for {
+		select {
+		case <-sc:
+			return
+		default:
+			if _, err = st.Writer().Exec(e, src[kg.next()]...); err != nil {
+				b.Errorf("Failed to execute: %v", err)
+			}
+		}
+	}
+}
+
+func busyWriteTx(
+	b *testing.B,
+	wg *sync.WaitGroup, sc <-chan struct{},
+	st xi.Storage, kg keygen, e string, src [][]interface{},
+) {
+	defer wg.Done()
+	var (
+		tx  *sql.Tx
+		err error
+	)
+	for i := 0; ; i++ {
+		// Begin
+		if i%benchmarkQueriesPerTx == 0 {
+			if tx, err = st.Writer().Begin(); err != nil {
+				b.Errorf("Failed to begin transaction: %v", err)
+			}
+		}
+		// Exec
+		select {
+		case <-sc:
+			// Also commit on exiting
+			if tx != nil {
+				if err = tx.Commit(); err != nil {
+					b.Errorf("Failed to commit transaction: %v", err)
+				}
+				tx = nil
+			}
+			return
+		default:
+			if _, err = tx.Exec(e, src[kg.next()]...); err != nil {
+				b.Errorf("Failed to execute: %v", err)
+			}
+		}
+		// Commit
+		if (i+1)%benchmarkQueriesPerTx == 0 {
+			if err = tx.Commit(); err != nil {
+				b.Errorf("Failed to commit transaction: %v", err)
+			}
+			tx = nil
+		}
+	}
+}
+
+func idleWriteTx(
+	b *testing.B,
+	wg *sync.WaitGroup, sc <-chan struct{},
+	st xi.Storage, kg keygen, e string, src [][]interface{},
+) {
+	const writeIntlMS = 1
+	var (
+		tx     *sql.Tx
+		err    error
+		ticker = time.NewTicker(writeIntlMS * time.Millisecond)
+	)
+	defer func() {
+		ticker.Stop()
+		wg.Done()
+	}()
+	for i := 0; ; i++ {
+		// Begin
+		if i%benchmarkQueriesPerTx == 0 {
+			if tx, err = st.Writer().Begin(); err != nil {
+				b.Errorf("Failed to begin transaction: %v", err)
+			}
+		}
+		// Exec
+		select {
+		case <-ticker.C:
+			if _, err = tx.Exec(e, src[kg.next()]...); err != nil {
+				b.Errorf("Failed to execute: %v", err)
+			}
+		case <-sc:
+			// Also commit on exiting
+			if tx != nil {
+				if err = tx.Commit(); err != nil {
+					b.Errorf("Failed to commit transaction: %v", err)
+				}
+				tx = nil
+			}
+			return
+		}
+		// Commit
+		if (i+1)%benchmarkQueriesPerTx == 0 {
+			if err = tx.Commit(); err != nil {
+				b.Errorf("Failed to commit transaction: %v", err)
+			}
+			tx = nil
+		}
+	}
+}
diff --git a/xenomint/sqlite/xxx_test.go b/xenomint/sqlite/xxx_test.go
new file mode 100644
index 000000000..3c211b975
--- /dev/null
+++ b/xenomint/sqlite/xxx_test.go
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sqlite
+
+import (
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"syscall"
+	"testing"
+	"time"
+
+	"github.com/CovenantSQL/CovenantSQL/utils/log"
+)
+
+var (
+	testingDataDir string
+)
+
+func setup() {
+	const minNoFile uint64 = 4096
+	var (
+		err error
+		lmt syscall.Rlimit
+	)
+
+	if testingDataDir, err = ioutil.TempDir("", "CovenantSQL"); err != nil {
+		panic(err)
+	}
+
+	rand.Seed(time.Now().UnixNano())
+
+	if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil {
+		panic(err)
+	}
+	if lmt.Max < minNoFile {
+		panic("insufficient max RLIMIT_NOFILE")
+	}
+	lmt.Cur = lmt.Max
+	if err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil {
+		panic(err)
+	}
+
+	log.SetOutput(os.Stdout)
+	log.SetLevel(log.DebugLevel)
+}
+
+func teardown() {
+	if err := os.RemoveAll(testingDataDir); err != nil {
+		panic(err)
+	}
+}
+
+func TestMain(m *testing.M) {
+	os.Exit(func() int {
+		setup()
+		defer teardown()
+		return m.Run()
+	}())
+}
diff --git a/xenomint/state.go b/xenomint/state.go
new file mode 100644
index 000000000..8599cf012
--- /dev/null
+++ b/xenomint/state.go
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xenomint
+
+import (
+	"database/sql"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/CovenantSQL/CovenantSQL/proto"
+	"github.com/CovenantSQL/CovenantSQL/types"
+	"github.com/CovenantSQL/CovenantSQL/utils/log"
+	xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces"
+	"github.com/CovenantSQL/sqlparser"
+	"github.com/pkg/errors"
+)
+
+// State defines a xenomint state which is bound to an underlying storage.
+type State struct {
+	sync.RWMutex
+	strg   xi.Storage
+	pool   *pool
+	closed bool
+	nodeID proto.NodeID
+
+	// TODO(leventeliu): Reload savepoint from last block on chain initialization, and roll back
+	// any ongoing transaction on exit.
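+	//
+	// The three savepoint counters below advance monotonically (illustrative values
+	// only): InitTx(10) sets origin = cmpoint = current = 10; each successful write
+	// query increments current; ReplayBlock moves cmpoint up to current at a commit
+	// point; and a full commit resets origin = cmpoint = current for the next
+	// transaction.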
+	//
+	// unc is the uncommitted transaction.
+	unc             *sql.Tx
+	origin          uint64 // origin is the original savepoint of the current transaction
+	cmpoint         uint64 // cmpoint is the last commit point of the current transaction
+	current         uint64 // current is the current savepoint of the current transaction
+	hasSchemaChange uint32 // indicates schema change happens in this uncommitted transaction
+}
+
+// NewState returns a new State bound to strg.
+func NewState(nodeID proto.NodeID, strg xi.Storage) (s *State, err error) {
+	var t = &State{
+		nodeID: nodeID,
+		strg:   strg,
+		pool:   newPool(),
+	}
+	if t.unc, err = t.strg.Writer().Begin(); err != nil {
+		return
+	}
+	t.setSavepoint()
+	s = t
+	return
+}
+
+func (s *State) incSeq() {
+	s.current++
+}
+
+func (s *State) setNextTxID() {
+	s.origin = s.current
+	s.cmpoint = s.current
+}
+
+func (s *State) setCommitPoint() {
+	s.cmpoint = s.current
+}
+
+func (s *State) rollbackID(id uint64) {
+	s.current = id
+}
+
+// InitTx sets the initial id of the current transaction. This method is not safe for concurrent
+// use and should only be called at initialization.
+func (s *State) InitTx(id uint64) {
+	s.origin = id
+	s.cmpoint = id
+	s.current = id
+	s.setSavepoint()
+}
+
+func (s *State) getID() uint64 {
+	return atomic.LoadUint64(&s.current)
+}
+
+// Close commits any ongoing transaction if needed and closes the underlying storage.
+func (s *State) Close(commit bool) (err error) {
+	if s.closed {
+		return
+	}
+	if s.unc != nil {
+		if commit {
+			if err = s.uncCommit(); err != nil {
+				return
+			}
+		} else {
+			// Only roll back to the last commit point
+			if err = s.rollback(); err != nil {
+				return
+			}
+			if err = s.uncCommit(); err != nil {
+				return
+			}
+		}
+	}
+	if err = s.strg.Close(); err != nil {
+		return
+	}
+	s.closed = true
+	return
+}
+
+func convertQueryAndBuildArgs(pattern string, args []types.NamedArg) (containsDDL bool, p string, ifs []interface{}, err error) {
+	var (
+		tokenizer  = sqlparser.NewStringTokenizer(pattern)
+		stmt       sqlparser.Statement
+		lastPos    int
+		query      string
+		queryParts []string
+	)
+
+	for {
+		stmt, err = sqlparser.ParseNext(tokenizer)
+
+		if err != nil && err != io.EOF {
+			return
+		}
+
+		if err == io.EOF {
+			err = nil
+			break
+		}
+
+		query = pattern[lastPos : tokenizer.Position-1]
+		lastPos = tokenizer.Position + 1
+
+		// translate show statement
+		if showStmt, ok := stmt.(*sqlparser.Show); ok {
+			origQuery := query
+
+			switch showStmt.Type {
+			case "table":
+				if showStmt.ShowCreate {
+					query = "SELECT sql FROM sqlite_master WHERE type = \"table\" AND tbl_name = \"" +
+						showStmt.OnTable.Name.String() + "\""
+				} else {
+					query = "PRAGMA table_info(" + showStmt.OnTable.Name.String() + ")"
+				}
+			case "index":
+				query = "SELECT name FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"" +
+					showStmt.OnTable.Name.String() + "\""
+			case "tables":
+				query = "SELECT name FROM sqlite_master WHERE type = \"table\""
+			}
+
+			log.WithFields(log.Fields{
+				"from": origQuery,
+				"to":   query,
+			}).Debug("query translated")
+		} else if _, ok := stmt.(*sqlparser.DDL); ok {
+			containsDDL = true
+		}
+
+		queryParts = append(queryParts, query)
+	}
+
+	p = strings.Join(queryParts, "; ")
+
+	ifs = make([]interface{}, len(args))
+	for i, v := range args {
+		ifs[i] = sql.NamedArg{
+			Name:  v.Name,
+			Value: v.Value,
+		}
+	}
+	return
+}
+
+func buildTypeNamesFromSQLColumnTypes(types []*sql.ColumnType) (names []string) {
+	names = make([]string, len(types))
+	for i, v := range types {
+		names[i] = v.DatabaseTypeName()
+	}
+	return
+}
+
+type sqlQuerier
interface { + Query(query string, args ...interface{}) (*sql.Rows, error) +} + +func readSingle( + qer sqlQuerier, q *types.Query) (names []string, types []string, data [][]interface{}, err error, +) { + var ( + rows *sql.Rows + cols []*sql.ColumnType + pattern string + args []interface{} + ) + + if _, pattern, args, err = convertQueryAndBuildArgs(q.Pattern, q.Args); err != nil { + return + } + if rows, err = qer.Query(pattern, args...); err != nil { + return + } + defer rows.Close() + // Fetch column names and types + if names, err = rows.Columns(); err != nil { + return + } + if cols, err = rows.ColumnTypes(); err != nil { + return + } + types = buildTypeNamesFromSQLColumnTypes(cols) + // Scan data row by row + data = make([][]interface{}, 0) + for rows.Next() { + var ( + row = make([]interface{}, len(cols)) + dest = make([]interface{}, len(cols)) + ) + for i := range row { + dest[i] = &row[i] + } + if err = rows.Scan(dest...); err != nil { + return + } + data = append(data, row) + } + return +} + +func buildRowsFromNativeData(data [][]interface{}) (rows []types.ResponseRow) { + rows = make([]types.ResponseRow, len(data)) + for i, v := range data { + rows[i].Values = v + } + return +} + +func (s *State) read(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) { + var ( + ierr error + cnames, ctypes []string + data [][]interface{} + ) + // TODO(leventeliu): no need to run every read query here. + for i, v := range req.Payload.Queries { + if cnames, ctypes, data, ierr = readSingle(s.strg.DirtyReader(), &v); ierr != nil { + err = errors.Wrapf(ierr, "query at #%d failed", i) + // Add to failed pool list + s.pool.setFailed(req) + return + } + } + // Build query response + ref = &QueryTracker{Req: req} + resp = &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: req.Header, + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: uint64(len(data)), + LogOffset: s.getID(), + }, + }, + Payload: types.ResponsePayload{ + Columns: cnames, + DeclTypes: ctypes, + Rows: buildRowsFromNativeData(data), + }, + } + return +} + +func (s *State) readTx(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) { + var ( + tx *sql.Tx + id uint64 + ierr error + cnames, ctypes []string + data [][]interface{} + querier sqlQuerier + ) + id = s.getID() + if atomic.LoadUint32(&s.hasSchemaChange) == 1 { + // lock transaction + s.Lock() + defer s.Unlock() + s.setSavepoint() + querier = s.unc + defer s.rollbackTo(id) + } else { + if tx, ierr = s.strg.DirtyReader().Begin(); ierr != nil { + err = errors.Wrap(ierr, "open tx failed") + return + } + querier = tx + defer tx.Rollback() + } + + for i, v := range req.Payload.Queries { + if cnames, ctypes, data, ierr = readSingle(querier, &v); ierr != nil { + err = errors.Wrapf(ierr, "query at #%d failed", i) + // Add to failed pool list + s.pool.setFailed(req) + return + } + } + // Build query response + ref = &QueryTracker{Req: req} + resp = &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: req.Header, + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: uint64(len(data)), + LogOffset: id, + }, + }, + Payload: types.ResponsePayload{ + Columns: cnames, + DeclTypes: ctypes, + Rows: buildRowsFromNativeData(data), + }, + } + return +} + +func (s *State) writeSingle(q *types.Query) (res sql.Result, err error) { + var ( + containsDDL bool + pattern string + args []interface{} + ) + + if containsDDL, pattern, args, err = 
convertQueryAndBuildArgs(q.Pattern, q.Args); err != nil {
+		return
+	}
+	if res, err = s.unc.Exec(pattern, args...); err == nil {
+		if containsDDL {
+			atomic.StoreUint32(&s.hasSchemaChange, 1)
+		}
+		s.incSeq()
+	}
+	return
+}
+
+func (s *State) setSavepoint() (savepoint uint64) {
+	savepoint = s.getID()
+	// NOTE: savepoint names are identifiers and cannot be bound as parameters, so the
+	// statement is built from the numeric id directly.
+	s.unc.Exec(fmt.Sprintf("SAVEPOINT \"%d\"", savepoint))
+	return
+}
+
+func (s *State) rollbackTo(savepoint uint64) {
+	s.rollbackID(savepoint)
+	s.unc.Exec(fmt.Sprintf("ROLLBACK TO \"%d\"", savepoint))
+}
+
+func (s *State) write(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) {
+	var (
+		savepoint         uint64
+		query             = &QueryTracker{Req: req}
+		totalAffectedRows int64
+		curAffectedRows   int64
+		lastInsertID      int64
+	)
+
+	// TODO(leventeliu): savepoint is an SQLite-specific solution for nested transactions.
+	if err = func() (err error) {
+		var ierr error
+		s.Lock()
+		defer s.Unlock()
+		savepoint = s.getID()
+		for i, v := range req.Payload.Queries {
+			var res sql.Result
+			if res, ierr = s.writeSingle(&v); ierr != nil {
+				err = errors.Wrapf(ierr, "execute at #%d failed", i)
+				// Add to failed pool list
+				s.pool.setFailed(req)
+				s.rollbackTo(savepoint)
+				return
+			}
+
+			curAffectedRows, _ = res.RowsAffected()
+			lastInsertID, _ = res.LastInsertId()
+			totalAffectedRows += curAffectedRows
+		}
+		s.setSavepoint()
+		s.pool.enqueue(savepoint, query)
+		return
+	}(); err != nil {
+		return
+	}
+	// Build query response
+	ref = query
+	resp = &types.Response{
+		Header: types.SignedResponseHeader{
+			ResponseHeader: types.ResponseHeader{
+				Request:      req.Header,
+				NodeID:       s.nodeID,
+				Timestamp:    s.getLocalTime(),
+				RowCount:     0,
+				LogOffset:    savepoint,
+				AffectedRows: totalAffectedRows,
+				LastInsertID: lastInsertID,
+			},
+		},
+	}
+	return
+}
+
+func (s *State) replay(req *types.Request, resp *types.Response) (err error) {
+	var (
+		ierr      error
+		savepoint uint64
+		query     = &QueryTracker{Req: req, Resp: resp}
+	)
+	s.Lock()
+	defer s.Unlock()
+	savepoint = s.getID()
+	if resp.Header.ResponseHeader.LogOffset != savepoint {
+		err = errors.Wrapf(
+			ErrQueryConflict,
+			"local id %d vs replaying id %d", savepoint, resp.Header.ResponseHeader.LogOffset,
+		)
+		return
+	}
+	for i, v := range req.Payload.Queries {
+		if _, ierr = s.writeSingle(&v); ierr != nil {
+			err = errors.Wrapf(ierr, "execute at #%d failed", i)
+			s.rollbackTo(savepoint)
+			return
+		}
+	}
+	s.setSavepoint()
+	s.pool.enqueue(savepoint, query)
+	return
+}
+
+// ReplayBlock replays the queries from block. It also checks and skips some preceding pooled
+// queries.
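+// For each query transaction in the block, let offset be its response LogOffset and
+// lastsp be the local savepoint: offset > lastsp fails with ErrMissingParent;
+// offset < lastsp must match an already pooled query, otherwise the replay fails
+// with ErrQueryConflict; and offset == lastsp is replayed against local storage.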
+func (s *State) ReplayBlock(block *types.Block) (err error) {
+	var (
+		ierr   error
+		lastsp uint64 // Last savepoint
+	)
+	s.Lock()
+	defer s.Unlock()
+	for i, q := range block.QueryTxs {
+		var query = &QueryTracker{Req: q.Request, Resp: &types.Response{Header: *q.Response}}
+		lastsp = s.getID()
+		if q.Response.ResponseHeader.LogOffset > lastsp {
+			err = ErrMissingParent
+			return
+		}
+		// Match and skip already pooled query
+		if q.Response.ResponseHeader.LogOffset < lastsp {
+			if !s.pool.match(q.Response.ResponseHeader.LogOffset, q.Request) {
+				err = ErrQueryConflict
+				return
+			}
+			continue
+		}
+		// Replay query
+		for j, v := range q.Request.Payload.Queries {
+			if q.Request.Header.QueryType == types.ReadQuery {
+				continue
+			}
+			if q.Request.Header.QueryType != types.WriteQuery {
+				err = errors.Wrapf(ErrInvalidRequest, "replay block at %d:%d", i, j)
+				s.rollbackTo(lastsp)
+				return
+			}
+			if _, ierr = s.writeSingle(&v); ierr != nil {
+				err = errors.Wrapf(ierr, "execute at %d:%d failed", i, j)
+				s.rollbackTo(lastsp)
+				return
+			}
+		}
+		s.setSavepoint()
+		s.pool.enqueue(lastsp, query)
+	}
+	// Remove duplicate failed queries from local pool
+	for _, r := range block.FailedReqs {
+		s.pool.removeFailed(r)
+	}
+	// Check if the current transaction is ok to commit
+	if s.pool.matchLast(lastsp) {
+		if err = s.uncCommit(); err != nil {
+			// FATAL ERROR
+			return
+		}
+		if s.unc, err = s.strg.Writer().Begin(); err != nil {
+			// FATAL ERROR
+			return
+		}
+		s.setNextTxID()
+	} else {
+		// Set commit point only, transaction is not actually committed. This commit point will be
+		// used on exiting.
+		s.setCommitPoint()
+	}
+	s.setSavepoint()
+	// Truncate pooled queries
+	s.pool.truncate(lastsp)
+	return
+}
+
+func (s *State) commit() (err error) {
+	s.Lock()
+	defer s.Unlock()
+	if err = s.uncCommit(); err != nil {
+		return
+	}
+	if s.unc, err = s.strg.Writer().Begin(); err != nil {
+		return
+	}
+	s.setNextTxID()
+	s.setSavepoint()
+	s.pool = newPool()
+	return
+}
+
+// CommitEx commits the current transaction and returns all the pooled queries.
+func (s *State) CommitEx() (failed []*types.Request, queries []*QueryTracker, err error) {
+	s.Lock()
+	defer s.Unlock()
+	if err = s.uncCommit(); err != nil {
+		// FATAL ERROR
+		return
+	}
+	if s.unc, err = s.strg.Writer().Begin(); err != nil {
+		// FATAL ERROR
+		return
+	}
+	s.setNextTxID()
+	s.setSavepoint()
+	// Return pooled items and reset
+	failed = s.pool.failedList()
+	queries = s.pool.queries
+	s.pool = newPool()
+	return
+}
+
+func (s *State) uncCommit() (err error) {
+	if err = s.unc.Commit(); err != nil {
+		return
+	}
+
+	// reset schema change flag
+	atomic.StoreUint32(&s.hasSchemaChange, 0)
+
+	return
+}
+
+func (s *State) rollback() (err error) {
+	s.Lock()
+	defer s.Unlock()
+	s.rollbackTo(s.cmpoint)
+	s.current = s.cmpoint
+	return
+}
+
+func (s *State) getLocalTime() time.Time {
+	return time.Now().UTC()
+}
+
+// Query does the query(ies) in req, pools the request and persists any change to
+// the underlying storage.
+func (s *State) Query(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) {
+	switch req.Header.QueryType {
+	case types.ReadQuery:
+		return s.readTx(req)
+	case types.WriteQuery:
+		return s.write(req)
+	default:
+		err = ErrInvalidRequest
+	}
+	return
+}
+
+// Replay replays a write log from another peer to replicate storage state.
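+// Read queries are skipped entirely; a write query must carry a response LogOffset
+// matching the local savepoint, otherwise the replay fails with ErrQueryConflict.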
+func (s *State) Replay(req *types.Request, resp *types.Response) (err error) {
+	// NOTE(leventeliu): failed requests are not tracked in remote nodes in the current
+	// implementation (while replaying via Replay calls), because we don't want to actually
+	// replay read queries in all synchronized nodes, yet whether a request will fail
+	// remains unknown until it is actually replayed -- a dead end here.
+	// So we just keep failed requests in the local pool and report them in the next local
+	// block producing.
+	switch req.Header.QueryType {
+	case types.ReadQuery:
+		return
+	case types.WriteQuery:
+		return s.replay(req, resp)
+	default:
+		err = ErrInvalidRequest
+	}
+	return
+}
+
+// Stat prints the statistics of the State object.
+func (s *State) Stat(id proto.DatabaseID) {
+	var (
+		p = func() *pool {
+			s.RLock()
+			defer s.RUnlock()
+			return s.pool
+		}()
+		fc = atomic.LoadInt32(&p.failedRequestCount)
+		tc = atomic.LoadInt32(&p.trackerCount)
+	)
+	log.WithFields(log.Fields{
+		"database_id":               id,
+		"pooled_fail_request_count": fc,
+		"pooled_query_tracker":      tc,
+	}).Info("Xeno pool stats")
+}
diff --git a/xenomint/state_test.go b/xenomint/state_test.go
new file mode 100644
index 000000000..783142a15
--- /dev/null
+++ b/xenomint/state_test.go
@@ -0,0 +1,521 @@
+/*
+ * Copyright 2018 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xenomint
+
+import (
+	"database/sql"
+	"fmt"
+	"os"
+	"path"
+	"testing"
+
+	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
+	"github.com/CovenantSQL/CovenantSQL/crypto/verifier"
+	"github.com/CovenantSQL/CovenantSQL/proto"
+	"github.com/CovenantSQL/CovenantSQL/types"
+	xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces"
+	xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite"
+	"github.com/pkg/errors"
+	.
"github.com/smartystreets/goconvey/convey" +) + +func TestState(t *testing.T) { + Convey("Given a chain state object", t, func() { + var ( + id1 = proto.DatabaseID("db-x1") + fl1 = path.Join(testingDataDir, fmt.Sprint(t.Name(), "x1")) + fl2 = path.Join(testingDataDir, fmt.Sprint(t.Name(), "x2")) + st1, st2 *State + strg1, strg2 xi.Storage + err error + ) + nodeID := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") + strg1, err = xs.NewSqlite(fmt.Sprint("file:", fl1)) + So(err, ShouldBeNil) + So(strg1, ShouldNotBeNil) + st1, err = NewState(nodeID, strg1) + So(err, ShouldBeNil) + So(st1, ShouldNotBeNil) + Reset(func() { + // Clean database file after each pass + err = st1.Close(true) + So(err, ShouldBeNil) + err = os.Remove(fl1) + So(err, ShouldBeNil) + err = os.Remove(fmt.Sprint(fl1, "-shm")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + err = os.Remove(fmt.Sprint(fl1, "-wal")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + }) + strg2, err = xs.NewSqlite(fmt.Sprint("file:", fl2)) + So(err, ShouldBeNil) + So(strg1, ShouldNotBeNil) + st2, err = NewState(nodeID, strg2) + So(err, ShouldBeNil) + So(st1, ShouldNotBeNil) + Reset(func() { + // Clean database file after each pass + err = st2.Close(true) + So(err, ShouldBeNil) + err = os.Remove(fl2) + So(err, ShouldBeNil) + err = os.Remove(fmt.Sprint(fl2, "-shm")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + err = os.Remove(fmt.Sprint(fl2, "-wal")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + }) + Convey("When storage is closed", func() { + err = st1.Close(false) + So(err, ShouldBeNil) + Convey("The storage should report error for any incoming query", func() { + var req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + }) + _, _, err = st1.Query(req) + So(err, ShouldNotBeNil) + err = errors.Cause(err) + So(err, ShouldNotBeNil) + So(err, ShouldEqual, sql.ErrTxDone) + }) + }) + Convey("The state will report error on read with uncommitted schema change", func() { + var ( + req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + }) + resp *types.Response + ) + _, resp, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT * FROM t1`), + })) + // any schema change query will trigger performance degradation mode in current block + So(err, ShouldBeNil) + }) + Convey("When a basic KV table is created", func() { + var ( + values = [][]interface{}{ + {int64(1), []byte("v1")}, + {int64(2), []byte("v2")}, + {int64(3), []byte("v3")}, + {int64(4), []byte("v4")}, + } + req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + }) + resp *types.Response + ) + _, resp, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + err = st1.commit() + So(err, ShouldBeNil) + _, resp, err = st2.Query(req) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + err = st2.commit() + Convey("The state should not change after attempted writing in read query", func() { + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, 1, "v1"), + buildQuery(`SELECT v FROM t1 WHERE k=?`, 1), + })) + // The use of Query instead of Exec won't produce an "attempt to write" error + // like Exec, but it should still keep it readonly -- which means writes 
will + // be ignored in this case. + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 0) + }) + Convey("The state should report invalid request with unknown query type", func() { + req = buildRequest(types.QueryType(0xff), []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }) + _, resp, err = st1.Query(req) + So(err, ShouldEqual, ErrInvalidRequest) + So(resp, ShouldBeNil) + err = st1.Replay(req, nil) + So(err, ShouldEqual, ErrInvalidRequest) + }) + Convey("The state should report error on malformed queries", func() { + _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`XXXXXX INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + err = st1.Replay(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`XXXXXX INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }), &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + LogOffset: st1.getID(), + }, + }, + }) + So(err, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t2 (k, v) VALUES (?, ?)`, values[0]...), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + err = st1.Replay(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t2 (k, v) VALUES (?, ?)`, values[0]...), + }), &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + LogOffset: st1.getID(), + }, + }, + }) + So(err, ShouldNotBeNil) + st1.Stat(id1) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`XXXXXX v FROM t1`), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t2`), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + _, resp, err = st1.read(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t2`), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + }) + Convey("The state should work properly with reading/writing queries", func() { + _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + })) + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 0) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, values[0][0]), + })) + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 1) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"v"}, + DeclTypes: []string{"TEXT"}, + Rows: []types.ResponseRow{{Values: values[0][1:]}}, + }) + st1.Stat(id1) + + _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[1]...), + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?); +INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), + })) + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 0) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1`), + })) + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 4) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"v"}, + DeclTypes: []string{"TEXT"}, + Rows: []types.ResponseRow{ + {Values: values[0][1:]}, + {Values: values[1][1:]}, + {Values: 
values[2][1:]}, + {Values: values[3][1:]}, + }, + }) + st1.Stat(id1) + + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT * FROM t1`), + })) + So(err, ShouldBeNil) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"k", "v"}, + DeclTypes: []string{"INT", "TEXT"}, + Rows: []types.ResponseRow{ + {Values: values[0][:]}, + {Values: values[1][:]}, + {Values: values[2][:]}, + {Values: values[3][:]}, + }, + }) + st1.Stat(id1) + + // Test show statements + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SHOW TABLE t1`), + })) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SHOW CREATE TABLE t1`), + })) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SHOW INDEX FROM TABLE t1`), + })) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SHOW TABLES`), + })) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + st1.Stat(id1) + + // Also test a non-transaction read implementation + _, resp, err = st1.read(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT * FROM t1`), + })) + So(err, ShouldBeNil) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"k", "v"}, + DeclTypes: []string{"INT", "TEXT"}, + Rows: []types.ResponseRow{ + {Values: values[0][:]}, + {Values: values[1][:]}, + {Values: values[2][:]}, + {Values: values[3][:]}, + }, + }) + st1.Stat(id1) + }) + Convey("The state should skip read query while replaying", func() { + err = st1.Replay(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT * FROM t1`), + }), nil) + So(err, ShouldBeNil) + }) + Convey("The state should report conflict state while replaying bad request", func() { + err = st1.Replay(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }), &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + LogOffset: uint64(0xff), + }, + }, + }) + err = errors.Cause(err) + So(err, ShouldEqual, ErrQueryConflict) + }) + Convey("The state should be reproducible in another instance", func() { + var ( + qt *QueryTracker + reqs = []*types.Request{ + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }), + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[1]...), + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?); +INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), + }), + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`DELETE FROM t1 WHERE k=?`, values[2][0]), + }), + } + ) + for i := range reqs { + qt, resp, err = st1.Query(reqs[i]) + So(err, ShouldBeNil) + So(qt, ShouldNotBeNil) + So(resp, ShouldNotBeNil) + qt.UpdateResp(resp) + // Replay to st2 + err = st2.Replay(reqs[i], resp) + So(err, ShouldBeNil) + } + // Should be in same state + for i := range values { + var resp1, resp2 *types.Response + req = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), + }) + _, resp1, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp1, ShouldNotBeNil) + _, resp2, err = st2.Query(req) + So(err, ShouldBeNil) + So(resp2, ShouldNotBeNil) + So(resp1.Payload, 
ShouldResemble, resp2.Payload) + } + }) + Convey("When queries are committed to blocks on state instance #1", func() { + var ( + qt *QueryTracker + reqs = []*types.Request{ + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }), + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[1]...), + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?); +INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), + }), + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`DELETE FROM t1 WHERE k=?`, values[2][0]), + }), + } + + cmtpos = 0 + cmtps = []int{1, len(reqs) - 1} + blocks = make([]*types.Block, len(cmtps)) + ) + for i := range reqs { + var resp *types.Response + qt, resp, err = st1.Query(reqs[i]) + So(err, ShouldBeNil) + So(qt, ShouldNotBeNil) + So(resp, ShouldNotBeNil) + qt.UpdateResp(resp) + // Commit block if matches the next commit point + if cmtpos < len(cmtps) && i == cmtps[cmtpos] { + var qts []*QueryTracker + _, qts, err = st1.CommitEx() + So(err, ShouldBeNil) + So(qts, ShouldNotBeNil) + blocks[cmtpos] = &types.Block{ + QueryTxs: make([]*types.QueryAsTx, len(qts)), + } + for i, v := range qts { + blocks[cmtpos].QueryTxs[i] = &types.QueryAsTx{ + Request: v.Req, + Response: &v.Resp.Header, + } + } + cmtpos++ + } + } + Convey( + "The state should report missing parent while replaying later block first", + func() { + err = st2.ReplayBlock(blocks[len(blocks)-1]) + So(err, ShouldEqual, ErrMissingParent) + }, + ) + Convey( + "The state should report conflict error while replaying modified query", + func() { + // Replay by request to st2 first + for _, v := range blocks { + for _, w := range v.QueryTxs { + err = st2.Replay(w.Request, &types.Response{ + Header: *w.Response, + }) + So(err, ShouldBeNil) + } + } + // Try to replay modified block #0 + var blockx = &types.Block{ + QueryTxs: []*types.QueryAsTx{ + &types.QueryAsTx{ + Request: &types.Request{ + Header: types.SignedRequestHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + }, + }, + }, + }, + Response: &types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + LogOffset: blocks[0].QueryTxs[0].Response.LogOffset, + }, + }, + }, + }, + } + blockx.QueryTxs[0].Request.Header.DataHash = hash.Hash{0x0, 0x0, 0x0, 0x1} + err = st2.ReplayBlock(blockx) + So(err, ShouldEqual, ErrQueryConflict) + }, + ) + Convey( + "The state should be reproducible with block replaying in empty instance #2", + func() { + // Block replaying + for i := range blocks { + err = st2.ReplayBlock(blocks[i]) + So(err, ShouldBeNil) + } + // Should be in same state + for i := range values { + var resp1, resp2 *types.Response + req = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), + }) + _, resp1, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp1, ShouldNotBeNil) + _, resp2, err = st2.Query(req) + So(err, ShouldBeNil) + So(resp2, ShouldNotBeNil) + So(resp1.Payload, ShouldResemble, resp2.Payload) + } + }, + ) + Convey( + "The state should be reproducible with block replaying in synchronized"+ + " instance #2", + func() { + // Replay by request to st2 first + for _, v := range blocks { + for _, w := range v.QueryTxs { + err = st2.Replay(w.Request, &types.Response{ + Header: *w.Response, + }) + So(err, ShouldBeNil) + } + } + // Block 
replaying + for i := range blocks { + err = st2.ReplayBlock(blocks[i]) + So(err, ShouldBeNil) + } + // Should be in same state + for i := range values { + var resp1, resp2 *types.Response + req = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), + }) + _, resp1, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp1, ShouldNotBeNil) + _, resp2, err = st2.Query(req) + So(err, ShouldBeNil) + So(resp2, ShouldNotBeNil) + So(resp1.Payload, ShouldResemble, resp2.Payload) + } + }, + ) + }) + }) + }) +} diff --git a/xenomint/types/block.go b/xenomint/types/block.go new file mode 100644 index 000000000..2621786ab --- /dev/null +++ b/xenomint/types/block.go @@ -0,0 +1,102 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/merkle" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" +) + +//go:generate hsp + +// BlockHeader defines a block header. +type BlockHeader struct { + Version int32 + Producer proto.NodeID + GenesisHash hash.Hash + ParentHash hash.Hash + MerkleRoot hash.Hash + Timestamp time.Time +} + +// SignedBlockHeader defines a block along with its hasher, signer and verifier. +type SignedBlockHeader struct { + BlockHeader + DefaultHashSignVerifierImpl +} + +// Sign signs the block header. +func (h *SignedBlockHeader) Sign(signer *asymmetric.PrivateKey) error { + return h.DefaultHashSignVerifierImpl.Sign(&h.BlockHeader, signer) +} + +// Verify verifies the block header. +func (h *SignedBlockHeader) Verify() error { + return h.DefaultHashSignVerifierImpl.Verify(&h.BlockHeader) +} + +// Block defines a block including a signed block header and its query list. +type Block struct { + SignedBlockHeader + ReadQueries []*types.Ack + WriteQueries []*types.Ack +} + +// Sign signs the block. +func (b *Block) Sign(signer *asymmetric.PrivateKey) (err error) { + // Update header fields: generate merkle root from queries + var hashes []*hash.Hash + for _, v := range b.ReadQueries { + h := v.Header.Hash() + hashes = append(hashes, &h) + } + for _, v := range b.WriteQueries { + h := v.Header.Hash() + hashes = append(hashes, &h) + } + if err = b.MerkleRoot.SetBytes(merkle.NewMerkle(hashes).GetRoot()[:]); err != nil { + return + } + // Sign block header + return b.SignedBlockHeader.Sign(signer) +} + +// Verify verifies the block. 
+func (b *Block) Verify() error { + // Verify header fields: compare merkle root from queries + var hashes []*hash.Hash + for _, v := range b.ReadQueries { + h := v.Header.Hash() + hashes = append(hashes, &h) + } + for _, v := range b.WriteQueries { + h := v.Header.Hash() + hashes = append(hashes, &h) + } + if mroot := merkle.NewMerkle(hashes).GetRoot(); !mroot.IsEqual( + &b.SignedBlockHeader.MerkleRoot, + ) { + return ErrMerkleRootNotMatch + } + // Verify block header signature + return b.SignedBlockHeader.Verify() +} diff --git a/xenomint/types/block_gen.go b/xenomint/types/block_gen.go new file mode 100644 index 000000000..4c19a8bf2 --- /dev/null +++ b/xenomint/types/block_gen.go @@ -0,0 +1,143 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Block) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + // map header, size 2 + o = append(o, 0x83, 0x83, 0x82, 0x82) + if oTemp, err := z.SignedBlockHeader.BlockHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.SignedBlockHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.ReadQueries))) + for za0001 := range z.ReadQueries { + if z.ReadQueries[za0001] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.ReadQueries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.WriteQueries))) + for za0002 := range z.WriteQueries { + if z.WriteQueries[za0002] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.WriteQueries[za0002].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Block) Msgsize() (s int) { + s = 1 + 18 + 1 + 12 + z.SignedBlockHeader.BlockHeader.Msgsize() + 28 + z.SignedBlockHeader.DefaultHashSignVerifierImpl.Msgsize() + 12 + hsp.ArrayHeaderSize + for za0001 := range z.ReadQueries { + if z.ReadQueries[za0001] == nil { + s += hsp.NilSize + } else { + s += z.ReadQueries[za0001].Msgsize() + } + } + s += 13 + hsp.ArrayHeaderSize + for za0002 := range z.WriteQueries { + if z.WriteQueries[za0002] == nil { + s += hsp.NilSize + } else { + s += z.WriteQueries[za0002].Msgsize() + } + } + return +} + +// MarshalHash marshals for hash +func (z *BlockHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 6 + o = append(o, 0x86, 0x86) + if oTemp, err := z.GenesisHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + if oTemp, err := z.ParentHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + o = hsp.AppendInt32(o, z.Version) + o = append(o, 0x86) + if oTemp, err := z.Producer.MarshalHash(); err != nil { + return nil, err + } else { + o = 
hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + o = hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *BlockHeader) Msgsize() (s int) { + s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.ParentHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z *SignedBlockHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.BlockHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedBlockHeader) Msgsize() (s int) { + s = 1 + 12 + z.BlockHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/kayak/types_gen_test.go b/xenomint/types/block_gen_test.go similarity index 72% rename from kayak/types_gen_test.go rename to xenomint/types/block_gen_test.go index 17a02ec9d..9d948edad 100644 --- a/kayak/types_gen_test.go +++ b/xenomint/types/block_gen_test.go @@ -1,4 +1,4 @@ -package kayak +package types // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. @@ -9,8 +9,8 @@ import ( "testing" ) -func TestMarshalHashLog(t *testing.T) { - v := Log{} +func TestMarshalHashBlock(t *testing.T) { + v := Block{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -25,8 +25,8 @@ func TestMarshalHashLog(t *testing.T) { } } -func BenchmarkMarshalHashLog(b *testing.B) { - v := Log{} +func BenchmarkMarshalHashBlock(b *testing.B) { + v := Block{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -34,8 +34,8 @@ func BenchmarkMarshalHashLog(b *testing.B) { } } -func BenchmarkAppendMsgLog(b *testing.B) { - v := Log{} +func BenchmarkAppendMsgBlock(b *testing.B) { + v := Block{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) @@ -46,8 +46,8 @@ func BenchmarkAppendMsgLog(b *testing.B) { } } -func TestMarshalHashPeers(t *testing.T) { - v := Peers{} +func TestMarshalHashBlockHeader(t *testing.T) { + v := BlockHeader{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -62,8 +62,8 @@ func TestMarshalHashPeers(t *testing.T) { } } -func BenchmarkMarshalHashPeers(b *testing.B) { - v := Peers{} +func BenchmarkMarshalHashBlockHeader(b *testing.B) { + v := BlockHeader{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -71,8 +71,8 @@ func BenchmarkMarshalHashPeers(b *testing.B) { } } -func BenchmarkAppendMsgPeers(b *testing.B) { - v := Peers{} +func BenchmarkAppendMsgBlockHeader(b *testing.B) { + v := BlockHeader{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) @@ -83,8 +83,8 @@ func BenchmarkAppendMsgPeers(b *testing.B) { } } -func TestMarshalHashServer(t *testing.T) { - v := Server{} +func TestMarshalHashSignedBlockHeader(t *testing.T) { + v := SignedBlockHeader{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -99,8 +99,8 @@ func TestMarshalHashServer(t *testing.T) { } } -func 
BenchmarkMarshalHashServer(b *testing.B) { - v := Server{} +func BenchmarkMarshalHashSignedBlockHeader(b *testing.B) { + v := SignedBlockHeader{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -108,8 +108,8 @@ func BenchmarkMarshalHashServer(b *testing.B) { } } -func BenchmarkAppendMsgServer(b *testing.B) { - v := Server{} +func BenchmarkAppendMsgSignedBlockHeader(b *testing.B) { + v := SignedBlockHeader{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) diff --git a/xenomint/types/block_test.go b/xenomint/types/block_test.go new file mode 100644 index 000000000..e971bfffe --- /dev/null +++ b/xenomint/types/block_test.go @@ -0,0 +1,87 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/types" + . "github.com/smartystreets/goconvey/convey" +) + +func TestBlock(t *testing.T) { + Convey("Given a block and a pair of keys", t, func() { + var ( + block = &Block{ + SignedBlockHeader: SignedBlockHeader{ + BlockHeader: BlockHeader{}, + }, + ReadQueries: []*types.Ack{ + { + Header: types.SignedAckHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x0, 0x0, 0x0, 0x1}, + }, + }, + }, + }, + WriteQueries: []*types.Ack{ + { + Header: types.SignedAckHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x0, 0x0, 0x0, 0x2}, + }, + }, + }, + }, + } + priv, _, err = asymmetric.GenSecp256k1KeyPair() + ) + So(err, ShouldBeNil) + So(priv, ShouldNotBeNil) + Convey("When the block is signed by the key pair", func() { + err = block.Sign(priv) + So(err, ShouldBeNil) + Convey("The block should be verifiable", func() { + err = block.Verify() + So(err, ShouldBeNil) + }) + Convey("The object should have data hash", func() { + var enc, err = block.BlockHeader.MarshalHash() + So(err, ShouldBeNil) + So(enc, ShouldNotBeNil) + So(block.SignedBlockHeader.Hash(), ShouldEqual, hash.THashH(enc)) + }) + Convey("When the queries are modified", func() { + block.ReadQueries = append(block.ReadQueries, &types.Ack{ + Header: types.SignedAckHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x0, 0x0, 0x0, 0x3}, + }, + }, + }) + Convey("The verifier should return merkle root not match error", func() { + err = block.Verify() + So(err, ShouldEqual, ErrMerkleRootNotMatch) + }) + }) + }) + }) +} diff --git a/blockproducer/types/common.go b/xenomint/types/common.go similarity index 81% rename from blockproducer/types/common.go rename to xenomint/types/common.go index 024cc9646..cd4f48a1f 100644 --- a/blockproducer/types/common.go +++ b/xenomint/types/common.go @@ -21,20 +21,24 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" ) -type 
marshalHasher interface { - MarshalHash() ([]byte, error) -} +//go:generate hsp // DefaultHashSignVerifierImpl defines a default implementation of hashSignVerifier. type DefaultHashSignVerifierImpl struct { - Hash hash.Hash + DataHash hash.Hash Signee *asymmetric.PublicKey Signature *asymmetric.Signature } -// GetHash implements hashSignVerifier.GetHash. -func (i *DefaultHashSignVerifierImpl) GetHash() hash.Hash { - return i.Hash +// marshalHasher is the interface implemented by an object that can be stably +// marshalled and hashed. +type marshalHasher interface { + MarshalHash() ([]byte, error) +} + +// Hash implements hashSignVerifier.Hash. +func (i *DefaultHashSignVerifierImpl) Hash() hash.Hash { + return i.DataHash } // Sign implements hashSignVerifier.Sign. @@ -49,7 +53,7 @@ func (i *DefaultHashSignVerifierImpl) Sign( if i.Signature, err = signer.Sign(h[:]); err != nil { return } - i.Hash = h + i.DataHash = h i.Signee = signer.PubKey() return } @@ -61,12 +65,12 @@ func (i *DefaultHashSignVerifierImpl) Verify(obj marshalHasher) (err error) { return } var h = hash.THashH(enc) - if !i.Hash.IsEqual(&h) { - err = ErrSignVerification + if !i.DataHash.IsEqual(&h) { + err = ErrHashValueNotMatch return } if !i.Signature.Verify(h[:], i.Signee) { - err = ErrSignVerification + err = ErrSignatureNotMatch return } return diff --git a/blockproducer/types/common_gen.go b/xenomint/types/common_gen.go similarity index 93% rename from blockproducer/types/common_gen.go rename to xenomint/types/common_gen.go index 9f8cefc8a..8fdb3ee08 100644 --- a/blockproducer/types/common_gen.go +++ b/xenomint/types/common_gen.go @@ -32,7 +32,7 @@ func (z *DefaultHashSignVerifierImpl) MarshalHash() (o []byte, err error) { } } o = append(o, 0x83) - if oTemp, err := z.Hash.MarshalHash(); err != nil { + if oTemp, err := z.DataHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -54,6 +54,6 @@ func (z *DefaultHashSignVerifierImpl) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 5 + z.Hash.Msgsize() + s += 9 + z.DataHash.Msgsize() return } diff --git a/blockproducer/types/common_gen_test.go b/xenomint/types/common_gen_test.go similarity index 100% rename from blockproducer/types/common_gen_test.go rename to xenomint/types/common_gen_test.go diff --git a/xenomint/types/common_test.go b/xenomint/types/common_test.go new file mode 100644 index 000000000..76f6c43b7 --- /dev/null +++ b/xenomint/types/common_test.go @@ -0,0 +1,100 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "math/big" + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + . 
"github.com/smartystreets/goconvey/convey" +) + +var ( + dummyHash = []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } +) + +type DummyHeader struct{} + +func (h *DummyHeader) MarshalHash() ([]byte, error) { + return dummyHash, nil +} + +type DummyObject struct { + DummyHeader + DefaultHashSignVerifierImpl +} + +func (o *DummyObject) Sign(signer *asymmetric.PrivateKey) error { + return o.DefaultHashSignVerifierImpl.Sign(&o.DummyHeader, signer) +} + +func (o *DummyObject) Verify() error { + return o.DefaultHashSignVerifierImpl.Verify(&o.DummyHeader) +} + +func TestDefaultHashSignVerifierImpl(t *testing.T) { + Convey("Given a dummy object and a pair of keys", t, func() { + var ( + obj = &DummyObject{} + priv, _, err = asymmetric.GenSecp256k1KeyPair() + ) + So(err, ShouldBeNil) + So(priv, ShouldNotBeNil) + Convey("When the object is signed by the key pair", func() { + err = obj.Sign(priv) + So(err, ShouldBeNil) + Convey("The object should be verifiable", func() { + err = obj.Verify() + So(err, ShouldBeNil) + }) + Convey("The object should have data hash", func() { + So(obj.Hash(), ShouldEqual, hash.THashH(dummyHash)) + }) + Convey("When the hash is modified", func() { + obj.DefaultHashSignVerifierImpl.DataHash = hash.Hash{0x0, 0x0, 0x0, 0x1} + Convey("The verifier should return hash value not match error", func() { + err = obj.Verify() + So(err, ShouldEqual, ErrHashValueNotMatch) + }) + }) + Convey("When the signee is modified", func() { + var _, pub, err = asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + obj.DefaultHashSignVerifierImpl.Signee = pub + Convey("The verifier should return signature not match error", func() { + err = obj.Verify() + So(err, ShouldEqual, ErrSignatureNotMatch) + }) + }) + Convey("When the signature is modified", func() { + var val = obj.DefaultHashSignVerifierImpl.Signature.R + val.Add(val, big.NewInt(1)) + Convey("The verifier should return signature not match error", func() { + err = obj.Verify() + So(err, ShouldEqual, ErrSignatureNotMatch) + }) + }) + }) + }) +} diff --git a/xenomint/types/doc.go b/xenomint/types/doc.go new file mode 100644 index 000000000..28c02e239 --- /dev/null +++ b/xenomint/types/doc.go @@ -0,0 +1,17 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types diff --git a/xenomint/types/errors.go b/xenomint/types/errors.go new file mode 100644 index 000000000..10bb609f8 --- /dev/null +++ b/xenomint/types/errors.go @@ -0,0 +1,30 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "errors" +) + +var ( + // ErrMerkleRootNotMatch indicates the merkle root not match error from verifier. + ErrMerkleRootNotMatch = errors.New("merkle root not match") + // ErrHashValueNotMatch indicates the hash value not match error from verifier. + ErrHashValueNotMatch = errors.New("hash value not match") + // ErrSignatureNotMatch indicates the signature not match error from verifier. + ErrSignatureNotMatch = errors.New("signature not match") +) diff --git a/xenomint/types/xxx_test.go b/xenomint/types/xxx_test.go new file mode 100644 index 000000000..04c291621 --- /dev/null +++ b/xenomint/types/xxx_test.go @@ -0,0 +1,43 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "math/rand" + "os" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func setup() { + rand.Seed(time.Now().UnixNano()) + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) +} + +func teardown() { +} + +func TestMain(m *testing.M) { + os.Exit(func() int { + setup() + defer teardown() + return m.Run() + }()) +} diff --git a/xenomint/xxx_test.go b/xenomint/xxx_test.go new file mode 100644 index 000000000..aabe8fe80 --- /dev/null +++ b/xenomint/xxx_test.go @@ -0,0 +1,221 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "io/ioutil" + "math/rand" + "os" + "path" + //"runtime/trace" + "sync" + "syscall" + "testing" + "time" + + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + pc "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +const ( + benchmarkQueriesPerBlock = 100 + + benchmarkRPCName = "BENCH" + benchmarkDatabaseID = "0x0" + + benchmarkVNum = 3 + benchmarkVLen = 333 + // benchmarkKeySpace defines the key space for benchmarking. 
+ // + // We will have `benchmarkKeySpace` preserved records in the generated testing table and + // another `benchmarkKeySpace` constructed incoming records returned from the setup function. + benchmarkKeySpace = 100000 ) + +var ( + testingDataDir string + testingTraceFile *os.File + testingPrivateKeyFile string + testingPublicKeyStoreFile string + testingNonceDifficulty int + + testingPrivateKey *ca.PrivateKey + testingPublicKey *ca.PublicKey + + testingMasterKey = []byte(`?08Rl%WUih4V0H+c`) +) + +func buildQuery(query string, args ...interface{}) types.Query { + var nargs = make([]types.NamedArg, len(args)) + for i := range args { + nargs[i] = types.NamedArg{ + Name: "", + Value: args[i], + } + } + return types.Query{ + Pattern: query, + Args: nargs, + } +} + +func buildRequest(qt types.QueryType, qs []types.Query) *types.Request { + var ( + id proto.NodeID + err error + ) + if id, err = kms.GetLocalNodeID(); err != nil { + id = proto.NodeID("00000000000000000000000000000000") + } + return &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + NodeID: id, + Timestamp: time.Now().UTC(), + QueryType: qt, + }, + }, + Payload: types.RequestPayload{Queries: qs}, + } +} + +func concat(args [][]interface{}) (ret []interface{}) { + var ( + tlen int + ) + for _, v := range args { + tlen += len(v) + } + ret = make([]interface{}, 0, tlen) + for _, v := range args { + ret = append(ret, v...) + } + return +} + +func createNodesWithPublicKey( + pub *ca.PublicKey, diff int, num int) (nis []proto.Node, err error, +) { + var ( + nic = make(chan pc.NonceInfo) + block = pc.MiningBlock{Data: pub.Serialize(), NonceChan: nic, Stop: nil} + miner = pc.NewCPUMiner(nil) + wg = &sync.WaitGroup{} + + next pc.Uint256 + ni pc.NonceInfo + ) + + defer func() { + wg.Wait() + close(nic) + }() + + nis = make([]proto.Node, num) + for i := range nis { + wg.Add(1) + go func() { + defer wg.Done() + miner.ComputeBlockNonce(block, next, diff) + }() + ni = <-nic + nis[i] = proto.Node{ + ID: proto.NodeID(ni.Hash.String()), + Nonce: ni.Nonce, + PublicKey: pub, + } + next = ni.Nonce + next.Inc() + } + + return +} + +func setup() { + const minNoFile uint64 = 4096 + var ( + err error + lmt syscall.Rlimit + ) + + if testingDataDir, err = ioutil.TempDir("", "CovenantSQL"); err != nil { + panic(err) + } + + rand.Seed(time.Now().UnixNano()) + + // Set NOFILE limit + if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { + panic(err) + } + if lmt.Max < minNoFile { + panic("insufficient max RLIMIT_NOFILE") + } + lmt.Cur = lmt.Max + if err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { + panic(err) + } + + // Initialize kms + testingNonceDifficulty = 2 + testingPrivateKeyFile = path.Join(testingDataDir, "private.key") + testingPublicKeyStoreFile = path.Join(testingDataDir, "public.keystore") + if testingPrivateKey, testingPublicKey, err = ca.GenSecp256k1KeyPair(); err != nil { + panic(err) + } + kms.Unittest = true + kms.SetLocalKeyPair(testingPrivateKey, testingPublicKey) + if err = kms.SavePrivateKey( + testingPrivateKeyFile, testingPrivateKey, testingMasterKey, + ); err != nil { + panic(err) + } + + // Setup runtime trace for testing + //if testingTraceFile, err = ioutil.TempFile("", "CovenantSQL.trace."); err != nil { + // panic(err) + //} + //if err = trace.Start(testingTraceFile); err != nil { + // panic(err) + //} + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) +} + +func teardown() { + //trace.Stop() + var err error + //if err = 
testingTraceFile.Close(); err != nil { + // panic(err) + //} + if err = os.RemoveAll(testingDataDir); err != nil { + panic(err) + } +} + +func TestMain(m *testing.M) { + os.Exit(func() int { + setup() + defer teardown() + return m.Run() + }()) +}
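A note on the replay semantics exercised by the xenomint state tests above: a synchronizing instance can catch up either per request via Replay or per block via ReplayBlock, and the tests require both paths to converge on the same state. Below is a minimal sketch of the block path; it assumes the state type in this package is named State, which is not visible in this hunk.

package xenomint

import "github.com/CovenantSQL/CovenantSQL/types"

// syncByBlocks replays committed blocks onto a follower state in
// parent-first order, mirroring the ReplayBlock branch of the tests;
// replaying a later block first fails with ErrMissingParent, and a
// tampered query fails with ErrQueryConflict.
func syncByBlocks(follower *State, blocks []*types.Block) error {
	for _, b := range blocks {
		if err := follower.ReplayBlock(b); err != nil {
			return err
		}
	}
	return nil
}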
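Putting the new xenomint/types pieces together, signing and verifying a Block from caller code looks roughly like the sketch below, which mirrors TestBlock; the xt import alias and the placeholder DataHash are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
	"github.com/CovenantSQL/CovenantSQL/crypto/verifier"
	"github.com/CovenantSQL/CovenantSQL/types"
	xt "github.com/CovenantSQL/CovenantSQL/xenomint/types"
)

func main() {
	priv, _, err := asymmetric.GenSecp256k1KeyPair()
	if err != nil {
		panic(err)
	}
	// A block carrying one acknowledged read query; the DataHash here is a
	// placeholder, real acks are hashed and signed by the query layer.
	block := &xt.Block{
		ReadQueries: []*types.Ack{
			{
				Header: types.SignedAckHeader{
					DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{
						DataHash: hash.Hash{0x0, 0x0, 0x0, 0x1},
					},
				},
			},
		},
	}
	// Sign recomputes the merkle root over all query hashes, then hashes and
	// signs the header; Verify checks the same two facts in reverse.
	if err = block.Sign(priv); err != nil {
		panic(err)
	}
	if err = block.Verify(); err != nil {
		panic(err)
	}
	fmt.Println("block header hash:", block.SignedBlockHeader.Hash())
}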
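The hsp-generated MarshalHash methods above exist to give these types a deterministic byte image to hash and sign, and the generated round-trip tests assert exactly that property. In spirit:

package types

import (
	"bytes"
	"testing"
)

// TestMarshalHashStable checks the determinism that the signing code
// relies on: two marshals of the same value are byte-identical.
func TestMarshalHashStable(t *testing.T) {
	v := BlockHeader{Version: 1}
	b1, err := v.MarshalHash()
	if err != nil {
		t.Fatal(err)
	}
	b2, err := v.MarshalHash()
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(b1, b2) {
		t.Fatal("MarshalHash is not deterministic")
	}
}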
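On the DefaultHashSignVerifierImpl moved into xenomint/types: any type with a stable MarshalHash can embed it and delegate Sign and Verify, which is the pattern used by SignedBlockHeader and by the DummyObject fixture in common_test.go. A minimal sketch in the same package; ExampleHeader and its fixed byte image are placeholders.

package types

import (
	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
)

// ExampleHeader stands in for any header type with a stable
// MarshalHash() ([]byte, error) method.
type ExampleHeader struct{}

// MarshalHash returns a fixed byte image for brevity; real headers use
// their hsp-generated marshaller.
func (h *ExampleHeader) MarshalHash() ([]byte, error) {
	return []byte{0x0, 0x1}, nil
}

// ExampleObject embeds the verifier beside its header, exactly like
// SignedBlockHeader does with BlockHeader.
type ExampleObject struct {
	ExampleHeader
	DefaultHashSignVerifierImpl
}

// Sign hashes the marshalled header with THashH, signs the digest, and
// stores DataHash, Signee and Signature.
func (o *ExampleObject) Sign(signer *asymmetric.PrivateKey) error {
	return o.DefaultHashSignVerifierImpl.Sign(&o.ExampleHeader, signer)
}

// Verify re-marshals the header, then checks DataHash against the fresh
// digest and the signature against the signee.
func (o *ExampleObject) Verify() error {
	return o.DefaultHashSignVerifierImpl.Verify(&o.ExampleHeader)
}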
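Finally, for context on the createNodesWithPublicKey helper: test node identities are derived by CPU-mining a nonce over the serialized public key until the resulting hash satisfies the requested difficulty. A single-node sketch using the same cpuminer API; the difficulty value mirrors testingNonceDifficulty in setup.

package main

import (
	"fmt"

	ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
	pc "github.com/CovenantSQL/CovenantSQL/pow/cpuminer"
	"github.com/CovenantSQL/CovenantSQL/proto"
)

func main() {
	_, pub, err := ca.GenSecp256k1KeyPair()
	if err != nil {
		panic(err)
	}
	var (
		nic   = make(chan pc.NonceInfo)
		block = pc.MiningBlock{Data: pub.Serialize(), NonceChan: nic, Stop: nil}
		miner = pc.NewCPUMiner(nil)
	)
	// Mine a single nonce starting from zero at difficulty 2.
	go miner.ComputeBlockNonce(block, pc.Uint256{}, 2)
	ni := <-nic
	// The node ID is simply the string form of the mined hash.
	node := proto.Node{
		ID:        proto.NodeID(ni.Hash.String()),
		Nonce:     ni.Nonce,
		PublicKey: pub,
	}
	fmt.Println("mined node ID:", node.ID)
}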