From 670bd0af8e296389e7cf1d876b43236699e0bc12 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 2 Nov 2018 01:28:02 +0800 Subject: [PATCH 01/32] Kayak performance refactor --- blockproducer/chain.go | 12 +- blockproducer/chain_test.go | 19 +- blockproducer/config.go | 5 +- blockproducer/db_service.go | 65 +- blockproducer/db_service_map.go | 25 +- blockproducer/db_service_map_test.go | 2 +- blockproducer/db_service_test.go | 2 +- blockproducer/db_service_types.go | 150 ++- blockproducer/db_service_types_gen.go | 547 ++++++++++ blockproducer/db_service_types_gen_test.go | 602 +++++++++++ blockproducer/errors.go | 2 - blockproducer/helper_test.go | 25 +- blockproducer/runtime.go | 7 +- blockproducer/xxx_test.go | 55 +- client/conn.go | 16 +- client/driver.go | 12 +- client/helper_test.go | 48 +- cmd/cql-minerd/dbms.go | 33 +- cmd/cql-observer/api.go | 8 +- cmd/cql-observer/observation_test.go | 2 +- cmd/cql-observer/service.go | 12 +- cmd/cqld/adapter.go | 209 ++-- cmd/cqld/bootstrap.go | 74 +- cmd/cqld/initconf.go | 33 +- kayak/api/mux.go => cmd/cqld/kayak.go | 25 +- cmd/hotfix/hash-upgrade/main.go | 37 +- kayak/api/twopc.go | 116 -- kayak/api/twopc_integ_test.go | 388 ------- kayak/boltdb_store.go | 284 ----- kayak/boltdb_store_test.go | 376 ------- kayak/{doc.go => caller.go} | 9 +- kayak/inmem_store_test.go | 142 --- kayak/mock_Config_test.go | 41 - kayak/mock_LogStore_test.go | 123 --- kayak/mock_Runner_test.go | 82 -- kayak/mock_StableStore_test.go | 97 -- kayak/mock_Worker_test.go | 81 -- kayak/mock_kayak_test.go | 492 --------- kayak/{util.go => rpc.go} | 22 +- kayak/runtime.go | 846 +++++++++++++-- kayak/runtime_test.go | 367 ------- kayak/test/runtime_test.go | 347 ++++++ kayak/tracker.go | 159 +++ kayak/transport/etls_transport.go | 197 ---- kayak/transport/etls_transport_test.go | 345 ------ kayak/transport/network_transport.go | 273 ----- kayak/transport/network_transport_test.go | 463 -------- kayak/twopc_runner.go | 829 --------------- 
kayak/twopc_runner_test.go | 995 ------------------ kayak/types.go | 327 ------ kayak/types/config.go | 49 + kayak/{api => types}/doc.go | 5 +- kayak/types/errors.go | 36 + kayak/types/handler.go | 25 + kayak/types/log.go | 70 ++ kayak/types/rpc.go | 26 + kayak/types/wal.go | 27 + kayak/types_gen.go | 190 ---- kayak/types_test.go | 252 ----- kayak/{transport => wal}/doc.go | 5 +- kayak/{ => wal}/errors.go | 18 +- kayak/wal/leveldb_wal.go | 272 +++++ kayak/wal/leveldb_wal_test.go | 91 ++ kayak/wal/mem_wal.go | 128 +++ kayak/wal/mem_wal_test.go | 172 +++ proto/errors.go | 27 + proto/nodeinfo.go | 1 - proto/servers.go | 103 ++ proto/servers_gen.go | 102 ++ .../servers_gen_test.go | 51 +- rpc/sharedsecret.go | 5 +- sqlchain/chain.go | 49 +- sqlchain/chain_test.go | 14 +- sqlchain/config.go | 5 +- sqlchain/mux.go | 6 +- sqlchain/observer.go | 13 +- sqlchain/queryindex.go | 34 +- sqlchain/queryindex_test.go | 22 +- sqlchain/rpc.go | 6 +- sqlchain/runtime.go | 25 +- sqlchain/storage/storage.go | 12 +- sqlchain/storage/storage_test.go | 12 +- sqlchain/xxx_test.go | 30 +- worker/db.go | 101 +- worker/db_config.go | 3 +- worker/db_storage.go | 124 +-- worker/db_test.go | 83 +- worker/dbms.go | 30 +- worker/dbms_mux.go | 65 ++ worker/dbms_test.go | 7 +- worker/errors.go | 3 + worker/types/ack_type.go | 73 +- worker/types/ack_type_gen.go | 4 +- worker/types/init_service_type.go | 116 +- worker/types/init_service_type_gen.go | 4 +- worker/types/no_ack_report_type.go | 139 +-- worker/types/no_ack_report_type_gen.go | 8 +- worker/types/request_type.go | 116 +- worker/types/request_type_gen.go | 136 ++- worker/types/request_type_gen_test.go | 148 +++ worker/types/response_type.go | 114 +- worker/types/response_type_gen.go | 4 +- worker/types/types_test.go | 289 +---- worker/types/update_service_type.go | 66 +- worker/types/update_service_type_gen.go | 4 +- worker/types/util.go | 19 +- 106 files changed, 4752 insertions(+), 8215 deletions(-) create mode 100644 
blockproducer/db_service_types_gen.go create mode 100644 blockproducer/db_service_types_gen_test.go rename kayak/api/mux.go => cmd/cqld/kayak.go (55%) delete mode 100644 kayak/api/twopc.go delete mode 100644 kayak/api/twopc_integ_test.go delete mode 100644 kayak/boltdb_store.go delete mode 100644 kayak/boltdb_store_test.go rename kayak/{doc.go => caller.go} (76%) delete mode 100644 kayak/inmem_store_test.go delete mode 100644 kayak/mock_Config_test.go delete mode 100644 kayak/mock_LogStore_test.go delete mode 100644 kayak/mock_Runner_test.go delete mode 100644 kayak/mock_StableStore_test.go delete mode 100644 kayak/mock_Worker_test.go delete mode 100644 kayak/mock_kayak_test.go rename kayak/{util.go => rpc.go} (69%) delete mode 100644 kayak/runtime_test.go create mode 100644 kayak/test/runtime_test.go create mode 100644 kayak/tracker.go delete mode 100644 kayak/transport/etls_transport.go delete mode 100644 kayak/transport/etls_transport_test.go delete mode 100644 kayak/transport/network_transport.go delete mode 100644 kayak/transport/network_transport_test.go delete mode 100644 kayak/twopc_runner.go delete mode 100644 kayak/twopc_runner_test.go delete mode 100644 kayak/types.go create mode 100644 kayak/types/config.go rename kayak/{api => types}/doc.go (86%) create mode 100644 kayak/types/errors.go create mode 100644 kayak/types/handler.go create mode 100644 kayak/types/log.go create mode 100644 kayak/types/rpc.go create mode 100644 kayak/types/wal.go delete mode 100644 kayak/types_gen.go delete mode 100644 kayak/types_test.go rename kayak/{transport => wal}/doc.go (84%) rename kayak/{ => wal}/errors.go (62%) create mode 100644 kayak/wal/leveldb_wal.go create mode 100644 kayak/wal/leveldb_wal_test.go create mode 100644 kayak/wal/mem_wal.go create mode 100644 kayak/wal/mem_wal_test.go create mode 100644 proto/errors.go create mode 100644 proto/servers.go create mode 100644 proto/servers_gen.go rename kayak/types_gen_test.go => proto/servers_gen_test.go (60%) create 
mode 100644 worker/dbms_mux.go diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 59904afbd..b8a836652 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -374,7 +374,7 @@ func (c *Chain) produceBlock(now time.Time) error { peers := c.rt.getPeers() wg := &sync.WaitGroup{} for _, s := range peers.Servers { - if !s.ID.IsEqual(&c.rt.nodeID) { + if !s.IsEqual(&c.rt.nodeID) { wg.Add(1) go func(id proto.NodeID) { defer wg.Done() @@ -399,7 +399,7 @@ func (c *Chain) produceBlock(now time.Time) error { "node": id, }).Debug("success advising block") } - }(s.ID) + }(s) } } @@ -723,12 +723,12 @@ func (c *Chain) syncHead() { succ := false for i, s := range peers.Servers { - if !s.ID.IsEqual(&c.rt.nodeID) { - err = c.cl.CallNode(s.ID, route.MCCFetchBlock.String(), req, resp) + if !s.IsEqual(&c.rt.nodeID) { + err = c.cl.CallNode(s, route.MCCFetchBlock.String(), req, resp) if err != nil || resp.Block == nil { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s.ID), + "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().getHeight(), "head_block": c.rt.getHead().getHeader().String(), @@ -738,7 +738,7 @@ func (c *Chain) syncHead() { c.blocksFromRPC <- resp.Block log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s.ID), + "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().getHeight(), "head_block": c.rt.getHead().getHeader().String(), diff --git a/blockproducer/chain_test.go b/blockproducer/chain_test.go index 450a4f4cd..c809dde19 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -26,7 +26,6 @@ import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types" 
"github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" @@ -69,7 +68,7 @@ func TestChain(t *testing.T) { So(err, ShouldBeNil) _, peers, err := createTestPeersWithPrivKeys(priv, testPeersNumber) - cfg := NewConfig(genesis, fl.Name(), rpcServer, peers, peers.Servers[0].ID, testPeriod, testTick) + cfg := NewConfig(genesis, fl.Name(), rpcServer, peers, peers.Servers[0], testPeriod, testTick) chain, err := NewChain(cfg) So(err, ShouldBeNil) ao, ok := chain.ms.readonly.accounts[testAddress1] @@ -199,7 +198,7 @@ func TestMultiNode(t *testing.T) { } var nis []cpuminer.NonceInfo - var peers *kayak.Peers + var peers *proto.Peers peerInited := false for i := range chains { // create tmp file @@ -219,13 +218,13 @@ func TestMultiNode(t *testing.T) { So(err, ShouldBeNil) for i, p := range peers.Servers { - t.Logf("Peer #%d: %s", i, p.ID) + t.Logf("Peer #%d: %s", i, p) } peerInited = true } - cfg := NewConfig(genesis, fl.Name(), server, peers, peers.Servers[i].ID, testPeriod, testTick) + cfg := NewConfig(genesis, fl.Name(), server, peers, peers.Servers[i], testPeriod, testTick) // init chain chains[i], err = NewChain(cfg) @@ -235,8 +234,14 @@ func TestMultiNode(t *testing.T) { pub, err := kms.GetLocalPublicKey() So(err, ShouldBeNil) node := proto.Node{ - ID: peers.Servers[i].ID, - Role: peers.Servers[i].Role, + ID: peers.Servers[i], + Role: func(peers *proto.Peers, i int) proto.ServerRole { + if peers.Leader.IsEqual(&peers.Servers[i]) { + return proto.Leader + } else { + return proto.Follower + } + }(peers, i), Addr: server.Listener.Addr().String(), PublicKey: pub, Nonce: nis[i].Nonce, diff --git a/blockproducer/config.go b/blockproducer/config.go index d80ae5dbe..175d2a1d6 100644 --- a/blockproducer/config.go +++ b/blockproducer/config.go @@ -20,7 +20,6 @@ import ( "time" 
"github.com/CovenantSQL/CovenantSQL/blockproducer/types" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" ) @@ -37,7 +36,7 @@ type Config struct { Server *rpc.Server - Peers *kayak.Peers + Peers *proto.Peers NodeID proto.NodeID Period time.Duration @@ -46,7 +45,7 @@ type Config struct { // NewConfig creates new config. func NewConfig(genesis *types.Block, dataFile string, - server *rpc.Server, peers *kayak.Peers, + server *rpc.Server, peers *proto.Peers, nodeID proto.NodeID, period time.Duration, tick time.Duration) *Config { config := Config{ Genesis: genesis, diff --git a/blockproducer/db_service.go b/blockproducer/db_service.go index 511227bc3..8deea1e03 100644 --- a/blockproducer/db_service.go +++ b/blockproducer/db_service.go @@ -25,7 +25,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" @@ -92,7 +91,7 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab log.WithField("db", dbID).Debug("generated database id") // allocate nodes - var peers *kayak.Peers + var peers *proto.Peers if peers, err = s.allocateNodes(0, dbID, req.Header.ResourceMeta); err != nil { return } @@ -139,7 +138,7 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab return } - if err = s.batchSendSvcReq(initSvcReq, rollbackReq, s.peersToNodes(peers)); err != nil { + if err = s.batchSendSvcReq(initSvcReq, rollbackReq, peers.Servers); err != nil { return } @@ -208,7 +207,7 @@ func (s *DBService) DropDatabase(req *DropDatabaseRequest, resp *DropDatabaseRes return } - if err = s.batchSendSvcReq(dropDBSvcReq, nil, s.peersToNodes(instanceMeta.Peers)); err != nil { + 
if err = s.batchSendSvcReq(dropDBSvcReq, nil, instanceMeta.Peers.Servers); err != nil { return } @@ -324,7 +323,7 @@ func (s *DBService) generateDatabaseID(reqNodeID *proto.RawNodeID) (dbID proto.D } } -func (s *DBService) allocateNodes(lastTerm uint64, dbID proto.DatabaseID, resourceMeta wt.ResourceMeta) (peers *kayak.Peers, err error) { +func (s *DBService) allocateNodes(lastTerm uint64, dbID proto.DatabaseID, resourceMeta wt.ResourceMeta) (peers *proto.Peers, err error) { curRange := int(resourceMeta.Node) excludeNodes := make(map[proto.NodeID]bool) var allocated []allocatedNode @@ -444,7 +443,7 @@ func (s *DBService) allocateNodes(lastTerm uint64, dbID proto.DatabaseID, resour } // build peers - return s.buildPeers(lastTerm+1, nodes, nodeAllocated) + return s.buildPeers(lastTerm+1, nodeAllocated) } curRange += int(resourceMeta.Node) @@ -479,54 +478,26 @@ func (s *DBService) getMetric(metric metric.MetricMap, keys []string) (value uin return } -func (s *DBService) buildPeers(term uint64, nodes []proto.Node, allocated []proto.NodeID) (peers *kayak.Peers, err error) { +func (s *DBService) buildPeers(term uint64, allocated []proto.NodeID) (peers *proto.Peers, err error) { log.WithFields(log.Fields{ "term": term, "nodes": allocated, }).Debug("build peers for term/nodes") // get local private key - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return } // get allocated node info - allocatedMap := make(map[proto.NodeID]bool) - - for _, nodeID := range allocated { - allocatedMap[nodeID] = true - } - - allocatedNodes := make([]proto.Node, 0, len(allocated)) - - for _, node := range nodes { - if allocatedMap[node.ID] { - allocatedNodes = append(allocatedNodes, node) - } - } - - peers = &kayak.Peers{ - Term: term, - PubKey: pubKey, - Servers: make([]*kayak.Server, len(allocated)), - } - - for idx, node := range 
allocatedNodes { - peers.Servers[idx] = &kayak.Server{ - Role: proto.Follower, - ID: node.ID, - PubKey: node.PublicKey, - } + peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: term, + Servers: allocated, + }, } - // choose the first node as leader, allocateNodes sort the allocated node list by memory size - peers.Servers[0].Role = proto.Leader peers.Leader = peers.Servers[0] // sign the peers structure @@ -591,17 +562,3 @@ func (s *DBService) batchSendSingleSvcReq(req *wt.UpdateService, nodes []proto.N return } - -func (s *DBService) peersToNodes(peers *kayak.Peers) (nodes []proto.NodeID) { - if peers == nil { - return - } - - nodes = make([]proto.NodeID, 0, len(peers.Servers)) - - for _, s := range peers.Servers { - nodes = append(nodes, s.ID) - } - - return -} diff --git a/blockproducer/db_service_map.go b/blockproducer/db_service_map.go index fa506ec4e..6c260677a 100644 --- a/blockproducer/db_service_map.go +++ b/blockproducer/db_service_map.go @@ -21,6 +21,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" ) // DBMetaPersistence defines database meta persistence api. 
@@ -61,10 +62,10 @@ func InitServiceMap(persistImpl DBMetaPersistence) (s *DBServiceMap, err error) s.dbMap[meta.DatabaseID] = meta for _, server := range meta.Peers.Servers { - if s.nodeMap[server.ID] == nil { - s.nodeMap[server.ID] = make(map[proto.DatabaseID]bool) + if s.nodeMap[server] == nil { + s.nodeMap[server] = make(map[proto.DatabaseID]bool) } - s.nodeMap[server.ID][meta.DatabaseID] = true + s.nodeMap[server][meta.DatabaseID] = true } } @@ -76,8 +77,8 @@ func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { c.Lock() defer c.Unlock() - if !meta.Peers.Verify() { - return ErrInvalidDBPeersConfig + if err = meta.Peers.Verify(); err != nil { + return errors.Wrap(err, "verify peers failed") } // remove previous records @@ -86,8 +87,8 @@ func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { if oldMeta, ok = c.dbMap[meta.DatabaseID]; ok { for _, s := range oldMeta.Peers.Servers { - if c.nodeMap[s.ID] != nil { - delete(c.nodeMap[s.ID], meta.DatabaseID) + if c.nodeMap[s] != nil { + delete(c.nodeMap[s], meta.DatabaseID) } } } @@ -96,10 +97,10 @@ func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { c.dbMap[meta.DatabaseID] = meta for _, s := range meta.Peers.Servers { - if c.nodeMap[s.ID] == nil { - c.nodeMap[s.ID] = make(map[proto.DatabaseID]bool) + if c.nodeMap[s] == nil { + c.nodeMap[s] = make(map[proto.DatabaseID]bool) } - c.nodeMap[s.ID][meta.DatabaseID] = true + c.nodeMap[s][meta.DatabaseID] = true } // set to persistence @@ -140,8 +141,8 @@ func (c *DBServiceMap) Delete(dbID proto.DatabaseID) (err error) { // delete from cache if meta, ok = c.dbMap[dbID]; ok { for _, s := range meta.Peers.Servers { - if c.nodeMap[s.ID] != nil { - delete(c.nodeMap[s.ID], dbID) + if c.nodeMap[s] != nil { + delete(c.nodeMap[s], dbID) } } } diff --git a/blockproducer/db_service_map_test.go b/blockproducer/db_service_map_test.go index 52c1e518c..ed2afb375 100644 --- a/blockproducer/db_service_map_test.go +++ 
b/blockproducer/db_service_map_test.go @@ -101,7 +101,7 @@ func TestServiceMap(t *testing.T) { So(err, ShouldBeNil) instance.Peers.Servers = append(instance.Peers.Servers, instance.Peers.Servers[0]) // something new - instance.Peers.Servers[1].ID = proto.NodeID("00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35") + instance.Peers.Servers[1] = proto.NodeID("00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35") err = instance.Peers.Sign(privKey) So(err, ShouldBeNil) err = svcMap.Set(instance) diff --git a/blockproducer/db_service_test.go b/blockproducer/db_service_test.go index da00e68ba..6c464256e 100644 --- a/blockproducer/db_service_test.go +++ b/blockproducer/db_service_test.go @@ -131,7 +131,7 @@ func TestService(t *testing.T) { }) // use the database - serverID := createDBRes.Header.InstanceMeta.Peers.Leader.ID + serverID := createDBRes.Header.InstanceMeta.Peers.Leader dbID := createDBRes.Header.InstanceMeta.DatabaseID var queryReq *wt.Request queryReq, err = buildQuery(wt.WriteQuery, 1, 1, dbID, []string{ diff --git a/blockproducer/db_service_types.go b/blockproducer/db_service_types.go index 6edac9581..3f94c991f 100644 --- a/blockproducer/db_service_types.go +++ b/blockproducer/db_service_types.go @@ -23,36 +23,29 @@ import ( wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) +//go:generate hsp + // CreateDatabaseRequestHeader defines client create database rpc header. type CreateDatabaseRequestHeader struct { ResourceMeta wt.ResourceMeta } -// Serialize structure to bytes. -func (h *CreateDatabaseRequestHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return h.ResourceMeta.Serialize() -} - // SignedCreateDatabaseRequestHeader defines signed client create database request header. 
type SignedCreateDatabaseRequestHeader struct { CreateDatabaseRequestHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // Verify checks hash and signature in create database request header. func (sh *SignedCreateDatabaseRequestHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.CreateDatabaseRequestHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.CreateDatabaseRequestHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return wt.ErrSignVerification } return @@ -61,10 +54,12 @@ func (sh *SignedCreateDatabaseRequestHeader) Verify() (err error) { // Sign the request. func (sh *SignedCreateDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.CreateDatabaseRequestHeader, &sh.HeaderHash) + if err = buildHash(&sh.CreateDatabaseRequestHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return @@ -92,31 +87,22 @@ type CreateDatabaseResponseHeader struct { InstanceMeta wt.ServiceInstance } -// Serialize structure to bytes. -func (h *CreateDatabaseResponseHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return h.InstanceMeta.Serialize() -} - // SignedCreateDatabaseResponseHeader defines signed client create database response header. 
type SignedCreateDatabaseResponseHeader struct { CreateDatabaseResponseHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // Verify checks hash and signature in create database response header. func (sh *SignedCreateDatabaseResponseHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.CreateDatabaseResponseHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.CreateDatabaseResponseHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return wt.ErrSignVerification } return @@ -125,10 +111,12 @@ func (sh *SignedCreateDatabaseResponseHeader) Verify() (err error) { // Sign the response. func (sh *SignedCreateDatabaseResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.CreateDatabaseResponseHeader, &sh.HeaderHash) + if err = buildHash(&sh.CreateDatabaseResponseHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return @@ -156,31 +144,22 @@ type DropDatabaseRequestHeader struct { DatabaseID proto.DatabaseID } -// Serialize structure to bytes. -func (h *DropDatabaseRequestHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return []byte(h.DatabaseID) -} - // SignedDropDatabaseRequestHeader defines signed client drop database rpc request header. 
type SignedDropDatabaseRequestHeader struct { DropDatabaseRequestHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // Verify checks hash and signature in request header. func (sh *SignedDropDatabaseRequestHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.DropDatabaseRequestHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.DropDatabaseRequestHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return wt.ErrSignVerification } return @@ -189,10 +168,12 @@ func (sh *SignedDropDatabaseRequestHeader) Verify() (err error) { // Sign the request. func (sh *SignedDropDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.DropDatabaseRequestHeader, &sh.HeaderHash) + if err = buildHash(&sh.DropDatabaseRequestHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return @@ -222,31 +203,22 @@ type GetDatabaseRequestHeader struct { DatabaseID proto.DatabaseID } -// Serialize structure to bytes. -func (h *GetDatabaseRequestHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return []byte(h.DatabaseID) -} - // SignedGetDatabaseRequestHeader defines signed client get database rpc request header entity. type SignedGetDatabaseRequestHeader struct { GetDatabaseRequestHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // Verify checks hash and signature in request header. 
func (sh *SignedGetDatabaseRequestHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.GetDatabaseRequestHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.GetDatabaseRequestHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return wt.ErrSignVerification } return @@ -255,10 +227,12 @@ func (sh *SignedGetDatabaseRequestHeader) Verify() (err error) { // Sign the request. func (sh *SignedGetDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.GetDatabaseRequestHeader, &sh.HeaderHash) + if err = buildHash(&sh.GetDatabaseRequestHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return @@ -285,31 +259,22 @@ type GetDatabaseResponseHeader struct { InstanceMeta wt.ServiceInstance } -// Serialize structure to bytes. -func (h *GetDatabaseResponseHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - return h.InstanceMeta.Serialize() -} - // SignedGetDatabaseResponseHeader defines client get database rpc response header entity. type SignedGetDatabaseResponseHeader struct { GetDatabaseResponseHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // Verify checks hash and signature in response header. 
func (sh *SignedGetDatabaseResponseHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.GetDatabaseResponseHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.GetDatabaseResponseHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return wt.ErrSignVerification } return @@ -318,10 +283,12 @@ func (sh *SignedGetDatabaseResponseHeader) Verify() (err error) { // Sign the request. func (sh *SignedGetDatabaseResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.GetDatabaseResponseHeader, &sh.HeaderHash) + if err = buildHash(&sh.GetDatabaseResponseHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return @@ -343,21 +310,28 @@ func (r *GetDatabaseResponse) Sign(signer *asymmetric.PrivateKey) (err error) { return r.Header.Sign(signer) } -// FIXIT(xq262144) remove duplicated interface in utils package. -type canSerialize interface { - Serialize() []byte +// FIXME(xq262144) remove duplicated interface in utils package. 
+type canMarshalHash interface { + MarshalHash() ([]byte, error) } -func verifyHash(data canSerialize, h *hash.Hash) (err error) { +func verifyHash(data canMarshalHash, h *hash.Hash) (err error) { var newHash hash.Hash - buildHash(data, &newHash) + if err = buildHash(data, &newHash); err != nil { + return + } if !newHash.IsEqual(h) { - return wt.ErrHashVerification + return ErrSignVerification } return } -func buildHash(data canSerialize, h *hash.Hash) { - newHash := hash.THashH(data.Serialize()) +func buildHash(data canMarshalHash, h *hash.Hash) (err error) { + var hashBytes []byte + if hashBytes, err = data.MarshalHash(); err != nil { + return + } + newHash := hash.THashH(hashBytes) copy(h[:], newHash[:]) + return } diff --git a/blockproducer/db_service_types_gen.go b/blockproducer/db_service_types_gen.go new file mode 100644 index 000000000..4ab068cbb --- /dev/null +++ b/blockproducer/db_service_types_gen.go @@ -0,0 +1,547 @@ +package blockproducer + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *CreateDatabaseRequest) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CreateDatabaseRequest) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *CreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CreateDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 13 + z.ResourceMeta.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *CreateDatabaseResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CreateDatabaseResponse) Msgsize() (s int) 
{ + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *CreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CreateDatabaseResponseHeader) Msgsize() (s int) { + s = 1 + 13 + z.InstanceMeta.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *DropDatabaseRequest) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DropDatabaseRequest) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *DropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DropDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 11 + z.DatabaseID.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z DropDatabaseResponse) MarshalHash() (o 
[]byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 0 + o = append(o, 0x80) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z DropDatabaseResponse) Msgsize() (s int) { + s = 1 + return +} + +// MarshalHash marshals for hash +func (z *GetDatabaseRequest) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetDatabaseRequest) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *GetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 11 + z.DatabaseID.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *GetDatabaseResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } 
else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetDatabaseResponse) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *GetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetDatabaseResponseHeader) Msgsize() (s int) { + s = 1 + 13 + z.InstanceMeta.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedCreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Signee == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + // map header, size 1 + o = append(o, 0x84, 0x81, 0x81) + if oTemp, err := z.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.Hash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedCreateDatabaseRequestHeader) Msgsize() (s int) { + s = 
1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 28 + 1 + 13 + z.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 5 + z.Hash.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedCreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Signee == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + // map header, size 1 + o = append(o, 0x84, 0x81, 0x81) + if oTemp, err := z.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.Hash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedCreateDatabaseResponseHeader) Msgsize() (s int) { + s = 1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 29 + 1 + 13 + z.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 5 + z.Hash.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedDropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Signee == nil { + o = 
hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + // map header, size 1 + o = append(o, 0x84, 0x81, 0x81) + if oTemp, err := z.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.Hash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedDropDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 26 + 1 + 11 + z.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 5 + z.Hash.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedGetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Signee == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + // map header, size 1 + o = append(o, 0x84, 0x81, 0x81) + if oTemp, err := z.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, 
oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.Hash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedGetDatabaseRequestHeader) Msgsize() (s int) { + s = 1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 25 + 1 + 11 + z.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 5 + z.Hash.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedGetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Signee == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + // map header, size 1 + o = append(o, 0x84, 0x81, 0x81) + if oTemp, err := z.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.Hash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedGetDatabaseResponseHeader) Msgsize() (s int) { + s = 1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 26 + 1 + 13 + 
z.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 5 + z.Hash.Msgsize() + return +} diff --git a/blockproducer/db_service_types_gen_test.go b/blockproducer/db_service_types_gen_test.go new file mode 100644 index 000000000..b9ad910bd --- /dev/null +++ b/blockproducer/db_service_types_gen_test.go @@ -0,0 +1,602 @@ +package blockproducer + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashCreateDatabaseRequest(t *testing.T) { + v := CreateDatabaseRequest{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashCreateDatabaseRequest(b *testing.B) { + v := CreateDatabaseRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgCreateDatabaseRequest(b *testing.B) { + v := CreateDatabaseRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashCreateDatabaseRequestHeader(t *testing.T) { + v := CreateDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashCreateDatabaseRequestHeader(b *testing.B) { + v := CreateDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgCreateDatabaseRequestHeader(b *testing.B) { + v := CreateDatabaseRequestHeader{} + bts := make([]byte, 0, 
v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashCreateDatabaseResponse(t *testing.T) { + v := CreateDatabaseResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashCreateDatabaseResponse(b *testing.B) { + v := CreateDatabaseResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgCreateDatabaseResponse(b *testing.B) { + v := CreateDatabaseResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashCreateDatabaseResponseHeader(t *testing.T) { + v := CreateDatabaseResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashCreateDatabaseResponseHeader(b *testing.B) { + v := CreateDatabaseResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgCreateDatabaseResponseHeader(b *testing.B) { + v := CreateDatabaseResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashDropDatabaseRequest(t *testing.T) { + v := DropDatabaseRequest{} + binary.Read(rand.Reader, binary.BigEndian, &v) + 
bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashDropDatabaseRequest(b *testing.B) { + v := DropDatabaseRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgDropDatabaseRequest(b *testing.B) { + v := DropDatabaseRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashDropDatabaseRequestHeader(t *testing.T) { + v := DropDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashDropDatabaseRequestHeader(b *testing.B) { + v := DropDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgDropDatabaseRequestHeader(b *testing.B) { + v := DropDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashDropDatabaseResponse(t *testing.T) { + v := DropDatabaseResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashDropDatabaseResponse(b *testing.B) { + v := DropDatabaseResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 
0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgDropDatabaseResponse(b *testing.B) { + v := DropDatabaseResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashGetDatabaseRequest(t *testing.T) { + v := GetDatabaseRequest{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashGetDatabaseRequest(b *testing.B) { + v := GetDatabaseRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgGetDatabaseRequest(b *testing.B) { + v := GetDatabaseRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashGetDatabaseRequestHeader(t *testing.T) { + v := GetDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashGetDatabaseRequestHeader(b *testing.B) { + v := GetDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgGetDatabaseRequestHeader(b *testing.B) { + v := GetDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func 
TestMarshalHashGetDatabaseResponse(t *testing.T) { + v := GetDatabaseResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashGetDatabaseResponse(b *testing.B) { + v := GetDatabaseResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgGetDatabaseResponse(b *testing.B) { + v := GetDatabaseResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashGetDatabaseResponseHeader(t *testing.T) { + v := GetDatabaseResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashGetDatabaseResponseHeader(b *testing.B) { + v := GetDatabaseResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgGetDatabaseResponseHeader(b *testing.B) { + v := GetDatabaseResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedCreateDatabaseRequestHeader(t *testing.T) { + v := SignedCreateDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not 
stable") + } +} + +func BenchmarkMarshalHashSignedCreateDatabaseRequestHeader(b *testing.B) { + v := SignedCreateDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedCreateDatabaseRequestHeader(b *testing.B) { + v := SignedCreateDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedCreateDatabaseResponseHeader(t *testing.T) { + v := SignedCreateDatabaseResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedCreateDatabaseResponseHeader(b *testing.B) { + v := SignedCreateDatabaseResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedCreateDatabaseResponseHeader(b *testing.B) { + v := SignedCreateDatabaseResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedDropDatabaseRequestHeader(t *testing.T) { + v := SignedDropDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedDropDatabaseRequestHeader(b *testing.B) { + v := SignedDropDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + 
v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedDropDatabaseRequestHeader(b *testing.B) { + v := SignedDropDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedGetDatabaseRequestHeader(t *testing.T) { + v := SignedGetDatabaseRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedGetDatabaseRequestHeader(b *testing.B) { + v := SignedGetDatabaseRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedGetDatabaseRequestHeader(b *testing.B) { + v := SignedGetDatabaseRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedGetDatabaseResponseHeader(t *testing.T) { + v := SignedGetDatabaseResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedGetDatabaseResponseHeader(b *testing.B) { + v := SignedGetDatabaseResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedGetDatabaseResponseHeader(b *testing.B) { + v := SignedGetDatabaseResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + 
b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/blockproducer/errors.go b/blockproducer/errors.go index 53e845bab..b6ac4e352 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -19,8 +19,6 @@ package blockproducer import "errors" var ( - // ErrInvalidDBPeersConfig defines database peers invalid error. - ErrInvalidDBPeersConfig = errors.New("invalid database peers config") // ErrNoSuchDatabase defines database meta not exists error. ErrNoSuchDatabase = errors.New("no such database") // ErrDatabaseAllocation defines database allocation failure error. diff --git a/blockproducer/helper_test.go b/blockproducer/helper_test.go index ccc5d7efa..a464632be 100644 --- a/blockproducer/helper_test.go +++ b/blockproducer/helper_test.go @@ -33,7 +33,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" @@ -139,11 +138,6 @@ func (p *stubDBMetaPersistence) GetAllDatabases() (instances []wt.ServiceInstanc } func (p *stubDBMetaPersistence) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -155,21 +149,12 @@ func (p *stubDBMetaPersistence) getInstanceMeta(dbID proto.DatabaseID) (instance } instance.DatabaseID = proto.DatabaseID(dbID) - instance.Peers = &kayak.Peers{ - Term: 1, - Leader: &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, + instance.Peers = &proto.Peers{ 
+ PeersHeader: proto.PeersHeader{ + Term: 1, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, }, - PubKey: pubKey, } if err = instance.Peers.Sign(privKey); err != nil { return diff --git a/blockproducer/runtime.go b/blockproducer/runtime.go index b1e376f0d..a4e15bf03 100644 --- a/blockproducer/runtime.go +++ b/blockproducer/runtime.go @@ -21,7 +21,6 @@ import ( "sync" "time" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -50,7 +49,7 @@ type rt struct { // peersMutex protects following peers-relative fields. peersMutex sync.Mutex - peers *kayak.Peers + peers *proto.Peers nodeID proto.NodeID stateMutex sync.Mutex // Protects following fields. @@ -78,7 +77,7 @@ func (r *rt) now() time.Time { func newRuntime(cfg *Config, accountAddress proto.AccountAddress) *rt { var index uint32 for i, s := range cfg.Peers.Servers { - if cfg.NodeID.IsEqual(&s.ID) { + if cfg.NodeID.IsEqual(&s) { index = uint32(i) } } @@ -151,7 +150,7 @@ func (r *rt) getNextTurn() uint32 { return r.nextTurn } -func (r *rt) getPeers() *kayak.Peers { +func (r *rt) getPeers() *proto.Peers { r.peersMutex.Lock() defer r.peersMutex.Unlock() peers := r.peers.Clone() diff --git a/blockproducer/xxx_test.go b/blockproducer/xxx_test.go index 8361b0eeb..14972dc65 100644 --- a/blockproducer/xxx_test.go +++ b/blockproducer/xxx_test.go @@ -30,7 +30,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -373,7 +372,7 @@ func createRandomString(offset, length int, s *string) { *s = string(buff) } -func createTestPeersWithPrivKeys(priv *asymmetric.PrivateKey, num int) (nis []cpuminer.NonceInfo, p 
*kayak.Peers, err error) { +func createTestPeersWithPrivKeys(priv *asymmetric.PrivateKey, num int) (nis []cpuminer.NonceInfo, p *proto.Peers, err error) { if num <= 0 { return } @@ -386,29 +385,20 @@ func createTestPeersWithPrivKeys(priv *asymmetric.PrivateKey, num int) (nis []cp return } - s := make([]*kayak.Server, num) + s := make([]proto.NodeID, num) h := &hash.Hash{} for i := range s { rand.Read(h[:]) - s[i] = &kayak.Server{ - Role: func() proto.ServerRole { - if i == 0 { - return proto.Leader - } - return proto.Follower - }(), - ID: proto.NodeID(nis[i].Hash.String()), - PubKey: pub, - } + s[i] = proto.NodeID(nis[i].Hash.String()) } - p = &kayak.Peers{ - Term: 0, - Leader: s[0], - Servers: s, - PubKey: pub, - Signature: nil, + p = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 0, + Leader: s[0], + Servers: s, + }, } if err = p.Sign(priv); err != nil { @@ -418,7 +408,7 @@ func createTestPeersWithPrivKeys(priv *asymmetric.PrivateKey, num int) (nis []cp return } -func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err error) { +func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *proto.Peers, err error) { if num <= 0 { return } @@ -443,29 +433,20 @@ func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err err return } - s := make([]*kayak.Server, num) + s := make([]proto.NodeID, num) h := &hash.Hash{} for i := range s { rand.Read(h[:]) - s[i] = &kayak.Server{ - Role: func() proto.ServerRole { - if i == 0 { - return proto.Leader - } - return proto.Follower - }(), - ID: proto.NodeID(nis[i].Hash.String()), - PubKey: pub, - } + s[i] = proto.NodeID(nis[i].Hash.String()) } - p = &kayak.Peers{ - Term: 0, - Leader: s[0], - Servers: s, - PubKey: pub, - Signature: nil, + p = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 0, + Leader: s[0], + Servers: s, + }, } if err = p.Sign(priv); err != nil { diff --git a/client/conn.go b/client/conn.go index 74ef9d431..449a304fe 100644 --- a/client/conn.go +++ 
b/client/conn.go @@ -26,7 +26,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -100,7 +99,7 @@ func (c *conn) stopAckWorkers() { func (c *conn) ackWorker() { if rawPeers, ok := peerList.Load(c.dbID); ok { - if peers, ok := rawPeers.(*kayak.Peers); ok { + if peers, ok := rawPeers.(*proto.Peers); ok { var ( oneTime sync.Once pc *rpc.PersistentCaller @@ -114,7 +113,7 @@ func (c *conn) ackWorker() { break ackWorkerLoop } oneTime.Do(func() { - pc = rpc.NewPersistentCaller(peers.Leader.ID) + pc = rpc.NewPersistentCaller(peers.Leader) }) if err = ack.Sign(c.privKey, false); err != nil { log.WithField("target", pc.TargetID).WithError(err).Error("failed to sign ack") @@ -306,7 +305,7 @@ func (c *conn) addQuery(queryType wt.QueryType, query *wt.Query) (affectedRows i } func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { - var peers *kayak.Peers + var peers *proto.Peers if peers, err = cacheGetPeers(c.dbID, c.privKey); err != nil { return } @@ -321,7 +320,7 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (affectedRo "type": queryType.String(), "connID": connID, "seqNo": seqNo, - "target": peers.Leader.ID, + "target": peers.Leader, "source": c.localNodeID, }).WithError(err).Debug("send query") }() @@ -347,7 +346,7 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (affectedRo return } - c.pCaller = rpc.NewPersistentCaller(peers.Leader.ID) + c.pCaller = rpc.NewPersistentCaller(peers.Leader) var response wt.Response if err = c.pCaller.Call(route.DBSQuery.String(), req, &response); err != nil { return @@ -388,10 +387,11 @@ func convertQuery(query string, args []driver.NamedValue) (sq *wt.Query) { Pattern: 
query, } - sq.Args = make([]sql.NamedArg, len(args)) + sq.Args = make([]wt.NamedArg, len(args)) for i, v := range args { - sq.Args[i] = sql.Named(v.Name, v.Value) + sq.Args[i].Name = v.Name + sq.Args[i].Value = v.Value } return diff --git a/client/driver.go b/client/driver.go index 4522fac11..274c32e70 100644 --- a/client/driver.go +++ b/client/driver.go @@ -25,19 +25,17 @@ import ( "sync/atomic" "time" - "github.com/pkg/errors" - bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils/log" wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" ) const ( @@ -53,7 +51,7 @@ var ( driverInitialized uint32 peersUpdaterRunning uint32 - peerList sync.Map // map[proto.DatabaseID]*kayak.Peers + peerList sync.Map // map[proto.DatabaseID]*proto.Peers connIDLock sync.Mutex connIDAvail []uint64 globalSeqNo uint64 @@ -321,7 +319,7 @@ func stopPeersUpdater() { atomic.StoreUint32(&peersUpdaterRunning, 0) } -func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *kayak.Peers, err error) { +func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *proto.Peers, err error) { var ok bool var rawPeers interface{} var cacheHit bool @@ -334,7 +332,7 @@ func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers }() if rawPeers, ok = peerList.Load(dbID); ok { - if peers, ok = rawPeers.(*kayak.Peers); ok { + if peers, ok = rawPeers.(*proto.Peers); ok { cacheHit = true return } @@ -344,7 +342,7 @@ func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers return getPeers(dbID, 
privKey) } -func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *kayak.Peers, err error) { +func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *proto.Peers, err error) { req := new(bp.GetDatabaseRequest) req.Header.DatabaseID = dbID diff --git a/client/helper_test.go b/client/helper_test.go index ed267535a..c8cdead5a 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -34,7 +34,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" @@ -112,11 +111,6 @@ func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitSer } func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -128,21 +122,12 @@ func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.Se } instance.DatabaseID = proto.DatabaseID(dbID) - instance.Peers = &kayak.Peers{ - Term: 1, - Leader: &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, + instance.Peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 1, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, - }, - PubKey: pubKey, } if err = instance.Peers.Sign(privKey); err != nil { return @@ -204,7 +189,7 @@ func startTestService() (stopTestService func(), tempDir string, err error) { // add database var req *wt.UpdateService var res wt.UpdateServiceResponse - var peers *kayak.Peers + var 
peers *proto.Peers var block *ct.Block dbID := proto.DatabaseID("db") @@ -401,7 +386,7 @@ func getKeys() (privKey *asymmetric.PrivateKey, pubKey *asymmetric.PublicKey, er return } -func genPeers(term uint64) (peers *kayak.Peers, err error) { +func genPeers(term uint64) (peers *proto.Peers, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -409,24 +394,19 @@ func genPeers(term uint64) (peers *kayak.Peers, err error) { } // get private/public key - var pubKey *asymmetric.PublicKey var privateKey *asymmetric.PrivateKey - if privateKey, pubKey, err = getKeys(); err != nil { + if privateKey, _, err = getKeys(); err != nil { return } // generate peers and sign - server := &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - } - peers = &kayak.Peers{ - Term: term, - Leader: server, - Servers: []*kayak.Server{server}, - PubKey: pubKey, + peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: term, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, + }, } err = peers.Sign(privateKey) return diff --git a/cmd/cql-minerd/dbms.go b/cmd/cql-minerd/dbms.go index c2b712304..d117aec44 100644 --- a/cmd/cql-minerd/dbms.go +++ b/cmd/cql-minerd/dbms.go @@ -27,7 +27,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -65,14 +64,8 @@ func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { // add test fixture database if conf.GConf.Miner.IsTestMode { // in test mode - - var pubKey *asymmetric.PublicKey var privKey *asymmetric.PrivateKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - err = errors.Wrap(err, "get local public key failed") - return - } if privKey, err = 
kms.GetLocalPrivateKey(); err != nil { err = errors.Wrap(err, "get local private key failed") return @@ -81,28 +74,12 @@ func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { // add database to miner for _, testFixture := range conf.GConf.Miner.TestFixtures { // build test db instance configuration - dbPeers := &kayak.Peers{ - Term: testFixture.Term, - Leader: &kayak.Server{ - Role: proto.Leader, - ID: testFixture.Leader, + dbPeers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: testFixture.Term, + Leader: testFixture.Leader, + Servers: testFixture.Servers, }, - Servers: (func(servers []proto.NodeID) (ks []*kayak.Server) { - ks = make([]*kayak.Server, len(servers)) - - for i, s := range servers { - ks[i] = &kayak.Server{ - Role: proto.Follower, - ID: s, - } - if s == testFixture.Leader { - ks[i].Role = proto.Leader - } - } - - return - })(testFixture.Servers), - PubKey: pubKey, } if err = dbPeers.Sign(privKey); err != nil { diff --git a/cmd/cql-observer/api.go b/cmd/cql-observer/api.go index 2d644eed1..61b82aee0 100644 --- a/cmd/cql-observer/api.go +++ b/cmd/cql-observer/api.go @@ -79,19 +79,19 @@ func (a *explorerAPI) GetAck(rw http.ResponseWriter, r *http.Request) { sendResponse(200, true, "", map[string]interface{}{ "ack": map[string]interface{}{ "request": map[string]interface{}{ - "hash": ack.Response.Request.HeaderHash.String(), + "hash": ack.Response.Request.Hash.String(), "timestamp": a.formatTime(ack.Response.Request.Timestamp), "node": ack.Response.Request.NodeID, "type": ack.Response.Request.QueryType.String(), "count": ack.Response.Request.BatchCount, }, "response": map[string]interface{}{ - "hash": ack.Response.HeaderHash.String(), + "hash": ack.Response.Hash.String(), "timestamp": a.formatTime(ack.Response.Timestamp), "node": ack.Response.NodeID, "log_position": ack.Response.LogOffset, }, - "hash": ack.HeaderHash.String(), + "hash": ack.Hash.String(), "timestamp": a.formatTime(ack.AckHeader.Timestamp), "node": 
ack.AckHeader.NodeID, }, @@ -365,7 +365,7 @@ func (a *explorerAPI) formatRequest(req *wt.Request) map[string]interface{} { return map[string]interface{}{ "request": map[string]interface{}{ - "hash": req.Header.HeaderHash.String(), + "hash": req.Header.Hash.String(), "timestamp": a.formatTime(req.Header.Timestamp), "node": req.Header.NodeID, "type": req.Header.QueryType.String(), diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index a0be43d42..8b7919d05 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -426,7 +426,7 @@ func TestFullProcess(t *testing.T) { So(ensureSuccess(res.Interface()), ShouldResemble, byHashRequestResult) // test get first log offset, should be a create table statement - res, err = getJSON("offset/%v/1", dbID) + res, err = getJSON("offset/%v/0", dbID) So(err, ShouldBeNil) So(ensureSuccess(res.String("request", "queries", "0", "pattern")), ShouldContainSubstring, "CREATE TABLE") diff --git a/cmd/cql-observer/service.go b/cmd/cql-observer/service.go index 53b4b9720..addabbbee 100644 --- a/cmd/cql-observer/service.go +++ b/cmd/cql-observer/service.go @@ -324,7 +324,7 @@ func (s *Service) startSubscribe(dbID proto.DatabaseID) (err error) { func (s *Service) addAckedQuery(dbID proto.DatabaseID, ack *wt.SignedAckHeader) (err error) { log.WithFields(log.Fields{ - "ack": ack.HeaderHash.String(), + "ack": ack.Hash.String(), "db": dbID, }).Debug("add ack query") @@ -353,11 +353,11 @@ func (s *Service) addAckedQuery(dbID proto.DatabaseID, ack *wt.SignedAckHeader) } key := offsetToBytes(req.LogOffset) - key = append(key, resp.Request.Header.HeaderHash.CloneBytes()...) + key = append(key, resp.Request.Header.Hash.CloneBytes()...) 
log.WithFields(log.Fields{ "offset": req.LogOffset, - "reqHash": resp.Request.Header.HeaderHash.String(), + "reqHash": resp.Request.Header.Hash.String(), "reqQueries": resp.Request.Payload.Queries, }).Debug("add write request") @@ -378,7 +378,7 @@ func (s *Service) addAckedQuery(dbID proto.DatabaseID, ack *wt.SignedAckHeader) if err != nil { return } - err = ob.Put(resp.Request.Header.HeaderHash.CloneBytes(), offsetToBytes(req.LogOffset)) + err = ob.Put(resp.Request.Header.Hash.CloneBytes(), offsetToBytes(req.LogOffset)) return }); err != nil { return @@ -395,7 +395,7 @@ func (s *Service) addAckedQuery(dbID proto.DatabaseID, ack *wt.SignedAckHeader) if err != nil { return } - err = ab.Put(ack.HeaderHash.CloneBytes(), ackBytes.Bytes()) + err = ab.Put(ack.Hash.CloneBytes(), ackBytes.Bytes()) return }) } @@ -480,7 +480,7 @@ func (s *Service) minerRequest(dbID proto.DatabaseID, method string, request int return } - return s.caller.CallNode(instance.Peers.Leader.ID, method, request, response) + return s.caller.CallNode(instance.Peers.Leader, method, request, response) } func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *wt.ServiceInstance, err error) { diff --git a/cmd/cqld/adapter.go b/cmd/cqld/adapter.go index c8b4c442a..41427b8b9 100644 --- a/cmd/cqld/adapter.go +++ b/cmd/cqld/adapter.go @@ -20,20 +20,20 @@ import ( "bytes" "context" "database/sql" - "errors" "os" bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/consistent" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" - "github.com/CovenantSQL/CovenantSQL/twopc" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" 
) const ( @@ -51,6 +51,12 @@ type LocalStorage struct { *storage.Storage } +type compiledLog struct { + cmdType string + queries []storage.Query + nodeToSet *proto.Node +} + func initStorage(dbFile string) (stor *LocalStorage, err error) { var st *storage.Storage if st, err = storage.New(dbFile); err != nil { @@ -78,94 +84,93 @@ func initStorage(dbFile string) (stor *LocalStorage, err error) { return } -// Prepare implements twopc Worker.Prepare -func (s *LocalStorage) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) { - payload, err := s.decodeLog(wb) - if err != nil { - log.WithError(err).Error("decode log failed") +// EncodePayload implements kayak.types.Handler.EncodePayload. +func (s *LocalStorage) EncodePayload(request interface{}) (data []byte, err error) { + var buf *bytes.Buffer + if buf, err = utils.EncodeMsgPack(request); err != nil { + err = errors.Wrap(err, "encode kayak payload failed") return } - execLog, err := s.compileExecLog(payload) - if err != nil { - log.WithError(err).Error("compile exec log failed") - return - } - return s.Storage.Prepare(ctx, execLog) + + data = buf.Bytes() + return } -// Commit implements twopc Worker.Commit -func (s *LocalStorage) Commit(ctx context.Context, wb twopc.WriteBatch) (_ interface{}, err error) { - payload, err := s.decodeLog(wb) - if err != nil { - log.WithError(err).Error("decode log failed") +// DecodePayload implements kayak.types.Handler.DecodePayload. 
+func (s *LocalStorage) DecodePayload(data []byte) (request interface{}, err error) { + var kp *KayakPayload + + if err = utils.DecodeMsgPack(data, &kp); err != nil { + err = errors.Wrap(err, "decode kayak payload failed") return } - err = s.commit(ctx, payload) + + request = kp return } -func (s *LocalStorage) commit(ctx context.Context, payload *KayakPayload) (err error) { - var nodeToSet proto.Node - err = utils.DecodeMsgPack(payload.Data, &nodeToSet) - if err != nil { - log.WithError(err).Error("unmarshal node from payload failed") - return - } - execLog, err := s.compileExecLog(payload) - if err != nil { - log.WithError(err).Error("compile exec log failed") +// Check implements kayak.types.Handler.Check. +func (s *LocalStorage) Check(req interface{}) (err error) { + return nil +} + +// Commit implements kayak.types.Handler.Commit. +func (s *LocalStorage) Commit(req interface{}) (_ interface{}, err error) { + var kp *KayakPayload + var cl *compiledLog + var ok bool + + if kp, ok = req.(*KayakPayload); !ok || kp == nil { + err = errors.Wrapf(kt.ErrInvalidLog, "invalid kayak payload %#v", req) return } - err = route.SetNodeAddrCache(nodeToSet.ID.ToRawNodeID(), nodeToSet.Addr) - if err != nil { - log.WithFields(log.Fields{ - "id": nodeToSet.ID, - "addr": nodeToSet.Addr, - }).WithError(err).Error("set node addr cache failed") - } - err = kms.SetNode(&nodeToSet) - if err != nil { - log.WithField("node", nodeToSet).WithError(err).Error("kms set node failed") - } - // if s.consistent == nil, it is called during Init. 
and AddCache will be called by consistent.InitConsistent - if s.consistent != nil { - s.consistent.AddCache(nodeToSet) + if cl, err = s.compileLog(kp); err != nil { + err = errors.Wrap(err, "compile log failed") + return } - _, err = s.Storage.Commit(ctx, execLog) - return -} + if cl.nodeToSet != nil { + err = route.SetNodeAddrCache(cl.nodeToSet.ID.ToRawNodeID(), cl.nodeToSet.Addr) + if err != nil { + log.WithFields(log.Fields{ + "id": cl.nodeToSet.ID, + "addr": cl.nodeToSet.Addr, + }).WithError(err).Error("set node addr cache failed") + } + err = kms.SetNode(cl.nodeToSet) + if err != nil { + log.WithField("node", cl.nodeToSet).WithError(err).Error("kms set node failed") + } -// Rollback implements twopc Worker.Rollback -func (s *LocalStorage) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) { - payload, err := s.decodeLog(wb) - if err != nil { - log.WithError(err).Error("decode log failed") - return + // if s.consistent == nil, it is called during Init. and AddCache will be called by consistent.InitConsistent + if s.consistent != nil { + s.consistent.AddCache(*cl.nodeToSet) + } } - execLog, err := s.compileExecLog(payload) - if err != nil { - log.WithError(err).Error("compile exec log failed") - return + + // execute query + if _, err = s.Storage.Exec(context.Background(), cl.queries); err != nil { + err = errors.Wrap(err, "execute query in dht database failed") } - return s.Storage.Rollback(ctx, execLog) + return } -func (s *LocalStorage) compileExecLog(payload *KayakPayload) (execLog *storage.ExecLog, err error) { +func (s *LocalStorage) compileLog(payload *KayakPayload) (result *compiledLog, err error) { switch payload.Command { case CmdSet: var nodeToSet proto.Node err = utils.DecodeMsgPack(payload.Data, &nodeToSet) if err != nil { - log.WithError(err).Error("compileExecLog: unmarshal node from payload failed") + log.WithError(err).Error("compileLog: unmarshal node from payload failed") return } query := "INSERT OR REPLACE INTO `dht` (`id`, 
`node`) VALUES (?, ?);" log.Debugf("sql: %#v", query) - execLog = &storage.ExecLog{ - Queries: []storage.Query{ + result = &compiledLog{ + cmdType: payload.Command, + queries: []storage.Query{ { Pattern: query, Args: []sql.NamedArg{ @@ -174,16 +179,18 @@ func (s *LocalStorage) compileExecLog(payload *KayakPayload) (execLog *storage.E }, }, }, + nodeToSet: &nodeToSet, } case CmdSetDatabase: var instance wt.ServiceInstance if err = utils.DecodeMsgPack(payload.Data, &instance); err != nil { - log.WithError(err).Error("compileExecLog: unmarshal instance meta failed") + log.WithError(err).Error("compileLog: unmarshal instance meta failed") return } query := "INSERT OR REPLACE INTO `databases` (`id`, `meta`) VALUES (? ,?);" - execLog = &storage.ExecLog{ - Queries: []storage.Query{ + result = &compiledLog{ + cmdType: payload.Command, + queries: []storage.Query{ { Pattern: query, Args: []sql.NamedArg{ @@ -196,14 +203,15 @@ func (s *LocalStorage) compileExecLog(payload *KayakPayload) (execLog *storage.E case CmdDeleteDatabase: var instance wt.ServiceInstance if err = utils.DecodeMsgPack(payload.Data, &instance); err != nil { - log.WithError(err).Error("compileExecLog: unmarshal instance id failed") + log.WithError(err).Error("compileLog: unmarshal instance id failed") return } // TODO(xq262144), should add additional limit 1 after delete clause // however, currently the go-sqlite3 query := "DELETE FROM `databases` WHERE `id` = ?" 
- execLog = &storage.ExecLog{ - Queries: []storage.Query{ + result = &compiledLog{ + cmdType: payload.Command, + queries: []storage.Query{ { Pattern: query, Args: []sql.NamedArg{ @@ -213,27 +221,9 @@ func (s *LocalStorage) compileExecLog(payload *KayakPayload) (execLog *storage.E }, } default: - err = errors.New("undefined command: " + payload.Command) - log.Error(err) - } - return -} - -func (s *LocalStorage) decodeLog(wb twopc.WriteBatch) (payload *KayakPayload, err error) { - var bytesPayload []byte - var ok bool - payload = new(KayakPayload) - - if bytesPayload, ok = wb.([]byte); !ok { - err = kayak.ErrInvalidLog - return - } - err = utils.DecodeMsgPack(bytesPayload, payload) - if err != nil { - log.WithError(err).Error("unmarshal payload failed") - return + err = errors.Errorf("undefined command: %v", payload.Command) + log.WithError(err).Error("compile log failed") } - return } @@ -256,20 +246,7 @@ func (s *KayakKVServer) Init(storePath string, initNodes []proto.Node) (err erro Command: CmdSet, Data: nodeBuf.Bytes(), } - - var execLog *storage.ExecLog - execLog, err = s.KVStorage.compileExecLog(payload) - if err != nil { - log.WithError(err).Error("compile exec log failed") - return - } - err = s.KVStorage.Storage.Prepare(context.Background(), execLog) - if err != nil { - log.WithError(err).Error("init kayak KV prepare node failed") - return - } - - err = s.KVStorage.commit(context.Background(), payload) + _, err = s.KVStorage.Commit(payload) if err != nil { log.WithError(err).Error("init kayak KV commit node failed") return @@ -296,15 +273,9 @@ func (s *KayakKVServer) SetNode(node *proto.Node) (err error) { Data: nodeBuf.Bytes(), } - writeData, err := utils.EncodeMsgPack(payload) - if err != nil { - log.WithError(err).Error("marshal payload failed") - return err - } - - _, _, err = s.Runtime.Apply(writeData.Bytes()) + _, _, err = s.Runtime.Apply(context.Background(), payload) if err != nil { - log.Errorf("Apply set node failed: %#v\nPayload:\n %#v", err, 
writeData) + log.Errorf("Apply set node failed: %#v\nPayload:\n %#v", err, payload) } return @@ -367,15 +338,9 @@ func (s *KayakKVServer) SetDatabase(meta wt.ServiceInstance) (err error) { Data: metaBuf.Bytes(), } - writeData, err := utils.EncodeMsgPack(payload) - if err != nil { - log.WithError(err).Error("marshal payload failed") - return err - } - - _, _, err = s.Runtime.Apply(writeData.Bytes()) + _, _, err = s.Runtime.Apply(context.Background(), payload) if err != nil { - log.Errorf("Apply set database failed: %#v\nPayload:\n %#v", err, writeData) + log.Errorf("Apply set database failed: %#v\nPayload:\n %#v", err, payload) } return @@ -396,15 +361,9 @@ func (s *KayakKVServer) DeleteDatabase(dbID proto.DatabaseID) (err error) { Data: metaBuf.Bytes(), } - writeData, err := utils.EncodeMsgPack(payload) - if err != nil { - log.WithError(err).Error("marshal payload failed") - return err - } - - _, _, err = s.Runtime.Apply(writeData.Bytes()) + _, _, err = s.Runtime.Apply(context.Background(), payload) if err != nil { - log.Errorf("Apply set database failed: %#v\nPayload:\n %#v", err, writeData) + log.Errorf("Apply set database failed: %#v\nPayload:\n %#v", err, payload) } return diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index e885e5d3a..781c64b26 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -20,6 +20,7 @@ import ( "fmt" "os" "os/signal" + "path/filepath" "syscall" "time" @@ -29,23 +30,21 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" - ka "github.com/CovenantSQL/CovenantSQL/kayak/api" - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - 
"github.com/CovenantSQL/CovenantSQL/twopc" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" "golang.org/x/crypto/ssh/terminal" ) const ( - //nodeDirPattern = "./node_%v" - //pubKeyStoreFile = "public.keystore" - //privateKeyFile = "private.key" - //dhtFileName = "dht.db" kayakServiceName = "Kayak" + kayakMethodName = "Call" + kayakWalFileName = "kayak.ldb" ) func runNode(nodeID proto.NodeID, listenAddr string) (err error) { @@ -71,19 +70,18 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { } // init nodes - log.Info("init peers") + log.WithField("node", nodeID).Info("init peers") _, peers, thisNode, err := initNodePeers(nodeID, conf.GConf.PubKeyStoreFile) if err != nil { log.WithError(err).Error("init nodes and peers failed") return } - var service *kt.ETLSTransportService var server *rpc.Server // create server - log.Info("create server") - if service, server, err = createServer( + log.WithField("addr", listenAddr).Info("create server") + if server, err = createServer( conf.GConf.PrivateKeyFile, conf.GConf.PubKeyStoreFile, masterKey, listenAddr); err != nil { log.WithError(err).Error("create server failed") return @@ -100,7 +98,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { // init kayak log.Info("init kayak runtime") var kayakRuntime *kayak.Runtime - if _, kayakRuntime, err = initKayakTwoPC(rootPath, thisNode, peers, st, service); err != nil { + if kayakRuntime, err = initKayakTwoPC(rootPath, thisNode, peers, st, server); err != nil { log.WithError(err).Error("init kayak runtime failed") return } @@ -192,35 +190,59 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { return } -func createServer(privateKeyPath, pubKeyStorePath string, masterKey []byte, listenAddr string) (service *kt.ETLSTransportService, server *rpc.Server, err error) { +func createServer(privateKeyPath, pubKeyStorePath string, masterKey []byte, listenAddr string) (server *rpc.Server, err error) { 
os.Remove(pubKeyStorePath) server = rpc.NewServer() - if err != nil { - return - } - err = server.InitRPCServer(listenAddr, privateKeyPath, masterKey) - service = ka.NewMuxService(kayakServiceName, server) + if err = server.InitRPCServer(listenAddr, privateKeyPath, masterKey); err != nil { + err = errors.Wrap(err, "init rpc server failed") + } return } -func initKayakTwoPC(rootDir string, node *proto.Node, peers *kayak.Peers, worker twopc.Worker, service *kt.ETLSTransportService) (config kayak.Config, runtime *kayak.Runtime, err error) { +func initKayakTwoPC(rootDir string, node *proto.Node, peers *proto.Peers, h kt.Handler, server *rpc.Server) (runtime *kayak.Runtime, err error) { // create kayak config - log.Info("create twopc config") - config = ka.NewTwoPCConfig(rootDir, service, worker) + log.Info("create kayak config") + + walPath := filepath.Join(rootDir, kayakWalFileName) + + var logWal kt.Wal + if logWal, err = kl.NewLevelDBWal(walPath); err != nil { + err = errors.Wrap(err, "init kayak log pool failed") + return + } + + config := &kt.RuntimeConfig{ + Handler: h, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: time.Second * 60, + Peers: peers, + Wal: logWal, + NodeID: node.ID, + ServiceName: kayakServiceName, + MethodName: kayakMethodName, + } // create kayak runtime - log.Info("create kayak runtime") - runtime, err = ka.NewTwoPCKayak(peers, config) - if err != nil { + log.Info("init kayak runtime") + if runtime, err = kayak.NewRuntime(config); err != nil { + err = errors.Wrap(err, "init kayak runtime failed") + return + } + + // register rpc service + if _, err = NewKayakService(server, kayakServiceName, runtime); err != nil { + err = errors.Wrap(err, "init kayak rpc service failed") return } // init runtime - log.Info("init kayak twopc runtime") - err = runtime.Init() + log.Info("start kayak runtime") + runtime.Start() return } diff --git a/cmd/cqld/initconf.go b/cmd/cqld/initconf.go index 
cf2f96a0d..961f6c950 100644 --- a/cmd/cqld/initconf.go +++ b/cmd/cqld/initconf.go @@ -20,33 +20,22 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils/log" ) -func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]proto.Node, peers *kayak.Peers, thisNode *proto.Node, err error) { +func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]proto.Node, peers *proto.Peers, thisNode *proto.Node, err error) { privateKey, err := kms.GetLocalPrivateKey() if err != nil { log.WithError(err).Fatal("get local private key failed") } - publicKey, err := kms.GetLocalPublicKey() - if err != nil { - log.WithError(err).Fatal("get local public key failed") - } - - leader := &kayak.Server{ - Role: proto.Leader, - ID: conf.GConf.BP.NodeID, - PubKey: publicKey, - } - peers = &kayak.Peers{ - Term: 1, - Leader: leader, - Servers: []*kayak.Server{}, - PubKey: publicKey, + peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 1, + Leader: conf.GConf.BP.NodeID, + }, } if conf.GConf.KnownNodes != nil { @@ -54,11 +43,7 @@ func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]pro if n.Role == proto.Leader || n.Role == proto.Follower { //FIXME all KnownNodes conf.GConf.KnownNodes[i].PublicKey = kms.BP.PublicKey - peers.Servers = append(peers.Servers, &kayak.Server{ - Role: n.Role, - ID: n.ID, - PubKey: publicKey, - }) + peers.Servers = append(peers.Servers, n.ID) } } } @@ -77,7 +62,7 @@ func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]pro // set p route and public keystore if conf.GConf.KnownNodes != nil { - for _, p := range conf.GConf.KnownNodes { + for i, p := range conf.GConf.KnownNodes { rawNodeIDHash, err := 
hash.NewHashFromStr(string(p.ID)) if err != nil { log.WithError(err).Error("load hash from node id failed") @@ -102,7 +87,7 @@ func initNodePeers(nodeID proto.NodeID, publicKeystorePath string) (nodes *[]pro } if p.ID == nodeID { kms.SetLocalNodeIDNonce(rawNodeID.CloneBytes(), &p.Nonce) - thisNode = &p + thisNode = &conf.GConf.KnownNodes[i] } } } diff --git a/kayak/api/mux.go b/cmd/cqld/kayak.go similarity index 55% rename from kayak/api/mux.go rename to cmd/cqld/kayak.go index 305df8177..807054268 100644 --- a/kayak/api/mux.go +++ b/cmd/cqld/kayak.go @@ -14,19 +14,28 @@ * limitations under the License. */ -package api +package main import ( - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" + "github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/rpc" ) -// NewMuxService create a new transport mux service and register to rpc server. -func NewMuxService(serviceName string, server *rpc.Server) (service *kt.ETLSTransportService) { - service = &kt.ETLSTransportService{ - ServiceName: serviceName, +type KayakService struct { + serviceName string + rt *kayak.Runtime +} + +func NewKayakService(server *rpc.Server, serviceName string, rt *kayak.Runtime) (s *KayakService, err error) { + s = &KayakService{ + serviceName: serviceName, + rt: rt, } - server.RegisterService(serviceName, service) + err = server.RegisterService(serviceName, s) + return +} - return service +func (s *KayakService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { + return s.rt.FollowerApply(req.Log) } diff --git a/cmd/hotfix/hash-upgrade/main.go b/cmd/hotfix/hash-upgrade/main.go index 0b51cfa96..310e31b15 100644 --- a/cmd/hotfix/hash-upgrade/main.go +++ b/cmd/hotfix/hash-upgrade/main.go @@ -27,8 +27,8 @@ import ( "strings" "time" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" 
"github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" @@ -47,16 +47,16 @@ func init() { flag.StringVar(&privateKey, "private", "private.key", "private key to use for signing") } -// OldBlock type mocks current sqlchain block type for custom serialization. -type OldBlock ct.Block +// Block_ type mocks current sqlchain block type for custom serialization. +type Block_ ct.Block -// MarshalBinary implements custom binary marshaller for OldBlock. -func (b *OldBlock) MarshalBinary() ([]byte, error) { +// MarshalBinary implements custom binary marshaller for Block_. +func (b *Block_) MarshalBinary() ([]byte, error) { return nil, nil } -// UnmarshalBinary implements custom binary unmarshaller for OldBlock. -func (b *OldBlock) UnmarshalBinary(data []byte) (err error) { +// UnmarshalBinary implements custom binary unmarshaller for Block_. +func (b *Block_) UnmarshalBinary(data []byte) (err error) { reader := bytes.NewReader(data) var headerBuf []byte @@ -81,12 +81,28 @@ func (b *OldBlock) UnmarshalBinary(data []byte) (err error) { return } +// Server_ ports back the original kayak server structure. +type Server_ struct { + Role proto.ServerRole + ID proto.NodeID + PubKey *asymmetric.PublicKey +} + +// Peers_ ports back the original kayak peers structure. +type Peers_ struct { + Term uint64 + Leader *Server_ + Servers []*Server_ + PubKey *asymmetric.PublicKey + Signature *asymmetric.Signature +} + // ServiceInstance defines the old service instance type before marshaller updates. 
type ServiceInstance struct { DatabaseID proto.DatabaseID - Peers *kayak.Peers + Peers *Peers_ ResourceMeta wt.ResourceMeta - GenesisBlock *OldBlock + GenesisBlock *Block_ } func main() { @@ -150,7 +166,8 @@ func main() { } newInstance.DatabaseID = instance.DatabaseID - newInstance.Peers = instance.Peers + // TODO: re-construct peers structure + // newInstance.Peers = instance.Peers newInstance.ResourceMeta = instance.ResourceMeta newInstance.GenesisBlock = &ct.Block{ SignedHeader: instance.GenesisBlock.SignedHeader, diff --git a/kayak/api/twopc.go b/kayak/api/twopc.go deleted file mode 100644 index 91b8231eb..000000000 --- a/kayak/api/twopc.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package api - -import ( - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -var ( - // DefaultProcessTimeout defines package default process timeout. - DefaultProcessTimeout = time.Second * 5 - // DefaultTransportID defines default transport id for service multiplex. - DefaultTransportID = "DEFAULT" -) - -// TwoPCOptions defines optional arguments for kayak twopc config. 
-type TwoPCOptions struct { - ProcessTimeout time.Duration - NodeID proto.NodeID - TransportID string - Logger *log.Logger -} - -// NewTwoPCOptions creates empty twopc configuration options. -func NewTwoPCOptions() *TwoPCOptions { - return &TwoPCOptions{ - ProcessTimeout: DefaultProcessTimeout, - TransportID: DefaultTransportID, - } -} - -// NewDefaultTwoPCOptions creates twopc configuration options with default settings. -func NewDefaultTwoPCOptions() *TwoPCOptions { - nodeID, _ := kms.GetLocalNodeID() - return NewTwoPCOptions().WithNodeID(nodeID) -} - -// WithProcessTimeout set custom process timeout to options. -func (o *TwoPCOptions) WithProcessTimeout(timeout time.Duration) *TwoPCOptions { - o.ProcessTimeout = timeout - return o -} - -// WithNodeID set custom node id to options. -func (o *TwoPCOptions) WithNodeID(nodeID proto.NodeID) *TwoPCOptions { - o.NodeID = nodeID - return o -} - -// WithTransportID set custom transport id to options. -func (o *TwoPCOptions) WithTransportID(id string) *TwoPCOptions { - o.TransportID = id - return o -} - -// WithLogger set custom logger to options. -func (o *TwoPCOptions) WithLogger(l *log.Logger) *TwoPCOptions { - o.Logger = l - return o -} - -// NewTwoPCKayak creates new kayak runtime. -func NewTwoPCKayak(peers *kayak.Peers, config kayak.Config) (*kayak.Runtime, error) { - return kayak.NewRuntime(config, peers) -} - -// NewTwoPCConfig creates new twopc config object. -func NewTwoPCConfig(rootDir string, service *kt.ETLSTransportService, worker twopc.Worker) kayak.Config { - return NewTwoPCConfigWithOptions(rootDir, service, worker, NewDefaultTwoPCOptions()) -} - -// NewTwoPCConfigWithOptions creates new twopc config object with custom options. 
-func NewTwoPCConfigWithOptions(rootDir string, service *kt.ETLSTransportService, - worker twopc.Worker, options *TwoPCOptions) kayak.Config { - runner := kayak.NewTwoPCRunner() - xptCfg := &kt.ETLSTransportConfig{ - TransportService: service, - NodeID: options.NodeID, - TransportID: options.TransportID, - ServiceName: service.ServiceName, - } - xpt := kt.NewETLSTransport(xptCfg) - cfg := &kayak.TwoPCConfig{ - RuntimeConfig: kayak.RuntimeConfig{ - RootDir: rootDir, - LocalID: options.NodeID, - Runner: runner, - Transport: xpt, - ProcessTimeout: options.ProcessTimeout, - }, - Storage: worker, - } - - return cfg -} diff --git a/kayak/api/twopc_integ_test.go b/kayak/api/twopc_integ_test.go deleted file mode 100644 index 7014a810e..000000000 --- a/kayak/api/twopc_integ_test.go +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package api - -import ( - "context" - "crypto/rand" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sync" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . "github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -// MockWorker is an autogenerated mock type for the Worker type -type MockWorker struct { - mock.Mock -} - -// Commit provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Commit(ctx context.Context, wb twopc.WriteBatch) (interface{}, error) { - ret := _m.Called(context.Background(), wb) - - var r0 interface{} - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) interface{}); ok { - r0 = rf(ctx, wb) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, twopc.WriteBatch) error); ok { - r1 = rf(ctx, wb) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Prepare provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(context.Background(), wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Rollback provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(context.Background(), wb) - - var r0 error - if rf, ok := 
ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type CallCollector struct { - l sync.Mutex - callOrder []string -} - -func (c *CallCollector) Append(call string) { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = append(c.callOrder, call) -} - -func (c *CallCollector) Get() []string { - c.l.Lock() - defer c.l.Unlock() - return c.callOrder[:] -} - -func (c *CallCollector) Reset() { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = c.callOrder[:0] -} - -type mockRes struct { - rootDir string - nodeID proto.NodeID - worker *MockWorker - server *rpc.Server - config kayak.Config - runtime *kayak.Runtime - listenAddr string -} - -func initKMS() (err error) { - var f *os.File - f, err = ioutil.TempFile("", "keystore_") - f.Close() - os.Remove(f.Name()) - route.InitKMS(f.Name()) - - // flag as test - kms.Unittest = true - - return -} - -func testWithNewNode() (mock *mockRes, err error) { - mock = &mockRes{} - addr := "127.0.0.1:0" - - // random node id - randBytes := make([]byte, 4) - rand.Read(randBytes) - mock.nodeID = proto.NodeID(hash.THashH(randBytes).String()) - kms.SetLocalNodeIDNonce(mock.nodeID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - - // mock rpc server - mock.server, err = rpc.NewServerWithService(rpc.ServiceMap{}) - if err != nil { - return - } - _, testFile, _, _ := runtime.Caller(0) - privKeyPath := filepath.Join(filepath.Dir(testFile), "../../test/node_standalone/private.key") - if err = mock.server.InitRPCServer(addr, privKeyPath, []byte("")); err != nil { - return - } - mock.listenAddr = mock.server.Listener.Addr().String() - route.SetNodeAddrCache(mock.nodeID.ToRawNodeID(), mock.listenAddr) - var nonce *cpuminer.Uint256 - if nonce, err = kms.GetLocalNonce(); err != nil { - return - } - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - if err = kms.SetPublicKey(mock.nodeID, *nonce, pubKey); err != nil 
{ - return - } - - // create mux service for kayak - service := NewMuxService("Kayak", mock.server) - mock.rootDir, err = ioutil.TempDir("", "kayak_test") - if err != nil { - return - } - - // worker - mock.worker = &MockWorker{} - - // create two pc config - options := NewTwoPCOptions(). - WithNodeID(mock.nodeID). - WithProcessTimeout(time.Millisecond * 300). - WithTransportID(DefaultTransportID). - WithLogger(log.StandardLogger()) - mock.config = NewTwoPCConfigWithOptions(mock.rootDir, service, mock.worker, options) - - return -} - -func createRuntime(peers *kayak.Peers, mock *mockRes) (err error) { - mock.runtime, err = NewTwoPCKayak(peers, mock.config) - return -} - -func testPeersFixture(term uint64, servers []*kayak.Server) *kayak.Peers { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - - newServers := make([]*kayak.Server, 0, len(servers)) - var leaderNode *kayak.Server - - for _, s := range servers { - newS := &kayak.Server{ - Role: s.Role, - ID: s.ID, - PubKey: pubKey, - } - newServers = append(newServers, newS) - if newS.Role == proto.Leader { - leaderNode = newS - } - } - - peers := &kayak.Peers{ - Term: term, - Leader: leaderNode, - Servers: servers, - PubKey: pubKey, - } - - peers.Sign(privKey) - - return peers -} - -func TestExampleTwoPCCommit(t *testing.T) { - // cleanup log storage after execution - cleanupDir := func(c *mockRes) { - os.RemoveAll(c.rootDir) - } - - // only commit logic - Convey("commit", t, func() { - var err error - - err = initKMS() - So(err, ShouldBeNil) - - lMock, err := testWithNewNode() - So(err, ShouldBeNil) - f1Mock, err := testWithNewNode() - So(err, ShouldBeNil) - f2Mock, err := testWithNewNode() - So(err, ShouldBeNil) - - // peers is a simple 3-node peer configuration - peers := 
testPeersFixture(1, []*kayak.Server{ - { - Role: proto.Leader, - ID: lMock.nodeID, - }, - { - Role: proto.Follower, - ID: f1Mock.nodeID, - }, - { - Role: proto.Follower, - ID: f2Mock.nodeID, - }, - }) - defer cleanupDir(lMock) - defer cleanupDir(f1Mock) - defer cleanupDir(f2Mock) - - // create runtime - err = createRuntime(peers, lMock) - So(err, ShouldBeNil) - err = createRuntime(peers, f1Mock) - So(err, ShouldBeNil) - err = createRuntime(peers, f2Mock) - So(err, ShouldBeNil) - - // init - err = lMock.runtime.Init() - So(err, ShouldBeNil) - err = f1Mock.runtime.Init() - So(err, ShouldBeNil) - err = f2Mock.runtime.Init() - So(err, ShouldBeNil) - - // make request issuer as leader node - kms.SetLocalNodeIDNonce(lMock.nodeID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - - // payload to send - testPayload := []byte("test data") - - // underlying worker mock, prepare/commit/rollback with be received the decoded data - callOrder := &CallCollector{} - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f1Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - f2Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - lMock.worker.On("Commit", mock.Anything, testPayload). 
- Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - - // start server - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - lMock.server.Serve() - }() - wg.Add(1) - go func() { - defer wg.Done() - f1Mock.server.Serve() - }() - wg.Add(1) - go func() { - defer wg.Done() - f2Mock.server.Serve() - }() - - // process the encoded data - _, _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "commit", - "commit", - "commit", - }) - - // process the encoded data again - callOrder.Reset() - _, _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "commit", - "commit", - "commit", - }) - - // shutdown - lMock.runtime.Shutdown() - f1Mock.runtime.Shutdown() - f2Mock.runtime.Shutdown() - - // close - lMock.server.Listener.Close() - f1Mock.server.Listener.Close() - f2Mock.server.Listener.Close() - lMock.server.Stop() - f1Mock.server.Stop() - f2Mock.server.Stop() - - wg.Wait() - }) -} diff --git a/kayak/boltdb_store.go b/kayak/boltdb_store.go deleted file mode 100644 index 23ab9ce90..000000000 --- a/kayak/boltdb_store.go +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright 2018 HashiCorp. - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kayak - -import ( - "errors" - - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/coreos/bbolt" -) - -const ( - // Permissions to use on the db file. This is only used if the - // database file does not exist and needs to be created. - dbFileMode = 0600 -) - -var ( - // Bucket names we perform transactions in - dbLogs = []byte("logs") - dbConf = []byte("conf") - - // ErrKeyNotFound is an error indicating a given key does not exist - ErrKeyNotFound = errors.New("not found") -) - -// BoltStore provides access to BoltDB for Raft to store and retrieve -// log entries. It also provides key/value storage, and can be used as -// a LogStore and StableStore. -type BoltStore struct { - // conn is the underlying handle to the db. - conn *bolt.DB - - // The path to the Bolt database file - path string -} - -// Options contains all the configuration used to open the BoltDB -type Options struct { - // Path is the file path to the BoltDB to use - Path string - - // BoltOptions contains any specific BoltDB options you might - // want to specify [e.g. open timeout] - BoltOptions *bolt.Options - - // NoSync causes the database to skip fsync calls after each - // write to the log. This is unsafe, so it should be used - // with caution. - NoSync bool -} - -// readOnly returns true if the contained bolt options say to open -// the DB in readOnly mode [this can be useful to tools that want -// to examine the log] -func (o *Options) readOnly() bool { - return o != nil && o.BoltOptions != nil && o.BoltOptions.ReadOnly -} - -// NewBoltStore takes a file path and returns a connected Raft backend. -func NewBoltStore(path string) (*BoltStore, error) { - return NewBoltStoreWithOptions(Options{Path: path}) -} - -// NewBoltStoreWithOptions uses the supplied options to open the BoltDB and prepare it for use as a raft backend. 
-func NewBoltStoreWithOptions(options Options) (*BoltStore, error) { - // Try to connect - handle, err := bolt.Open(options.Path, dbFileMode, options.BoltOptions) - if err != nil { - return nil, err - } - handle.NoSync = options.NoSync - - // Create the new store - store := &BoltStore{ - conn: handle, - path: options.Path, - } - - // If the store was opened read-only, don't try and create buckets - if !options.readOnly() { - // Set up our buckets - if err := store.initialize(); err != nil { - store.Close() - return nil, err - } - } - return store, nil -} - -// initialize is used to set up all of the buckets. -func (b *BoltStore) initialize() error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Create all the buckets - if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil { - return err - } - if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil { - return err - } - - return tx.Commit() -} - -// Close is used to gracefully close the DB connection. -func (b *BoltStore) Close() error { - return b.conn.Close() -} - -// FirstIndex returns the first known index from the Raft log. -func (b *BoltStore) FirstIndex() (uint64, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return 0, err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - if first, _ := curs.First(); first != nil { - return bytesToUint64(first), nil - } - - return 0, nil -} - -// LastIndex returns the last known index from the Raft log. -func (b *BoltStore) LastIndex() (uint64, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return 0, err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - if last, _ := curs.Last(); last != nil { - return bytesToUint64(last), nil - } - return 0, nil -} - -// GetLog is used to retrieve a log from BoltDB at a given index. 
-func (b *BoltStore) GetLog(idx uint64, log *Log) error { - tx, err := b.conn.Begin(false) - if err != nil { - return err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbLogs) - val := bucket.Get(uint64ToBytes(idx)) - - if val == nil { - return ErrKeyNotFound - } - return utils.DecodeMsgPack(val, log) -} - -// StoreLog is used to store a single raft log. -func (b *BoltStore) StoreLog(log *Log) error { - return b.StoreLogs([]*Log{log}) -} - -// StoreLogs is used to store a set of raft logs. -func (b *BoltStore) StoreLogs(logs []*Log) error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - for _, log := range logs { - key := uint64ToBytes(log.Index) - val, err := utils.EncodeMsgPack(log) - if err != nil { - return err - } - bucket := tx.Bucket(dbLogs) - if err := bucket.Put(key, val.Bytes()); err != nil { - return err - } - } - - return tx.Commit() -} - -// DeleteRange is used to delete logs within a given range inclusively. -func (b *BoltStore) DeleteRange(min, max uint64) error { - minKey := uint64ToBytes(min) - - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() { - // Handle out-of-range log index - if bytesToUint64(k) > max { - break - } - - // Delete in-range log index - if err := curs.Delete(); err != nil { - return err - } - } - - return tx.Commit() -} - -// Set is used to set a key/value set outside of the raft log. -func (b *BoltStore) Set(k, v []byte) error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbConf) - if err := bucket.Put(k, v); err != nil { - return err - } - - return tx.Commit() -} - -// Get is used to retrieve a value from the k/v store by key. 
-func (b *BoltStore) Get(k []byte) ([]byte, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return nil, err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbConf) - val := bucket.Get(k) - - if val == nil { - return nil, ErrKeyNotFound - } - return append([]byte(nil), val...), nil -} - -// SetUint64 is like Set, but handles uint64 values. -func (b *BoltStore) SetUint64(key []byte, val uint64) error { - return b.Set(key, uint64ToBytes(val)) -} - -// GetUint64 is like Get, but handles uint64 values. -func (b *BoltStore) GetUint64(key []byte) (uint64, error) { - val, err := b.Get(key) - if err != nil { - return 0, err - } - return bytesToUint64(val), nil -} - -// Sync performs an fsync on the database file handle. This is not necessary -// under normal operation unless NoSync is enabled, in which this forces the -// database file to sync against the disk. -func (b *BoltStore) Sync() error { - return b.conn.Sync() -} diff --git a/kayak/boltdb_store_test.go b/kayak/boltdb_store_test.go deleted file mode 100644 index 994f12857..000000000 --- a/kayak/boltdb_store_test.go +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright 2018 HashiCorp. - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "io/ioutil" - "os" - "testing" - "time" - - "github.com/coreos/bbolt" - . 
"github.com/smartystreets/goconvey/convey" -) - -func testBoltStore(t testing.TB) *BoltStore { - fh, err := ioutil.TempFile("", "bolt") - if err != nil { - t.Fatalf("err: %s", err) - } - os.Remove(fh.Name()) - - // Successfully creates and returns a store - store, err := NewBoltStore(fh.Name()) - if err != nil { - t.Fatalf("err: %s", err) - } - - return store -} - -func testLog(idx uint64, data string) *Log { - return &Log{ - Data: []byte(data), - Index: idx, - } -} - -func TestBoltStore_Implements(t *testing.T) { - Convey("test bolt store implements", t, func() { - var store interface{} = &BoltStore{} - var ok bool - _, ok = store.(StableStore) - So(ok, ShouldBeTrue) - _, ok = store.(LogStore) - So(ok, ShouldBeTrue) - }) -} - -func TestBoltOptionsTimeout(t *testing.T) { - Convey("test bolt options timeout", t, func() { - fh, err := ioutil.TempFile("", "bolt") - So(err, ShouldBeNil) - os.Remove(fh.Name()) - defer os.Remove(fh.Name()) - options := Options{ - Path: fh.Name(), - BoltOptions: &bolt.Options{ - Timeout: time.Second / 10, - }, - } - store, err := NewBoltStoreWithOptions(options) - So(err, ShouldBeNil) - defer store.Close() - // trying to open it again should timeout - doneCh := make(chan error, 1) - go func() { - _, err := NewBoltStoreWithOptions(options) - doneCh <- err - }() - select { - case err := <-doneCh: - So(err, ShouldNotBeNil) - So(err.Error(), ShouldEqual, "timeout") - case <-time.After(5 * time.Second): - Print("Gave up waiting for timeout response") - } - }) -} - -func TestBoltOptionsReadOnly(t *testing.T) { - Convey("test bolt options readonly", t, func() { - var err error - fh, err := ioutil.TempFile("", "bolt") - So(err, ShouldBeNil) - defer os.Remove(fh.Name()) - store, err := NewBoltStore(fh.Name()) - So(err, ShouldBeNil) - // Create the log - log := testLog(1, "log1") - // Attempt to store the log - err = store.StoreLog(log) - So(err, ShouldBeNil) - store.Close() - options := Options{ - Path: fh.Name(), - BoltOptions: &bolt.Options{ - 
Timeout: time.Second / 10, - ReadOnly: true, - }, - } - roStore, err := NewBoltStoreWithOptions(options) - So(err, ShouldBeNil) - defer roStore.Close() - result := new(Log) - err = roStore.GetLog(1, result) - So(err, ShouldBeNil) - - // Ensure the log comes back the same - So(result, ShouldResemble, log) - // Attempt to store the log, should fail on a read-only store/ - err = roStore.StoreLog(log) - So(err, ShouldEqual, bolt.ErrDatabaseReadOnly) - }) -} - -func TestNewBoltStore(t *testing.T) { - Convey("TestNewBoltStore", t, func() { - var err error - fh, err := ioutil.TempFile("", "bolt") - So(err, ShouldBeNil) - os.Remove(fh.Name()) - defer os.Remove(fh.Name()) - - // Successfully creates and returns a store - store, err := NewBoltStore(fh.Name()) - So(err, ShouldBeNil) - - // Ensure the file was created - So(store.path, ShouldEqual, fh.Name()) - _, err = os.Stat(fh.Name()) - So(err, ShouldBeNil) - - // Close the store so we can open again - err = store.Close() - So(err, ShouldBeNil) - - // Ensure our tables were created - db, err := bolt.Open(fh.Name(), dbFileMode, nil) - So(err, ShouldBeNil) - tx, err := db.Begin(true) - So(err, ShouldBeNil) - _, err = tx.CreateBucket([]byte(dbLogs)) - So(err, ShouldEqual, bolt.ErrBucketExists) - _, err = tx.CreateBucket([]byte(dbConf)) - So(err, ShouldEqual, bolt.ErrBucketExists) - }) -} - -func TestBoltStore_FirstIndex(t *testing.T) { - Convey("FirstIndex", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Should get 0 index on empty log - var err error - idx, err := store.FirstIndex() - So(err, ShouldBeNil) - So(idx, ShouldEqual, uint64(0)) - - // Set a mock raft log - logs := []*Log{ - testLog(1, "log1"), - testLog(2, "log2"), - testLog(3, "log3"), - } - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Fetch the first Raft index - idx, err = store.FirstIndex() - So(err, ShouldBeNil) - So(idx, ShouldEqual, uint64(1)) - }) -} - -func TestBoltStore_LastIndex(t 
*testing.T) { - Convey("LastIndex", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Should get 0 index on empty log - var err error - idx, err := store.LastIndex() - So(err, ShouldBeNil) - So(idx, ShouldEqual, uint64(0)) - - // Set a mock raft log - logs := []*Log{ - testLog(1, "log1"), - testLog(2, "log2"), - testLog(3, "log3"), - } - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Fetch the last Raft index - idx, err = store.LastIndex() - So(err, ShouldBeNil) - So(idx, ShouldEqual, uint64(3)) - }) -} - -func TestBoltStore_GetLog(t *testing.T) { - Convey("GetLog", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - log := new(Log) - - // Should return an error on non-existent log - var err error - err = store.GetLog(1, log) - So(err, ShouldEqual, ErrKeyNotFound) - - // Set a mock raft log - logs := []*Log{ - testLog(1, "log1"), - testLog(2, "log2"), - testLog(3, "log3"), - } - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Should return th/e proper log - err = store.GetLog(2, log) - So(err, ShouldBeNil) - So(log, ShouldResemble, logs[1]) - }) -} - -func TestBoltStore_SetLog(t *testing.T) { - Convey("SetLog", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create the log - log := testLog(1, "log1") - - // Attempt to store the log - var err error - err = store.StoreLog(log) - So(err, ShouldBeNil) - - // Retrieve the log again - result := new(Log) - err = store.GetLog(1, result) - So(err, ShouldBeNil) - - // Ensure the log comes back the same - So(result, ShouldResemble, log) - }) -} - -func TestBoltStore_SetLogs(t *testing.T) { - Convey("SetLogs", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create a set of logs - logs := []*Log{ - testLog(1, "log1"), - testLog(2, "log2"), - } - - // Attempt to store the logs - var err error - err = 
store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Ensure we stored them all - result1, result2 := new(Log), new(Log) - err = store.GetLog(1, result1) - So(err, ShouldBeNil) - So(result1, ShouldResemble, logs[0]) - err = store.GetLog(2, result2) - So(err, ShouldBeNil) - So(result2, ShouldResemble, logs[1]) - }) -} - -func TestBoltStore_DeleteRange(t *testing.T) { - Convey("DeleteRange", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create a set of logs - log1 := testLog(1, "log1") - log2 := testLog(2, "log2") - log3 := testLog(3, "log3") - logs := []*Log{log1, log2, log3} - - // Attempt to store the logs - var err error - err = store.StoreLogs(logs) - So(err, ShouldBeNil) - - // Attempt to delete a range of logs - err = store.DeleteRange(1, 2) - So(err, ShouldBeNil) - - // Ensure the logs were deleted - err = store.GetLog(1, new(Log)) - So(err, ShouldEqual, ErrKeyNotFound) - err = store.GetLog(2, new(Log)) - So(err, ShouldEqual, ErrKeyNotFound) - }) -} - -func TestBoltStore_Set_Get(t *testing.T) { - Convey("Set_Get", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Returns error on non-existent key - var err error - _, err = store.Get([]byte("bad")) - So(err, ShouldEqual, ErrKeyNotFound) - - k, v := []byte("hello"), []byte("world") - - // Try to set a k/v pair - err = store.Set(k, v) - So(err, ShouldBeNil) - - // Try to read it back - val, err := store.Get(k) - So(err, ShouldBeNil) - So(val, ShouldResemble, v) - }) -} - -func TestBoltStore_SetUint64_GetUint64(t *testing.T) { - Convey("SetUint64_GetUint64", t, func() { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Returns error on non-existent key - var err error - _, err = store.GetUint64([]byte("bad")) - So(err, ShouldEqual, ErrKeyNotFound) - - k, v := []byte("abc"), uint64(123) - - // Attempt to set the k/v pair - err = store.SetUint64(k, v) - So(err, ShouldBeNil) - - // 
Read back the value - val, err := store.GetUint64(k) - So(err, ShouldBeNil) - So(val, ShouldEqual, v) - }) -} diff --git a/kayak/doc.go b/kayak/caller.go similarity index 76% rename from kayak/doc.go rename to kayak/caller.go index fc2851c61..27c2ac634 100644 --- a/kayak/doc.go +++ b/kayak/caller.go @@ -14,8 +14,9 @@ * limitations under the License. */ -/* -Package kayak is a simple configurable multi-purpose consensus sdk. -The storage implementations contains code refactored from original hashicorp/raft and hashicorp/raft-boltdb repository. -*/ package kayak + +// Caller defines the rpc caller, supports mocks for the default rpc.PersistCaller. +type Caller interface { + Call(method string, req interface{}, resp interface{}) error +} diff --git a/kayak/inmem_store_test.go b/kayak/inmem_store_test.go deleted file mode 100644 index 9eee8dadb..000000000 --- a/kayak/inmem_store_test.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright 2018 HashiCorp. - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "sync" -) - -// MockInmemStore implements the LogStore and StableStore interface. -// It should NOT EVER be used for production. It is used only for -// unit tests. Use the MDBStore implementation instead. 
-type MockInmemStore struct { - l sync.RWMutex - lowIndex uint64 - highIndex uint64 - logs map[uint64]*Log - kv map[string][]byte - kvInt map[string]uint64 -} - -// NewMockInmemStore returns a new in-memory backend. Do not ever -// use for production. Only for testing. -func NewMockInmemStore() *MockInmemStore { - i := &MockInmemStore{ - logs: make(map[uint64]*Log), - kv: make(map[string][]byte), - kvInt: make(map[string]uint64), - } - return i -} - -// FirstIndex implements the LogStore interface. -func (i *MockInmemStore) FirstIndex() (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.lowIndex, nil -} - -// LastIndex implements the LogStore interface. -func (i *MockInmemStore) LastIndex() (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.highIndex, nil -} - -// GetLog implements the LogStore interface. -func (i *MockInmemStore) GetLog(index uint64, log *Log) error { - i.l.RLock() - defer i.l.RUnlock() - l, ok := i.logs[index] - if !ok { - return ErrKeyNotFound - } - *log = *l - return nil -} - -// StoreLog implements the LogStore interface. -func (i *MockInmemStore) StoreLog(log *Log) error { - return i.StoreLogs([]*Log{log}) -} - -// StoreLogs implements the LogStore interface. -func (i *MockInmemStore) StoreLogs(logs []*Log) error { - i.l.Lock() - defer i.l.Unlock() - for _, l := range logs { - i.logs[l.Index] = l - if i.lowIndex == 0 { - i.lowIndex = l.Index - } - if l.Index > i.highIndex { - i.highIndex = l.Index - } - } - return nil -} - -// DeleteRange implements the LogStore interface. -func (i *MockInmemStore) DeleteRange(min, max uint64) error { - i.l.Lock() - defer i.l.Unlock() - for j := min; j <= max; j++ { - delete(i.logs, j) - } - if min <= i.lowIndex { - i.lowIndex = max + 1 - } - if max >= i.highIndex { - i.highIndex = min - 1 - } - if i.lowIndex > i.highIndex { - i.lowIndex = 0 - i.highIndex = 0 - } - return nil -} - -// Set implements the StableStore interface. 
-func (i *MockInmemStore) Set(key []byte, val []byte) error { - i.l.Lock() - defer i.l.Unlock() - i.kv[string(key)] = val - return nil -} - -// Get implements the StableStore interface. -func (i *MockInmemStore) Get(key []byte) ([]byte, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.kv[string(key)], nil -} - -// SetUint64 implements the StableStore interface. -func (i *MockInmemStore) SetUint64(key []byte, val uint64) error { - i.l.Lock() - defer i.l.Unlock() - i.kvInt[string(key)] = val - return nil -} - -// GetUint64 implements the StableStore interface. -func (i *MockInmemStore) GetUint64(key []byte) (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.kvInt[string(key)], nil -} diff --git a/kayak/mock_Config_test.go b/kayak/mock_Config_test.go deleted file mode 100644 index f50b067f2..000000000 --- a/kayak/mock_Config_test.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by mockery v1.0.0. DO NOT EDIT. 
-package kayak - -import mock "github.com/stretchr/testify/mock" - -// MockConfig is an autogenerated mock type for the Config type -type MockConfig struct { - mock.Mock -} - -// GetRuntimeConfig provides a mock function with given fields: -func (_m *MockConfig) GetRuntimeConfig() *RuntimeConfig { - ret := _m.Called() - - var r0 *RuntimeConfig - if rf, ok := ret.Get(0).(func() *RuntimeConfig); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*RuntimeConfig) - } - } - - return r0 -} diff --git a/kayak/mock_LogStore_test.go b/kayak/mock_LogStore_test.go deleted file mode 100644 index 0cbaf7aba..000000000 --- a/kayak/mock_LogStore_test.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by mockery v1.0.0. DO NOT EDIT. 
-package kayak - -import mock "github.com/stretchr/testify/mock" - -// MockLogStore is an autogenerated mock type for the LogStore type -type MockLogStore struct { - mock.Mock -} - -// DeleteRange provides a mock function with given fields: min, max -func (_m *MockLogStore) DeleteRange(min uint64, max uint64) error { - ret := _m.Called(min, max) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, uint64) error); ok { - r0 = rf(min, max) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// FirstIndex provides a mock function with given fields: -func (_m *MockLogStore) FirstIndex() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetLog provides a mock function with given fields: index, l -func (_m *MockLogStore) GetLog(index uint64, l *Log) error { - ret := _m.Called(index, l) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, *Log) error); ok { - r0 = rf(index, l) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// LastIndex provides a mock function with given fields: -func (_m *MockLogStore) LastIndex() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StoreLog provides a mock function with given fields: l -func (_m *MockLogStore) StoreLog(l *Log) error { - ret := _m.Called(l) - - var r0 error - if rf, ok := ret.Get(0).(func(*Log) error); ok { - r0 = rf(l) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreLogs provides a mock function with given fields: logs -func (_m *MockLogStore) StoreLogs(logs []*Log) error { - ret := _m.Called(logs) - - 
var r0 error - if rf, ok := ret.Get(0).(func([]*Log) error); ok { - r0 = rf(logs) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/kayak/mock_Runner_test.go b/kayak/mock_Runner_test.go deleted file mode 100644 index 66706324d..000000000 --- a/kayak/mock_Runner_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package kayak - -import mock "github.com/stretchr/testify/mock" - -// MockRunner is an autogenerated mock type for the Runner type -type MockRunner struct { - mock.Mock -} - -// Apply provides a mock function with given fields: data -func (_m *MockRunner) Apply(data []byte) (interface{}, uint64, error) { - ret := _m.Called(data) - - var r0 interface{} - if rf, ok := ret.Get(0).(func([]byte) interface{}); ok { - r0 = rf(data) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - var r1 uint64 - if rf, ok := ret.Get(1).(func([]byte) uint64); ok { - r1 = rf(data) - } else { - r1 = ret.Get(1).(uint64) - } - - var r2 error - if rf, ok := ret.Get(2).(func([]byte) error); ok { - r2 = rf(data) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Init provides a mock function with given fields: config, peers, logs, stable, transport -func (_m *MockRunner) Init(config Config, peers *Peers, logs LogStore, stable StableStore, transport Transport) error { - ret := _m.Called(config, peers, logs, stable, transport) - - var r0 error - if rf, ok := ret.Get(0).(func(Config, *Peers, LogStore, StableStore, Transport) error); ok { - r0 = rf(config, peers, logs, stable, transport) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Shutdown provides a mock function with given fields: wait -func (_m *MockRunner) Shutdown(wait bool) error { - ret := _m.Called(wait) - - var r0 error - if rf, ok := ret.Get(0).(func(bool) error); ok { - r0 = rf(wait) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdatePeers provides a mock function with given fields: peers -func (_m 
*MockRunner) UpdatePeers(peers *Peers) error { - ret := _m.Called(peers) - - var r0 error - if rf, ok := ret.Get(0).(func(*Peers) error); ok { - r0 = rf(peers) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/kayak/mock_StableStore_test.go b/kayak/mock_StableStore_test.go deleted file mode 100644 index 353e65d87..000000000 --- a/kayak/mock_StableStore_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by mockery v1.0.0. DO NOT EDIT. 
-package kayak - -import mock "github.com/stretchr/testify/mock" - -// MockStableStore is an autogenerated mock type for the StableStore type -type MockStableStore struct { - mock.Mock -} - -// Get provides a mock function with given fields: key -func (_m *MockStableStore) Get(key []byte) ([]byte, error) { - ret := _m.Called(key) - - var r0 []byte - if rf, ok := ret.Get(0).(func([]byte) []byte); ok { - r0 = rf(key) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetUint64 provides a mock function with given fields: key -func (_m *MockStableStore) GetUint64(key []byte) (uint64, error) { - ret := _m.Called(key) - - var r0 uint64 - if rf, ok := ret.Get(0).(func([]byte) uint64); ok { - r0 = rf(key) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Set provides a mock function with given fields: key, val -func (_m *MockStableStore) Set(key []byte, val []byte) error { - ret := _m.Called(key, val) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, []byte) error); ok { - r0 = rf(key, val) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetUint64 provides a mock function with given fields: key, val -func (_m *MockStableStore) SetUint64(key []byte, val uint64) error { - ret := _m.Called(key, val) - - var r0 error - if rf, ok := ret.Get(0).(func([]byte, uint64) error); ok { - r0 = rf(key, val) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/kayak/mock_Worker_test.go b/kayak/mock_Worker_test.go deleted file mode 100644 index b6dcc5e2e..000000000 --- a/kayak/mock_Worker_test.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by mockery v1.0.0. DO NOT EDIT. -package kayak - -import ( - "context" - - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/stretchr/testify/mock" -) - -// MockWorker is an autogenerated mock type for the Worker type -type MockWorker struct { - mock.Mock -} - -// Commit provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Commit(ctx context.Context, wb twopc.WriteBatch) (interface{}, error) { - ret := _m.Called(context.Background(), wb) - - var r0 interface{} - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) interface{}); ok { - r0 = rf(ctx, wb) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, twopc.WriteBatch) error); ok { - r1 = rf(ctx, wb) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Prepare provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(context.Background(), wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Rollback provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(context.Background(), wb) - - 
var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/kayak/mock_kayak_test.go b/kayak/mock_kayak_test.go deleted file mode 100644 index d3ca04405..000000000 --- a/kayak/mock_kayak_test.go +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "context" - "crypto/rand" - "errors" - "fmt" - "os" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" -) - -// common mocks -type MockTransportRouter struct { - reqSeq uint64 - transports map[proto.NodeID]*MockTransport - transportLock sync.Mutex -} - -type MockTransport struct { - nodeID proto.NodeID - router *MockTransportRouter - queue chan Request - waitQueue chan *MockResponse - giveUp map[uint64]bool -} - -type MockRequest struct { - transport *MockTransport - ctx context.Context - RequestID uint64 - NodeID proto.NodeID - Method string - Log *Log -} - -type MockResponse struct { - ResponseID uint64 - Data []byte - Error error -} - -type MockTwoPCWorker struct { - nodeID proto.NodeID - state string - data int64 - total int64 -} - -var ( - _ twopc.Worker = &MockTwoPCWorker{} -) - -func (m *MockTransportRouter) getTransport(nodeID proto.NodeID) *MockTransport { - m.transportLock.Lock() - defer m.transportLock.Unlock() - - if _, ok := m.transports[nodeID]; !ok { - m.transports[nodeID] = &MockTransport{ - nodeID: nodeID, - router: m, - queue: make(chan Request, 1000), - waitQueue: make(chan *MockResponse, 1000), - giveUp: make(map[uint64]bool), - } - } - - return m.transports[nodeID] -} - -func (m *MockTransportRouter) ResetTransport(nodeID proto.NodeID) { - m.transportLock.Lock() - defer m.transportLock.Unlock() - - if _, ok := m.transports[nodeID]; ok { - // reset - delete(m.transports, nodeID) - } -} - -func (m *MockTransportRouter) ResetAll() { - m.transportLock.Lock() - defer m.transportLock.Unlock() - - m.transports = make(map[proto.NodeID]*MockTransport) -} - -func (m *MockTransportRouter) getReqID() uint64 { - return atomic.AddUint64(&m.reqSeq, 1) -} - -func (m *MockTransport) Init() error { - return nil -} - -func (m *MockTransport) Request(ctx context.Context, nodeID proto.NodeID, method string, log *Log) ([]byte, error) { - return m.router.getTransport(nodeID).sendRequest(&MockRequest{ - RequestID: m.router.getReqID(), - NodeID: m.nodeID, - Method: method, - Log: log, - ctx: ctx, - }) -} - -func (m 
*MockTransport) Process() <-chan Request { - return m.queue -} - -func (m *MockTransport) Shutdown() error { - return nil -} - -func (m *MockTransport) sendRequest(req Request) ([]byte, error) { - r := req.(*MockRequest) - r.transport = m - - if log.GetLevel() >= log.DebugLevel { - fmt.Println() - } - log.Debugf("[%v] [%v] -> [%v] request %v", r.RequestID, r.NodeID, req.GetPeerNodeID(), r.GetLog()) - m.queue <- r - - for { - select { - case <-r.ctx.Done(): - // deadline reached - log.Debugf("[%v] [%v] -> [%v] request timeout", - r.RequestID, r.NodeID, req.GetPeerNodeID()) - m.giveUp[r.RequestID] = true - return nil, r.ctx.Err() - case res := <-m.waitQueue: - if res.ResponseID != r.RequestID { - // put back to queue - if !m.giveUp[res.ResponseID] { - m.waitQueue <- res - } else { - delete(m.giveUp, res.ResponseID) - } - } else { - log.Debugf("[%v] [%v] -> [%v] response %v: %v", - r.RequestID, req.GetPeerNodeID(), r.NodeID, res.Data, res.Error) - return res.Data, res.Error - } - } - } -} - -func (m *MockRequest) GetPeerNodeID() proto.NodeID { - return m.NodeID -} - -func (m *MockRequest) GetMethod() string { - return m.Method -} - -func (m *MockRequest) GetLog() *Log { - return m.Log -} - -func (m *MockRequest) SendResponse(v []byte, err error) error { - m.transport.waitQueue <- &MockResponse{ - ResponseID: m.RequestID, - Data: v, - Error: err, - } - - return nil -} - -func (w *MockTwoPCWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - // test prepare - if w.state != "" { - return errors.New("invalid state") - } - - value, ok := wb.(int64) - if !ok { - return errors.New("invalid data") - } - - w.state = "prepared" - w.data = value - - return nil -} - -func (w *MockTwoPCWorker) Commit(ctx context.Context, wb twopc.WriteBatch) (interface{}, error) { - // test commit - if w.state != "prepared" { - return nil, errors.New("invalid state") - } - - if !reflect.DeepEqual(wb, w.data) { - return nil, errors.New("commit data not same as last") - } - - w.total 
+= w.data - w.state = "" - - return nil, nil -} - -func (w *MockTwoPCWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - // test rollback - if w.state != "prepared" { - return errors.New("invalid state") - } - - if !reflect.DeepEqual(wb, w.data) { - return errors.New("commit data not same as last") - } - - w.data = 0 - w.state = "" - - return nil -} - -func (w *MockTwoPCWorker) GetTotal() int64 { - return w.total -} - -func (w *MockTwoPCWorker) GetState() string { - return w.state -} - -type CallCollector struct { - l sync.Mutex - callOrder []string -} - -func (c *CallCollector) Append(call string) { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = append(c.callOrder, call) -} - -func (c *CallCollector) Get() []string { - c.l.Lock() - defer c.l.Unlock() - return c.callOrder[:] -} - -func (c *CallCollector) Reset() { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = c.callOrder[:0] -} - -func testPeersFixture(term uint64, servers []*Server) *Peers { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - - newServers := make([]*Server, 0, len(servers)) - var leaderNode *Server - - for _, s := range servers { - newS := &Server{ - Role: s.Role, - ID: s.ID, - PubKey: pubKey, - } - newServers = append(newServers, newS) - if newS.Role == proto.Leader { - leaderNode = newS - } - } - - peers := &Peers{ - Term: term, - Leader: leaderNode, - Servers: servers, - PubKey: pubKey, - } - - peers.Sign(privKey) - - return peers -} - -func testLogFixture(data []byte) (log *Log) { - log = &Log{ - Index: uint64(1), - Term: uint64(1), - Data: data, - } - - log.ComputeHash() - - return -} - -// test mock library itself -func TestMockTransport(t *testing.T) { - Convey("test transport with request timeout", t, func() { - mockRouter := 
&MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) - defer cancel() - - var err error - var response []byte - response, err = mockRouter.getTransport("a").Request( - ctx, "b", "Test", testLogFixture([]byte("happy"))) - - So(response, ShouldBeNil) - So(err, ShouldNotBeNil) - }) - - Convey("test transport with successful request", t, func(c C) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - testLog := testLogFixture([]byte("happy")) - var wg sync.WaitGroup - - wg.Add(1) - - go func() { - defer wg.Done() - select { - case req := <-mockRouter.getTransport("d").Process(): - c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("c")) - c.So(req.GetMethod(), ShouldEqual, "Test") - c.So(req.GetLog(), ShouldResemble, testLog) - req.SendResponse([]byte("happy too"), nil) - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - response, err = mockRouter.getTransport("c").Request( - context.Background(), "d", "Test", testLog) - - c.So(err, ShouldBeNil) - c.So(response, ShouldResemble, []byte("happy too")) - }() - - wg.Wait() - }) - - Convey("test transport with concurrent request", t, FailureContinues, func(c C) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - testLog := testLogFixture([]byte("happy")) - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - response, err = mockRouter.getTransport("e").Request( - context.Background(), "g", "test1", testLog) - - c.So(err, ShouldBeNil) - c.So(response, ShouldResemble, []byte("happy e test1")) - }() - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - response, err = mockRouter.getTransport("f").Request( - context.Background(), "g", "test2", testLog) - - c.So(err, ShouldBeNil) - c.So(response, 
ShouldResemble, []byte("happy f test2")) - }() - - wg.Add(1) - go func() { - defer wg.Done() - - for i := 0; i < 2; i++ { - select { - case req := <-mockRouter.getTransport("g").Process(): - c.So(req.GetPeerNodeID(), ShouldBeIn, []proto.NodeID{"e", "f"}) - c.So(req.GetMethod(), ShouldBeIn, []string{"test1", "test2"}) - c.So(req.GetLog(), ShouldResemble, testLog) - req.SendResponse([]byte(fmt.Sprintf("happy %s %s", req.GetPeerNodeID(), req.GetMethod())), nil) - } - } - }() - - wg.Wait() - }) - - Convey("test transport with piped request", t, FailureContinues, func(c C) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - var wg sync.WaitGroup - - randReq := testLogFixture([]byte("happy")) - randResp := make([]byte, 4) - rand.Read(randResp) - - t.Logf("test with request %d, response %d", randReq, randResp) - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - var req Request - - select { - case req = <-mockRouter.getTransport("j").Process(): - c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("i")) - c.So(req.GetMethod(), ShouldEqual, "pass1") - } - - response, err = mockRouter.getTransport("j").Request( - context.Background(), "k", "pass2", req.GetLog()) - - c.So(err, ShouldBeNil) - req.SendResponse(response, nil) - }() - - wg.Add(1) - go func() { - defer wg.Done() - select { - case req := <-mockRouter.getTransport("k").Process(): - c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("j")) - c.So(req.GetMethod(), ShouldEqual, "pass2") - c.So(req.GetLog(), ShouldResemble, randReq) - req.SendResponse(randResp, nil) - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - var err error - var response []byte - - response, err = mockRouter.getTransport("i").Request( - context.Background(), "j", "pass1", randReq) - - c.So(err, ShouldBeNil) - c.So(response, ShouldResemble, randResp) - }() - - wg.Wait() - }) -} - -func init() { - // set logger level by env - if os.Getenv("DEBUG") != "" { - 
log.SetLevel(log.DebugLevel) - } -} diff --git a/kayak/util.go b/kayak/rpc.go similarity index 69% rename from kayak/util.go rename to kayak/rpc.go index ab53256c7..15d09784b 100644 --- a/kayak/util.go +++ b/kayak/rpc.go @@ -16,18 +16,18 @@ package kayak -import ( - "encoding/binary" -) +// RPCHandler handles rpc. +type muxService struct { +} + +func (h *muxService) Prepare() (err error) { + return +} -// Converts bytes to an integer. -func bytesToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) +func (h *muxService) Rollback() (err error) { + return } -// Converts a uint to a byte slice. -func uint64ToBytes(u uint64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, u) - return buf +func (h *muxService) Commit() (err error) { + return } diff --git a/kayak/runtime.go b/kayak/runtime.go index 3f300bacd..ff6c69606 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -17,163 +17,837 @@ package kayak import ( + "context" + "encoding/binary" "fmt" - "path/filepath" + "io" + "math" + "sync" + "sync/atomic" + "time" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" ) const ( - // FileStorePath is the default log store filename - FileStorePath = "kayak.db" + // commit channel window size + commitWindow = 1 + // prepare window + trackerWindow = 10 ) -// Runtime defines common init/shutdown logic for different consensus protocol runner. +// Runtime defines the main kayak Runtime. type Runtime struct { - config *RuntimeConfig - runnerConfig Config - peers *Peers - isLeader bool - logStore *BoltStore + /// Indexes + // index for next log. 
+ nextIndexLock sync.Mutex + nextIndex uint64 + // lastCommit, last commit log index + lastCommit uint64 + // pendingPrepares, prepares needs to be committed/rollback + pendingPrepares map[uint64]bool + pendingPreparesLock sync.RWMutex + + /// Runtime entities + // current node id. + nodeID proto.NodeID + // instance identifies kayak in multi-instance environment + // e.g. use database id for SQLChain scenario. + instanceID string + // wal defines the wal for kayak. + wal kt.Wal + // underlying handler + sh kt.Handler + + /// Peers info + // peers defines the server peers. + peers *proto.Peers + // cached role of current node in peers, calculated from peers info. + role proto.ServerRole + // cached followers in peers, calculated from peers info. + followers []proto.NodeID + // peers lock for peers update logic. + peersLock sync.RWMutex + // calculated min follower nodes for prepare. + minPreparedFollowers int + // calculated min follower nodes for commit. + minCommitFollowers int + + /// RPC related + // callerMap caches the caller for peering nodes. + callerMap sync.Map // map[proto.NodeID]Caller + // service name for mux service. + serviceName string + // rpc method for coordination requests. + rpcMethod string + // tracks the outgoing rpc requests. + rpcTrackCh chan *rpcTracker + + //// Parameters + // prepare threshold defines the minimum node count requirement for prepare operation. + prepareThreshold float64 + // commit threshold defines the minimum node count requirement for commit operation. + commitThreshold float64 + // prepare timeout defines the max allowed time for prepare operation. + prepareTimeout time.Duration + // commit timeout defines the max allowed time for commit operation. + commitTimeout time.Duration + // channel for awaiting commits. + commitCh chan *commitReq + + /// Sub-routines management. + started uint32 + stopCh chan struct{} + wg sync.WaitGroup } -// NewRuntime creates new runtime. 
-func NewRuntime(config Config, peers *Peers) (*Runtime, error) { - if config == nil || peers == nil { - return nil, ErrInvalidConfig +// commitReq defines the commit operation input. +type commitReq struct { + ctx context.Context + data interface{} + index uint64 + log *kt.Log + result chan *commitResult +} + +// commitResult defines the commit operation result. +type commitResult struct { + result interface{} + err error + rpc *rpcTracker +} + +// NewRuntime creates new kayak Runtime. +func NewRuntime(cfg *kt.RuntimeConfig) (rt *Runtime, err error) { + peers := cfg.Peers + + // verify peers + if err = peers.Verify(); err != nil { + err = errors.Wrap(err, "verify peers during kayak init failed") + return + } + + followers := make([]proto.NodeID, 0, len(peers.Servers)) + exists := false + var role proto.ServerRole + + for _, v := range peers.Servers { + if !v.IsEqual(&peers.Leader) { + followers = append(followers, v) + } + + if v.IsEqual(&cfg.NodeID) { + exists = true + if v.IsEqual(&peers.Leader) { + role = proto.Leader + } else { + role = proto.Follower + } + } + } + + if !exists { + err = errors.Wrapf(kt.ErrNotInPeer, "node %v not in peers %v", cfg.NodeID, peers) + return + } + + // calculate fan-out count according to threshold and peers info + minPreparedFollowers := int(math.Max(math.Ceil(cfg.PrepareThreshold*float64(len(peers.Servers))), 1) - 1) + minCommitFollowers := int(math.Max(math.Ceil(cfg.CommitThreshold*float64(len(peers.Servers))), 1) - 1) + + rt = &Runtime{ + // indexes + pendingPrepares: make(map[uint64]bool, commitWindow*2), + + // handler and logs + sh: cfg.Handler, + wal: cfg.Wal, + instanceID: cfg.InstanceID, + + // peers + peers: cfg.Peers, + nodeID: cfg.NodeID, + followers: followers, + role: role, + minPreparedFollowers: minPreparedFollowers, + minCommitFollowers: minCommitFollowers, + + // rpc related + serviceName: cfg.ServiceName, + rpcMethod: fmt.Sprintf("%v.%v", cfg.ServiceName, cfg.MethodName), + rpcTrackCh: make(chan *rpcTracker, 
trackerWindow), + + // commits related + prepareThreshold: cfg.PrepareThreshold, + prepareTimeout: cfg.PrepareTimeout, + commitThreshold: cfg.CommitThreshold, + commitTimeout: cfg.CommitTimeout, + commitCh: make(chan *commitReq, commitWindow), + + // stop coordinator + stopCh: make(chan struct{}), + } + + // read from pool to rebuild uncommitted log map + if err = rt.readLogs(); err != nil { + return + } + + return +} + +// Start starts the Runtime. +func (r *Runtime) Start() (err error) { + if !atomic.CompareAndSwapUint32(&r.started, 0, 1) { + return + } + + // start commit cycle + r.goFunc(r.commitCycle) + // start rpc tracker collector + // TODO(): + + return +} + +// Shutdown waits for the Runtime to stop. +func (r *Runtime) Shutdown() (err error) { + if !atomic.CompareAndSwapUint32(&r.started, 1, 2) { + return } - // config authentication check - if !peers.Verify() { - return nil, ErrInvalidConfig + select { + case <-r.stopCh: + default: + close(r.stopCh) } + r.wg.Wait() - // peers config verification - serverInPeers := false - runtime := &Runtime{ - config: config.GetRuntimeConfig(), - peers: peers, - runnerConfig: config, + return +} + +// Apply defines entry for Leader node. 
+func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{}, logIndex uint64, err error) { + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmLeaderRollback, tmRollback, tmCommit time.Time + + defer func() { + fields := log.Fields{ + "r": logIndex, + } + if !tmLeaderPrepare.Before(tmStart) { + fields["lp"] = tmLeaderPrepare.Sub(tmStart) + } + if !tmFollowerPrepare.Before(tmLeaderPrepare) { + fields["fp"] = tmFollowerPrepare.Sub(tmLeaderPrepare) + } + if !tmLeaderRollback.Before(tmFollowerPrepare) { + fields["lr"] = tmLeaderRollback.Sub(tmFollowerPrepare) + } + if !tmRollback.Before(tmLeaderRollback) { + fields["fr"] = tmRollback.Sub(tmLeaderRollback) + } + if !tmCommit.Before(tmFollowerPrepare) { + fields["c"] = tmCommit.Sub(tmFollowerPrepare) + } + log.WithFields(fields).Debug("kayak leader apply") + }() + + if r.role != proto.Leader { + // not leader + err = kt.ErrNotLeader + return + } + + tmStart = time.Now() + + // check prepare in leader + if err = r.doCheck(req); err != nil { + err = errors.Wrap(err, "leader verify log") + return } - for _, s := range peers.Servers { - if s.ID == runtime.config.LocalID { - serverInPeers = true + // encode request + var encBuf []byte + if encBuf, err = r.sh.EncodePayload(req); err != nil { + err = errors.Wrap(err, "encode kayak payload failed") + return + } - if s.Role == proto.Leader { - runtime.isLeader = true + // create prepare request + var prepareLog *kt.Log + if prepareLog, err = r.leaderLogPrepare(encBuf); err != nil { + // serve error, leader could not write logs, change leader in block producer + // TODO(): CHANGE LEADER + return + } + + tmLeaderPrepare = time.Now() + + // send prepare to all nodes + prepareTracker := r.rpc(prepareLog, r.minPreparedFollowers) + prepareCtx, prepareCtxCancelFunc := context.WithTimeout(ctx, r.prepareTimeout) + defer prepareCtxCancelFunc() + prepareErrors, prepareDone, _ := prepareTracker.get(prepareCtx) + if 
!prepareDone { + // timeout, rollback + err = kt.ErrPrepareTimeout + goto ROLLBACK + } + + // collect errors + if err = r.errorSummary(prepareErrors); err != nil { + goto ROLLBACK + } + + tmFollowerPrepare = time.Now() + + select { + case cResult := <-r.commitResult(ctx, nil, prepareLog): + if cResult != nil { + logIndex = prepareLog.Index + result = cResult.result + err = cResult.err + + // wait until context deadline or commit done + if cResult.rpc != nil { + cResult.rpc.get(ctx) + } + } else { + log.Fatal("IMPOSSIBLE BRANCH") + select { + case <-ctx.Done(): + err = errors.Wrap(ctx.Err(), "process commit timeout") + goto ROLLBACK + default: } } + case <-ctx.Done(): + // pipeline commit timeout + logIndex = prepareLog.Index + err = errors.Wrap(ctx.Err(), "enqueue commit timeout") + goto ROLLBACK } - if !serverInPeers { - return nil, ErrInvalidConfig + tmCommit = time.Now() + + return + +ROLLBACK: + // rollback local + var rollbackLog *kt.Log + if rollbackLog, err = r.leaderLogRollback(prepareLog.Index); err != nil { + // serve error, construct rollback log failed, internal error + // TODO(): CHANGE LEADER + return } - return runtime, nil + tmLeaderRollback = time.Now() + + // async send rollback to all nodes + r.rpc(rollbackLog, 0) + + tmRollback = time.Now() + + return } -// Init defines the common init logic. -func (r *Runtime) Init() (err error) { - // init log store - var logStore *BoltStore +// FollowerApply defines entry for follower node. 
+func (r *Runtime) FollowerApply(l *kt.Log) (err error) { + if l == nil { + err = errors.Wrap(kt.ErrInvalidLog, "log is nil") + return + } + + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + if r.role == proto.Leader { + // not follower + err = kt.ErrNotFollower + return + } + + // verify log structure + switch l.Type { + case kt.LogPrepare: + err = r.followerPrepare(l) + case kt.LogRollback: + err = r.followerRollback(l) + case kt.LogCommit: + err = r.followerCommit(l) + case kt.LogBarrier: + // support barrier for log truncation and peer update + fallthrough + case kt.LogNoop: + // do nothing + err = r.followerNoop(l) + } - if logStore, err = NewBoltStore(filepath.Join(r.config.RootDir, FileStorePath)); err != nil { - return fmt.Errorf("new bolt store: %s", err.Error()) + if err == nil { + r.updateNextIndex(l) } - // call transport init - if err = r.config.Transport.Init(); err != nil { + return +} + +// UpdatePeers defines entry for peers update logic. +func (r *Runtime) UpdatePeers(peers *proto.Peers) (err error) { + r.peersLock.Lock() + defer r.peersLock.Unlock() + + return +} + +func (r *Runtime) leaderLogPrepare(data []byte) (*kt.Log, error) { + // just write new log + return r.newLog(kt.LogPrepare, data) +} + +func (r *Runtime) leaderLogRollback(i uint64) (*kt.Log, error) { + // just write new log + return r.newLog(kt.LogRollback, r.uint64ToBytes(i)) +} + +func (r *Runtime) doCheck(req interface{}) (err error) { + if err = r.sh.Check(req); err != nil { + err = errors.Wrap(err, "verify log") + return + } + + return +} + +func (r *Runtime) followerPrepare(l *kt.Log) (err error) { + // decode + var req interface{} + if req, err = r.sh.DecodePayload(l.Data); err != nil { + err = errors.Wrap(err, "decode kayak payload failed") + return + } + + if err = r.doCheck(req); err != nil { + return + } + + // write log + if err = r.wal.Write(l); err != nil { + err = errors.Wrap(err, "write follower prepare log failed") + return + } + + r.markPendingPrepare(l.Index) 
+ + return +} + +func (r *Runtime) followerRollback(l *kt.Log) (err error) { + var prepareLog *kt.Log + if _, prepareLog, err = r.getPrepareLog(l); err != nil || prepareLog == nil { + err = errors.Wrap(err, "get original request in rollback failed") return } - // call runner init - if err = r.config.Runner.Init(r.runnerConfig, r.peers, logStore, logStore, r.config.Transport); err != nil { - logStore.Close() - return fmt.Errorf("%s runner init: %s", r.config.LocalID, err.Error()) + // check if prepare already processed + if r.checkIfPrepareFinished(prepareLog.Index) { + err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") + return } - r.logStore = logStore - return nil + // write wal + if err = r.wal.Write(l); err != nil { + err = errors.Wrap(err, "write follower rollback log failed") + } + + return } -// Shutdown defines common shutdown logic. -func (r *Runtime) Shutdown() (err error) { - if err = r.config.Runner.Shutdown(true); err != nil { - return fmt.Errorf("%s runner shutdown: %s", r.config.LocalID, err.Error()) +func (r *Runtime) followerCommit(l *kt.Log) (err error) { + var prepareLog *kt.Log + var lastCommit uint64 + if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { + err = errors.Wrap(err, "get original request in commit failed") + return + } + + // check if prepare already processed + if r.checkIfPrepareFinished(prepareLog.Index) { + err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") + return } - if err = r.config.Transport.Shutdown(); err != nil { + myLastCommit := atomic.LoadUint64(&r.lastCommit) + + // check committed index + if lastCommit < myLastCommit { + // leader pushed a early index before commit + log.WithFields(log.Fields{ + "head": myLastCommit, + "supplied": lastCommit, + }).Warning("invalid last commit log") + err = errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index") + return + } else if lastCommit > myLastCommit { + // last log does not committed yet + // DO RECOVERY + 
log.WithFields(log.Fields{ + "expected": lastCommit, + "actual": myLastCommit, + }).Warning("DO RECOVERY, REQUIRED LAST COMMITTED DOES NOT COMMIT YET") + err = errors.Wrap(kt.ErrNeedRecovery, "last commit does not received, need recovery") return } - if r.logStore != nil { - if err = r.logStore.Close(); err != nil { - return fmt.Errorf("shutdown bolt store: %s", err.Error()) + cResult := <-r.commitResult(context.Background(), l, prepareLog) + if cResult != nil { + err = cResult.err + } + + return +} + +func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLog *kt.Log) (res chan *commitResult) { + // decode log and send to commit channel to process + res = make(chan *commitResult, 1) + + if prepareLog == nil { + res <- &commitResult{ + err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), + } + return + } + + // decode prepare log + var logReq interface{} + var err error + if logReq, err = r.sh.DecodePayload(prepareLog.Data); err != nil { + res <- &commitResult{ + err: errors.Wrap(err, "decode log payload failed"), + } + return + } + + req := &commitReq{ + ctx: ctx, + data: logReq, + index: prepareLog.Index, + result: res, + log: commitLog, + } + + select { + case <-ctx.Done(): + case r.commitCh <- req: + } + + return +} + +func (r *Runtime) commitCycle() { + // TODO(): panic recovery + for { + var cReq *commitReq + + select { + case <-r.stopCh: + return + case cReq = <-r.commitCh: + } + + if cReq != nil { + r.doCommit(cReq) } + } +} + +func (r *Runtime) doCommit(req *commitReq) { + r.peersLock.RLock() + defer r.peersLock.RUnlock() - r.logStore = nil + resp := &commitResult{} + + if r.role == proto.Leader { + resp.rpc, resp.result, resp.err = r.leaderDoCommit(req) + } else { + resp.err = r.followerDoCommit(req) } - return nil + req.result <- resp } -// Apply defines common process logic. 
-func (r *Runtime) Apply(data []byte) (result interface{}, offset uint64, err error) { - // validate if myself is leader - if !r.isLeader { - return nil, 0, ErrNotLeader +func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result interface{}, err error) { + if req.log != nil { + // mis-use follower commit for leader + log.Fatal("INVALID EXISTING LOG FOR LEADER COMMIT") + return } - result, offset, err = r.config.Runner.Apply(data) - if err != nil { - return nil, 0, err + // create leader log + var l *kt.Log + var logData []byte + + logData = append(logData, r.uint64ToBytes(req.index)...) + logData = append(logData, r.uint64ToBytes(atomic.LoadUint64(&r.lastCommit))...) + + if l, err = r.newLog(kt.LogCommit, logData); err != nil { + // serve error, leader could not write log + return + } + + // not wrapping underlying handler commit error + result, err = r.sh.Commit(req.data) + + if err == nil { + // mark last commit + atomic.StoreUint64(&r.lastCommit, l.Index) } + // send commit + commitCtx, commitCtxCancelFunc := context.WithTimeout(context.Background(), r.commitTimeout) + defer commitCtxCancelFunc() + tracker = r.rpc(l, r.minCommitFollowers) + _, _, _ = tracker.get(commitCtx) + + // TODO(): text log for rpc errors + + // TODO(): mark uncommitted nodes and remove from peers + return } -// GetLog fetches runtime log produced by runner. 
-func (r *Runtime) GetLog(offset uint64) (data []byte, err error) { - var l Log - if err = r.logStore.GetLog(offset, &l); err != nil { +func (r *Runtime) followerDoCommit(req *commitReq) (err error) { + if req.log == nil { + log.Fatal("NO LOG FOR FOLLOWER COMMIT") return } - data = l.Data + // write log first + if err = r.wal.Write(req.log); err != nil { + err = errors.Wrap(err, "write follower commit log failed") + return + } + + // do commit, not wrapping underlying handler commit error + _, err = r.sh.Commit(req.data) + + if err == nil { + atomic.StoreUint64(&r.lastCommit, req.log.Index) + } return } -// UpdatePeers defines common peers update logic. -func (r *Runtime) UpdatePeers(peers *Peers) error { - // Verify peers - if !peers.Verify() { - return ErrInvalidConfig +func (r *Runtime) getPrepareLog(l *kt.Log) (lastCommitIndex uint64, pl *kt.Log, err error) { + var prepareIndex uint64 + + // decode prepare index + if prepareIndex, err = r.bytesToUint64(l.Data); err != nil { + err = errors.Wrap(err, "log does not contain valid prepare index") + return + } + + // decode commit index + if len(l.Data) >= 16 { + lastCommitIndex, _ = r.bytesToUint64(l.Data[8:]) } - // Check if myself is still in peers - inPeers := false - isLeader := false + pl, err = r.wal.Get(prepareIndex) + + return +} + +func (r *Runtime) newLog(logType kt.LogType, data []byte) (l *kt.Log, err error) { + // allocate index + r.nextIndexLock.Lock() + i := r.nextIndex + r.nextIndex++ + r.nextIndexLock.Unlock() + l = &kt.Log{ + LogHeader: kt.LogHeader{ + Index: i, + Type: logType, + Producer: r.nodeID, + }, + Data: data, + } - for _, s := range peers.Servers { - if s.ID == r.config.LocalID { - inPeers = true - isLeader = s.Role == proto.Leader + // error write will be a fatal error, cause to node to fail fast + if err = r.wal.Write(l); err != nil { + log.Fatalf("WRITE LOG FAILED: %v", err) + } + + return +} + +func (r *Runtime) readLogs() (err error) { + // load logs, only called during init + var l 
*kt.Log + + for { + if l, err = r.wal.Read(); err != nil && err != io.EOF { + err = errors.Wrap(err, "load previous logs in wal failed") + break + } else if err == io.EOF { + err = nil + break + } + + switch l.Type { + case kt.LogPrepare: + // record in pending prepares + r.pendingPrepares[l.Index] = true + case kt.LogCommit: + // record last commit + var lastCommit uint64 + var prepareLog *kt.Log + if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { + err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") + break + } + if lastCommit != r.lastCommit { + err = errors.Wrapf(err, + "last commit record in wal mismatched (expected: %v, actual: %v)", r.lastCommit, lastCommit) + break + } + if !r.pendingPrepares[prepareLog.Index] { + err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") + break + } + r.lastCommit = l.Index + // resolve previous prepared + delete(r.pendingPrepares, prepareLog.Index) + case kt.LogRollback: + var prepareLog *kt.Log + if _, prepareLog, err = r.getPrepareLog(l); err != nil { + err = errors.Wrap(err, "previous prepare doe snot exists, node need full recovery") + return + } + if !r.pendingPrepares[prepareLog.Index] { + err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") + break + } + // resolve previous prepared + delete(r.pendingPrepares, prepareLog.Index) + default: + err = errors.Wrapf(kt.ErrInvalidLog, "invalid log type: %v", l.Type) break } + + // record nextIndex + r.updateNextIndex(l) } - if !inPeers { - // shutdown - return r.Shutdown() + return +} + +func (r *Runtime) updateNextIndex(l *kt.Log) { + r.nextIndexLock.Lock() + defer r.nextIndexLock.Unlock() + + if r.nextIndex < l.Index+1 { + r.nextIndex = l.Index + 1 } +} + +func (r *Runtime) checkIfPrepareFinished(index uint64) (finished bool) { + r.pendingPreparesLock.RLock() + defer r.pendingPreparesLock.RUnlock() - if err := r.config.Runner.UpdatePeers(peers); err != nil { - return 
fmt.Errorf("update peers to %s: %s", peers, err.Error()) + return !r.pendingPrepares[index] +} + +func (r *Runtime) markPendingPrepare(index uint64) { + r.pendingPreparesLock.Lock() + defer r.pendingPreparesLock.Unlock() + + r.pendingPrepares[index] = true +} + +func (r *Runtime) markPrepareFinished(index uint64) { + r.pendingPreparesLock.Lock() + defer r.pendingPreparesLock.Unlock() + + delete(r.pendingPrepares, index) +} + +func (r *Runtime) errorSummary(errs map[proto.NodeID]error) error { + failNodes := make([]proto.NodeID, 0, len(errs)) + + for s, err := range errs { + if err != nil { + failNodes = append(failNodes, s) + } + } + + if len(failNodes) == 0 { + return nil + } + + return errors.Wrapf(kt.ErrPrepareFailed, "fail on nodes: %v", failNodes) +} + +/// rpc related +func (r *Runtime) rpc(l *kt.Log, minCount int) (tracker *rpcTracker) { + req := &kt.RPCRequest{ + Instance: r.instanceID, + Log: l, } - r.isLeader = isLeader + tracker = newTracker(r, req, minCount) + tracker.send() + + // TODO(): track this rpc + + // TODO(): log remote errors + + return +} + +func (r *Runtime) getCaller(id proto.NodeID) Caller { + var caller Caller = rpc.NewPersistentCaller(id) + rawCaller, _ := r.callerMap.LoadOrStore(id, caller) + return rawCaller.(Caller) +} + +// SetCaller injects caller for test purpose. +func (r *Runtime) SetCaller(id proto.NodeID, c Caller) { + r.callerMap.Store(id, c) +} + +// RemoveCaller removes cached caller. 
+func (r *Runtime) RemoveCaller(id proto.NodeID) { + r.callerMap.Delete(id) +} + +func (r *Runtime) goFunc(f func()) { + r.wg.Add(1) + go func() { + defer r.wg.Done() + f() + }() +} + +/// utils +func (r *Runtime) uint64ToBytes(i uint64) (res []byte) { + res = make([]byte, 8) + binary.BigEndian.PutUint64(res, i) + return +} + +func (r *Runtime) bytesToUint64(b []byte) (uint64, error) { + if len(b) < 8 { + return 0, kt.ErrInvalidLog + } + return binary.BigEndian.Uint64(b), nil +} - return nil +//// future extensions, barrier, noop log placeholder etc. +func (r *Runtime) followerNoop(l *kt.Log) (err error) { + return r.wal.Write(l) } diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go deleted file mode 100644 index d704b9fa0..000000000 --- a/kayak/runtime_test.go +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "errors" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -func testConfig(rootDir string, nodeID proto.NodeID) Config { - config := &MockConfig{} - log.SetLevel(log.FatalLevel) - - runtimeConfig := &RuntimeConfig{ - RootDir: rootDir, - LocalID: nodeID, - Runner: &MockRunner{}, - Transport: &MockTransport{}, - ProcessTimeout: time.Microsecond * 800, - AutoBanCount: 100, - } - - config.On("GetRuntimeConfig").Return(runtimeConfig) - - return config -} - -func TestNewRuntime(t *testing.T) { - Convey("new runtime", t, func() { - config := testConfig(".", "leader") - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - Convey("missing arguments", func() { - var r *Runtime - var err error - - r, err = NewRuntime(nil, nil) - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - - r, err = NewRuntime(config, nil) - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - - r, err = NewRuntime(nil, peers) - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - }) - - Convey("invalid peer", func() { - newPeers := peers.Clone() - // change peer signature - newPeers.Term = 3 - - r, err := NewRuntime(config, &newPeers) - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - }) - - Convey("server not in peers", func() { - newConfig := testConfig(".", "test2") - - r, err := NewRuntime(newConfig, peers) - - So(r, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - }) - - Convey("success", func() { - r, err := NewRuntime(config, peers) - - So(r, ShouldNotBeNil) - So(err, ShouldBeNil) - So(r.isLeader, ShouldBeTrue) - }) - - Convey("success with follower", func() { - newConfig := testConfig(".", "follower1") - r, err := NewRuntime(newConfig, peers) - - 
So(r, ShouldNotBeNil) - So(err, ShouldBeNil) - So(r.isLeader, ShouldBeFalse) - }) - }) -} - -func TestRuntimeAll(t *testing.T) { - Convey("init", t, func() { - d, err := ioutil.TempDir("", "kayak_test") - So(err, ShouldBeNil) - if err == nil { - defer os.RemoveAll(d) - } - - config := testConfig(d, "leader") - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - r, err := NewRuntime(config, peers) - So(err, ShouldBeNil) - - runner := config.GetRuntimeConfig().Runner.(*MockRunner) - - Convey("runner init failed", func() { - unknownErr := errors.New("unknown error") - runner.On("Init", - mock.Anything, // config - mock.Anything, // peers - mock.Anything, // logStore - mock.Anything, // stableStore - mock.Anything, // transport - ).Return(unknownErr) - - err := r.Init() - - So(err, ShouldNotBeNil) - So(r.logStore, ShouldBeNil) - }) - - Convey("runner init success", func() { - runner.On("Init", - mock.Anything, // config - mock.Anything, // peers - mock.Anything, // logStore - mock.Anything, // stableStore - mock.Anything, // transport - ).Return(nil) - runner.On("Shutdown", mock.Anything). 
- Return(nil) - - var err error - err = r.Init() - So(err, ShouldBeNil) - So(r.logStore, ShouldNotBeNil) - - // run process - runner.On("Apply", mock.Anything).Return(nil, uint64(1), nil) - - _, _, err = r.Apply([]byte("test")) - So(err, ShouldBeNil) - - // test get log - var l Log - l.Data = []byte("test") - l.Index = uint64(1) - err = r.logStore.StoreLog(&l) - So(err, ShouldBeNil) - - data, err := r.GetLog(1) - So(err, ShouldBeNil) - So(data, ShouldResemble, []byte("test")) - - // call shutdowns - err = r.Shutdown() - So(err, ShouldBeNil) - }) - }) - - Convey("init success with follower", t, func() { - d, err := ioutil.TempDir("", "kayak_test") - So(err, ShouldBeNil) - if err == nil { - defer os.RemoveAll(d) - } - - config := testConfig(d, "follower1") - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - r, err := NewRuntime(config, peers) - So(err, ShouldBeNil) - - runner := config.GetRuntimeConfig().Runner.(*MockRunner) - runner.On("Init", - mock.Anything, // config - mock.Anything, // peers - mock.Anything, // logStore - mock.Anything, // stableStore - mock.Anything, // transport - ).Return(nil) - runner.On("Shutdown", mock.Anything). 
- Return(nil) - runner.On("Apply", mock.Anything).Return(nil, uint64(1), nil) - - err = r.Init() - So(err, ShouldBeNil) - defer r.Shutdown() - - _, _, err = r.Apply([]byte("test")) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrNotLeader) - }) - - Convey("init success with peers update", t, func() { - d, err := ioutil.TempDir("", "kayak_test") - So(err, ShouldBeNil) - if err == nil { - defer os.RemoveAll(d) - } - - config := testConfig(d, "leader") - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - r, err := NewRuntime(config, peers) - So(err, ShouldBeNil) - - runner := config.GetRuntimeConfig().Runner.(*MockRunner) - runner.On("Init", - mock.Anything, // config - mock.Anything, // peers - mock.Anything, // logStore - mock.Anything, // stableStore - mock.Anything, // transport - ).Return(nil) - runner.On("Shutdown", mock.Anything).Return(nil) - runner.On("UpdatePeers", mock.Anything).Return(nil) - - err = r.Init() - So(err, ShouldBeNil) - defer r.Shutdown() - - Convey("invalid peers", func() { - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - newPeers.Term = 5 - - // not valid - err := r.UpdatePeers(newPeers) - - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidConfig) - }) - - Convey("change leader", func() { - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Follower, - ID: "leader", - }, - { - Role: proto.Leader, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - // valid - err := r.UpdatePeers(newPeers) - - So(err, ShouldBeNil) - So(r.isLeader, ShouldBeFalse) - }) - - Convey("dropped peer", func() { - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Leader, - ID: "follower1", - }, - { - 
Role: proto.Follower, - ID: "follower2", - }, - }) - - // valid - err := r.UpdatePeers(newPeers) - - So(err, ShouldBeNil) - runner.AssertCalled(t, "Shutdown", true) - }) - }) -} diff --git a/kayak/test/runtime_test.go b/kayak/test/runtime_test.go new file mode 100644 index 000000000..31f8d4b4c --- /dev/null +++ b/kayak/test/runtime_test.go @@ -0,0 +1,347 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "math/rand" + "net" + "net/rpc" + "os" + "sync/atomic" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/jordwest/mock-conn" + "github.com/pkg/errors" + . 
"github.com/smartystreets/goconvey/convey" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func RandStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} + +type sqliteStorage struct { + st *storage.Storage +} + +type queryStructure struct { + ConnID uint64 + SeqNo uint64 + Timestamp int64 + Queries []storage.Query +} + +func newSQLiteStorage(dsn string) (s *sqliteStorage, err error) { + s = &sqliteStorage{} + s.st, err = storage.New(dsn) + return +} + +func (s *sqliteStorage) EncodePayload(request interface{}) (data []byte, err error) { + var buf *bytes.Buffer + if buf, err = utils.EncodeMsgPack(request); err != nil { + err = errors.Wrap(err, "encode payload failed") + return + } + + data = buf.Bytes() + return +} + +func (s *sqliteStorage) DecodePayload(data []byte) (request interface{}, err error) { + var req *queryStructure + if err = utils.DecodeMsgPack(data, &req); err != nil { + err = errors.Wrap(err, "decode payload failed") + return + } + + request = req + return +} + +func (s *sqliteStorage) Check(data interface{}) (err error) { + // no check + return nil +} + +func (s *sqliteStorage) Commit(data interface{}) (result interface{}, err error) { + var d *queryStructure + var ok bool + if d, ok = data.(*queryStructure); !ok { + err = errors.New("invalid data") + return + } + + tm := time.Now() + result, err = s.st.Exec(context.Background(), d.Queries) + log.WithField("c", time.Now().Sub(tm).String()).Info("db commit") + + return +} + +func (s *sqliteStorage) Query(ctx context.Context, queries []storage.Query) (columns []string, types []string, + data [][]interface{}, err error) { + return s.st.Query(ctx, queries) +} + +func (s *sqliteStorage) Close() { + if s.st != nil { + s.st.Close() + } +} + +type fakeMux struct { + mux map[proto.NodeID]*fakeService +} + +func 
newFakeMux() *fakeMux { + return &fakeMux{ + mux: make(map[proto.NodeID]*fakeService), + } +} + +func (m *fakeMux) register(nodeID proto.NodeID, s *fakeService) { + m.mux[nodeID] = s +} + +func (m *fakeMux) get(nodeID proto.NodeID) *fakeService { + return m.mux[nodeID] +} + +type fakeService struct { + rt *kayak.Runtime + s *rpc.Server +} + +func newFakeService(rt *kayak.Runtime) (fs *fakeService) { + fs = &fakeService{ + rt: rt, + s: rpc.NewServer(), + } + + fs.s.RegisterName("Test", fs) + + return +} + +func (s *fakeService) Call(req *kt.RPCRequest, resp *interface{}) (err error) { + return s.rt.FollowerApply(req.Log) +} + +func (s *fakeService) serveConn(c net.Conn) { + s.s.ServeCodec(utils.GetMsgPackServerCodec(c)) +} + +type fakeCaller struct { + m *fakeMux + target proto.NodeID +} + +func newFakeCaller(m *fakeMux, nodeID proto.NodeID) *fakeCaller { + return &fakeCaller{ + m: m, + target: nodeID, + } +} + +func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err error) { + fakeConn := mock_conn.NewConn() + + go c.m.get(c.target).serveConn(fakeConn.Server) + client := rpc.NewClientWithCodec(utils.GetMsgPackClientCodec(fakeConn.Client)) + defer client.Close() + + return client.Call(method, req, resp) +} + +func BenchmarkNewRuntime(b *testing.B) { + Convey("runtime test", b, func(c C) { + log.SetLevel(log.DebugLevel) + f, err := os.OpenFile("test.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + So(err, ShouldBeNil) + log.SetOutput(f) + defer f.Close() + + db1, err := newSQLiteStorage("test1.db") + So(err, ShouldBeNil) + defer func() { + db1.Close() + os.Remove("test1.db") + }() + db2, err := newSQLiteStorage("test2.db") + So(err, ShouldBeNil) + defer func() { + db2.Close() + os.Remove("test2.db") + }() + + node1 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + node2 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") + + peers := &proto.Peers{ + PeersHeader: 
proto.PeersHeader{ + Leader: node1, + Servers: []proto.NodeID{ + node1, + node2, + }, + }, + } + + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + err = peers.Sign(privKey) + So(err, ShouldBeNil) + + wal1 := kl.NewMemWal() + cfg1 := &kt.RuntimeConfig{ + Handler: db1, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: wal1, + NodeID: node1, + ServiceName: "Test", + MethodName: "Call", + } + rt1, err := kayak.NewRuntime(cfg1) + So(err, ShouldBeNil) + + wal2 := kl.NewMemWal() + cfg2 := &kt.RuntimeConfig{ + Handler: db2, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: wal2, + NodeID: node2, + ServiceName: "Test", + MethodName: "Call", + } + rt2, err := kayak.NewRuntime(cfg2) + So(err, ShouldBeNil) + + m := newFakeMux() + fs1 := newFakeService(rt1) + m.register(node1, fs1) + fs2 := newFakeService(rt2) + m.register(node2, fs2) + + rt1.SetCaller(node2, newFakeCaller(m, node2)) + rt2.SetCaller(node1, newFakeCaller(m, node1)) + + err = rt1.Start() + So(err, ShouldBeNil) + defer rt1.Shutdown() + + err = rt2.Start() + So(err, ShouldBeNil) + defer rt2.Shutdown() + + q1 := &queryStructure{ + Queries: []storage.Query{ + {Pattern: "CREATE TABLE IF NOT EXISTS test (test string)"}, + }, + } + So(err, ShouldBeNil) + + q2 := &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (test) VALUES(?)", + Args: []sql.NamedArg{sql.Named("", RandStringRunes(1024))}, + }, + }, + } + + rt1.Apply(context.Background(), q1) + rt2.Apply(context.Background(), q2) + rt1.Apply(context.Background(), q2) + db1.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT * FROM test"}, + }) + + b.ResetTimer() + + var count uint64 + atomic.StoreUint64(&count, 1) + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + atomic.AddUint64(&count, 1) + q := &queryStructure{ + 
Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (test) VALUES(?)", + Args: []sql.NamedArg{sql.Named("", RandStringRunes(1024))}, + }, + }, + } + _ = err + //c.So(err, ShouldBeNil) + + _, _, err = rt1.Apply(context.Background(), q) + //c.So(err, ShouldBeNil) + } + }) + + b.StopTimer() + + total := atomic.LoadUint64(&count) + _, _, d1, _ := db1.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT COUNT(1) FROM test"}, + }) + So(d1, ShouldHaveLength, 1) + So(d1[0], ShouldHaveLength, 1) + So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) + + _, _, d2, _ := db2.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT COUNT(1) FROM test"}, + }) + So(d2, ShouldHaveLength, 1) + So(d2[0], ShouldHaveLength, 1) + So(fmt.Sprint(d2[0][0]), ShouldResemble, fmt.Sprint(total)) + + b.StartTimer() + }) +} diff --git a/kayak/tracker.go b/kayak/tracker.go new file mode 100644 index 000000000..7eef7ed6a --- /dev/null +++ b/kayak/tracker.go @@ -0,0 +1,159 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/CovenantSQL/CovenantSQL/proto" +) + +// rpcTracker defines the rpc call tracker +// support tracking the rpc result. 
+type rpcTracker struct { + // related runtime + r *Runtime + // target nodes, a copy of current followers + nodes []proto.NodeID + // rpc method + method string + // rpc request + req interface{} + // minimum response count + minCount int + // responses + errLock sync.RWMutex + errors map[proto.NodeID]error + // scoreboard + complete int + sent uint32 + doneOnce sync.Once + doneCh chan struct{} + wg sync.WaitGroup + closed uint32 +} + +func newTracker(r *Runtime, req interface{}, minCount int) (t *rpcTracker) { + // copy nodes + nodes := append([]proto.NodeID(nil), r.followers...) + + if minCount > len(nodes) { + minCount = len(nodes) + } + if minCount < 0 { + minCount = 0 + } + + t = &rpcTracker{ + r: r, + nodes: nodes, + method: r.rpcMethod, + req: req, + minCount: minCount, + errors: make(map[proto.NodeID]error, len(nodes)), + doneCh: make(chan struct{}), + } + + return +} + +func (t *rpcTracker) send() { + if !atomic.CompareAndSwapUint32(&t.sent, 0, 1) { + return + } + + for i := range t.nodes { + t.wg.Add(1) + go t.callSingle(i) + } + + if t.minCount == 0 { + t.done() + } +} + +func (t *rpcTracker) callSingle(idx int) { + err := t.r.getCaller(t.nodes[idx]).Call(t.method, t.req, nil) + t.errLock.Lock() + defer t.errLock.Unlock() + t.errors[t.nodes[idx]] = err + t.complete++ + + if t.complete >= t.minCount { + t.done() + } +} + +func (t *rpcTracker) done() { + t.doneOnce.Do(func() { + if t.doneCh != nil { + select { + case <-t.doneCh: + default: + close(t.doneCh) + } + } + }) +} + +func (t *rpcTracker) get(ctx context.Context) (errors map[proto.NodeID]error, meets bool, finished bool) { + for { + select { + case <-t.doneCh: + meets = true + default: + } + + select { + case <-ctx.Done(): + case <-t.doneCh: + meets = true + } + + break + } + + t.errLock.RLock() + defer t.errLock.RUnlock() + + errors = make(map[proto.NodeID]error) + + for s, e := range t.errors { + errors[s] = e + } + + if !meets && len(errors) >= t.minCount { + meets = true + } + + if len(errors) 
== len(t.nodes) { + finished = true + } + + return +} + +func (t *rpcTracker) close() { + if !atomic.CompareAndSwapUint32(&t.closed, 0, 1) { + return + } + + t.wg.Wait() + t.done() +} diff --git a/kayak/transport/etls_transport.go b/kayak/transport/etls_transport.go deleted file mode 100644 index 59b104f18..000000000 --- a/kayak/transport/etls_transport.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "sync" - - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/rpc" -) - -// ETLSTransportConfig defines a transport config with transport id and rpc service related config. -type ETLSTransportConfig struct { - NodeID proto.NodeID - TransportID string - TransportService *ETLSTransportService - ServiceName string -} - -// ETLSTransport defines kayak transport using ETLS rpc as transport layer. -type ETLSTransport struct { - *ETLSTransportConfig - queue chan kayak.Request -} - -// ETLSTransportService defines kayak rpc endpoint to be registered to rpc server. -type ETLSTransportService struct { - ServiceName string - serviceMap sync.Map -} - -// ETLSTransportRequest defines kayak rpc request entity. 
-type ETLSTransportRequest struct { - proto.Envelope - TransportID string - NodeID proto.NodeID - Method string - Log *kayak.Log - Response []byte - Error error - respAvailable chan struct{} - respInit sync.Once -} - -// ETLSTransportResponse defines kayak rpc response entity. -type ETLSTransportResponse struct { - proto.Envelope - Data []byte -} - -// NewETLSTransport creates new transport and bind to transport service with specified transport id. -func NewETLSTransport(config *ETLSTransportConfig) (t *ETLSTransport) { - t = &ETLSTransport{ - ETLSTransportConfig: config, - queue: make(chan kayak.Request, 100), - } - - return -} - -// Init implements kayak.Transport.Init. -func (e *ETLSTransport) Init() error { - e.TransportService.register(e) - return nil -} - -// Request implements kayak.Transport.Request. -func (e *ETLSTransport) Request(ctx context.Context, - nodeID proto.NodeID, method string, log *kayak.Log) (response []byte, err error) { - req := &ETLSTransportRequest{ - TransportID: e.TransportID, - NodeID: e.NodeID, - Method: method, - Log: log, - } - resp := &ETLSTransportResponse{} - - if err = rpc.NewCaller().CallNodeWithContext(ctx, nodeID, e.ServiceName+".Call", req, resp); err != nil { - return - } - - response = resp.Data - - return -} - -// Process implements kayak.Transport.Process. -func (e *ETLSTransport) Process() <-chan kayak.Request { - // get response from remote request - return e.queue -} - -// Shutdown implements kayak.Transport.Shutdown. -func (e *ETLSTransport) Shutdown() error { - e.TransportService.deRegister(e) - return nil -} - -func (e *ETLSTransport) enqueue(req *ETLSTransportRequest) { - e.queue <- req -} - -// GetPeerNodeID implements kayak.Request.GetPeerNodeID. -func (r *ETLSTransportRequest) GetPeerNodeID() proto.NodeID { - return r.NodeID -} - -// GetMethod implements kayak.Request.GetMethod. -func (r *ETLSTransportRequest) GetMethod() string { - return r.Method -} - -// GetLog implements kayak.Request.GetLog. 
-func (r *ETLSTransportRequest) GetLog() *kayak.Log { - return r.Log -} - -// SendResponse implements kayak.Request.SendResponse. -func (r *ETLSTransportRequest) SendResponse(resp []byte, err error) error { - // send response with transport id - r.respInit.Do(r.initChan) - select { - case <-r.respAvailable: - return kayak.ErrInvalidRequest - default: - r.Response = resp - r.Error = err - close(r.respAvailable) - } - return nil -} - -func (r *ETLSTransportRequest) initChan() { - r.respAvailable = make(chan struct{}) -} - -func (r *ETLSTransportRequest) getResponse() ([]byte, error) { - r.respInit.Do(r.initChan) - <-r.respAvailable - return r.Response, r.Error -} - -// Call is the rpc entry of ETLS transport. -func (s *ETLSTransportService) Call(req *ETLSTransportRequest, resp *ETLSTransportResponse) error { - // verify - // TODO(xq262144): unified NodeID types in project - if req.Envelope.NodeID.String() != string(req.NodeID) { - return kayak.ErrInvalidRequest - } - - var t interface{} - var trans *ETLSTransport - var ok bool - - if t, ok = s.serviceMap.Load(req.TransportID); !ok { - return kayak.ErrInvalidRequest - } - - if trans, ok = t.(*ETLSTransport); !ok { - return kayak.ErrInvalidRequest - } - - trans.enqueue(req) - obj, err := req.getResponse() - - if resp != nil { - resp.Data = obj - } - - return err -} - -func (s *ETLSTransportService) register(t *ETLSTransport) { - // register transport to service map - s.serviceMap.Store(t.TransportID, t) -} - -func (s *ETLSTransportService) deRegister(t *ETLSTransport) { - // de-register transport from service map - s.serviceMap.Delete(t.TransportID) -} diff --git a/kayak/transport/etls_transport_test.go b/kayak/transport/etls_transport_test.go deleted file mode 100644 index 98e0283cd..000000000 --- a/kayak/transport/etls_transport_test.go +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "crypto/rand" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sync" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -type mockRes struct { - nodeID proto.NodeID - service *ETLSTransportService - transport *ETLSTransport - server *rpc.Server - listenAddr string -} - -func testWithNewNode() (mock *mockRes, err error) { - // mock etls transport without kms server - mock = &mockRes{} - addr := "127.0.0.1:0" - - // random node id - randBytes := make([]byte, 4) - rand.Read(randBytes) - mock.nodeID = proto.NodeID(hash.THashH(randBytes).String()) - kms.SetLocalNodeIDNonce(mock.nodeID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - mock.service = &ETLSTransportService{} - mock.transport = NewETLSTransport(&ETLSTransportConfig{ - NodeID: mock.nodeID, - TransportID: "test", - TransportService: mock.service, - ServiceName: "Kayak", - }) - mock.server, err = rpc.NewServerWithService(rpc.ServiceMap{"Kayak": mock.service}) - if err != nil { - return - } - _, testFile, _, _ := runtime.Caller(0) - privKeyPath := filepath.Join(filepath.Dir(testFile), "../../test/node_standalone/private.key") - if err = mock.server.InitRPCServer(addr, privKeyPath, []byte("")); err != nil { - return - } - mock.listenAddr = mock.server.Listener.Addr().String() - route.SetNodeAddrCache(mock.nodeID.ToRawNodeID(), mock.listenAddr) - var nonce *cpuminer.Uint256 - if nonce, err = kms.GetLocalNonce(); err != nil { - return - } - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - if err = kms.SetPublicKey(mock.nodeID, *nonce, pubKey); err != nil { - return - } - - log.Infof("fake node with node id: %v", mock.nodeID) - return -} - -func initKMS() (err error) { - var f *os.File - f, err = ioutil.TempFile("", "keystore_") - f.Close() - os.Remove(f.Name()) - route.InitKMS(f.Name()) - - // flag as test - kms.Unittest = true - - return -} - -func TestETLSTransport(t *testing.T) { - Convey("full test", t, FailureContinues, func(c C) { - var err error - - err = initKMS() - So(err, 
ShouldBeNil) - - mock1, err := testWithNewNode() - So(err, ShouldBeNil) - mock2, err := testWithNewNode() - So(err, ShouldBeNil) - - var wgServer, wgRequest sync.WaitGroup - - // start server - wgServer.Add(1) - go func() { - defer wgServer.Done() - mock1.server.Serve() - }() - - wgServer.Add(1) - go func() { - defer wgServer.Done() - mock2.server.Serve() - }() - - // init transport - err = mock1.transport.Init() - So(err, ShouldBeNil) - err = mock2.transport.Init() - So(err, ShouldBeNil) - - testLog := testLogFixture([]byte("test request")) - - // make request issuer as node 1 - kms.SetLocalNodeIDNonce(mock1.nodeID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - - wgRequest.Add(1) - go func() { - defer wgRequest.Done() - res, err := mock1.transport.Request(context.Background(), mock2.nodeID, "test method", testLog) - c.So(err, ShouldBeNil) - c.So(res, ShouldResemble, []byte("test response")) - }() - - wgRequest.Add(1) - go func() { - defer wgRequest.Done() - select { - case req := <-mock2.transport.Process(): - c.So(req.GetLog(), ShouldResemble, testLog) - c.So(req.GetMethod(), ShouldEqual, "test method") - c.So(req.GetPeerNodeID(), ShouldEqual, mock1.nodeID) - req.SendResponse([]byte("test response"), nil) - } - }() - - wgRequest.Wait() - - // shutdown transport - err = mock1.transport.Shutdown() - So(err, ShouldBeNil) - err = mock2.transport.Shutdown() - So(err, ShouldBeNil) - - // stop - mock1.server.Listener.Close() - mock1.server.Stop() - mock2.server.Listener.Close() - mock2.server.Stop() - - wgServer.Wait() - }) -} - -func TestETLSIntegration(t *testing.T) { - type createMockRes struct { - runner *kayak.TwoPCRunner - transport *ETLSTransport - worker *MockWorker - config *kayak.TwoPCConfig - runtime *kayak.Runtime - etlsMock *mockRes - } - - // create mock returns basic arguments to prepare for a server - createMock := func(etlsMock *mockRes, peers *kayak.Peers) (res *createMockRes) { - res = &createMockRes{} - log.SetLevel(log.FatalLevel) - d, _ := 
ioutil.TempDir("", "kayak_test") - - // etls mock res - res.etlsMock = etlsMock - // runner instance - res.runner = kayak.NewTwoPCRunner() - // transport for this instance - res.transport = res.etlsMock.transport - // underlying worker - res.worker = &MockWorker{} - // runner config including timeout settings, commit log storage, local server id - res.config = &kayak.TwoPCConfig{ - RuntimeConfig: kayak.RuntimeConfig{ - RootDir: d, - LocalID: etlsMock.nodeID, - Runner: res.runner, - Transport: res.transport, - ProcessTimeout: time.Millisecond * 800, - }, - Storage: res.worker, - } - res.runtime, _ = kayak.NewRuntime(res.config, peers) - go func() { - res.etlsMock.server.Serve() - }() - return - } - // cleanup log storage after execution - cleanupDir := func(c *createMockRes) { - os.RemoveAll(c.config.RuntimeConfig.RootDir) - } - - Convey("integration test", t, FailureContinues, func(c C) { - var err error - - err = initKMS() - So(err, ShouldBeNil) - - lNodeEtls, err := testWithNewNode() - So(err, ShouldBeNil) - f1NodeEtls, err := testWithNewNode() - So(err, ShouldBeNil) - f2NodeEtls, err := testWithNewNode() - So(err, ShouldBeNil) - - // peers is a simple 3-node peer configuration - peers := testPeersFixture(1, []*kayak.Server{ - { - Role: proto.Leader, - ID: lNodeEtls.nodeID, - }, - { - Role: proto.Follower, - ID: f1NodeEtls.nodeID, - }, - { - Role: proto.Follower, - ID: f2NodeEtls.nodeID, - }, - }) - - lMock := createMock(lNodeEtls, peers) - f1Mock := createMock(f1NodeEtls, peers) - f2Mock := createMock(f2NodeEtls, peers) - defer cleanupDir(lMock) - defer cleanupDir(f1Mock) - defer cleanupDir(f2Mock) - - // init - err = lMock.runtime.Init() - So(err, ShouldBeNil) - err = f1Mock.runtime.Init() - So(err, ShouldBeNil) - err = f2Mock.runtime.Init() - So(err, ShouldBeNil) - - // payload to send - testPayload := []byte("test data") - - // make request issuer as leader node - kms.SetLocalNodeIDNonce(lMock.config.LocalID.ToRawNodeID().CloneBytes(), &cpuminer.Uint256{}) - 
- // underlying worker mock, prepare/commit/rollback with be received the decoded data - callOrder := &CallCollector{} - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f1Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - f2Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - lMock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - - // process the encoded data - _, _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "commit", - "commit", - "commit", - }) - - // process the encoded data again - callOrder.Reset() - _, _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "commit", - "commit", - "commit", - }) - - // shutdown - lMock.runtime.Shutdown() - f1Mock.runtime.Shutdown() - f2Mock.runtime.Shutdown() - - // stop server - lNodeEtls.server.Listener.Close() - f1NodeEtls.server.Listener.Close() - f2NodeEtls.server.Listener.Close() - lNodeEtls.server.Stop() - f1NodeEtls.server.Stop() - f2NodeEtls.server.Stop() - }) -} diff --git a/kayak/transport/network_transport.go b/kayak/transport/network_transport.go deleted file mode 100644 index 5ebade2f3..000000000 --- a/kayak/transport/network_transport.go +++ /dev/null @@ -1,273 +0,0 @@ -/* - * 
Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "io" - "net" - "net/rpc" - "net/rpc/jsonrpc" - "sync" - - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -// ConnWithPeerNodeID defines interface support getting remote peer ID. -type ConnWithPeerNodeID interface { - net.Conn - - GetPeerNodeID() proto.NodeID -} - -// StreamLayer is the underlying network connection layer. -type StreamLayer interface { - Accept() (ConnWithPeerNodeID, error) - Dial(context.Context, proto.NodeID) (ConnWithPeerNodeID, error) -} - -// NetworkRequest is the request object hand off inter node request. -type NetworkRequest struct { - NodeID proto.NodeID - Method string - Log *kayak.Log - Response []byte - Error error - respAvailable chan struct{} - respInit sync.Once -} - -// ClientCodecBuilder is the client codec builder. -type ClientCodecBuilder func(io.ReadWriteCloser) rpc.ClientCodec - -// ServerCodecBuilder is the server codec builder. -type ServerCodecBuilder func(closer io.ReadWriteCloser) rpc.ServerCodec - -// NetworkResponse is the response object hand off inter node response. -type NetworkResponse struct { - Response []byte -} - -// NetworkTransport support customized stream layer integration with kayak transport. 
-type NetworkTransport struct { - config *NetworkTransportConfig - shutdownCh chan struct{} - queue chan kayak.Request -} - -// NetworkTransportConfig defines NetworkTransport config object. -type NetworkTransportConfig struct { - NodeID proto.NodeID - StreamLayer StreamLayer - - ClientCodec ClientCodecBuilder - ServerCodec ServerCodecBuilder -} - -// NetworkTransportRequestProxy defines a rpc proxy method exported to golang net/rpc. -type NetworkTransportRequestProxy struct { - transport *NetworkTransport - conn ConnWithPeerNodeID - server *rpc.Server -} - -// NewConfig returns new transport config. -func NewConfig(nodeID proto.NodeID, streamLayer StreamLayer) (c *NetworkTransportConfig) { - return NewConfigWithCodec(nodeID, streamLayer, jsonrpc.NewClientCodec, jsonrpc.NewServerCodec) -} - -// NewConfigWithCodec returns new transport config with custom codec. -func NewConfigWithCodec(nodeID proto.NodeID, streamLayer StreamLayer, - clientCodec ClientCodecBuilder, serverCodec ServerCodecBuilder) (c *NetworkTransportConfig) { - return &NetworkTransportConfig{ - NodeID: nodeID, - StreamLayer: streamLayer, - ClientCodec: clientCodec, - ServerCodec: serverCodec, - } -} - -// NewRequest returns new request entity. -func NewRequest(nodeID proto.NodeID, method string, log *kayak.Log) (r *NetworkRequest) { - return &NetworkRequest{ - NodeID: nodeID, - Method: method, - Log: log, - } -} - -// NewResponse returns response returns new response entity. -func NewResponse() (r *NetworkResponse) { - return &NetworkResponse{} -} - -// NewTransport returns new network transport. -func NewTransport(config *NetworkTransportConfig) (t *NetworkTransport) { - t = &NetworkTransport{ - config: config, - shutdownCh: make(chan struct{}), - queue: make(chan kayak.Request, 100), - } - - return -} - -// NewRequestProxy returns request proxy object hand-off golang net/rpc. 
-func NewRequestProxy(transport *NetworkTransport, conn ConnWithPeerNodeID) (rp *NetworkTransportRequestProxy) { - rp = &NetworkTransportRequestProxy{ - transport: transport, - conn: conn, - server: rpc.NewServer(), - } - - rp.server.RegisterName("Service", rp) - - return -} - -// GetPeerNodeID implements kayak.Request.GetPeerNodeID. -func (r *NetworkRequest) GetPeerNodeID() proto.NodeID { - return r.NodeID -} - -// GetMethod implements kayak.Request.GetMethod. -func (r *NetworkRequest) GetMethod() string { - return r.Method -} - -// GetLog implements kayak.Request.GetLog. -func (r *NetworkRequest) GetLog() *kayak.Log { - return r.Log -} - -// SendResponse implements kayak.Request.SendResponse. -func (r *NetworkRequest) SendResponse(resp []byte, err error) error { - r.respInit.Do(r.initChan) - select { - case <-r.respAvailable: - return kayak.ErrInvalidRequest - default: - r.Response = resp - r.Error = err - close(r.respAvailable) - } - return nil -} - -func (r *NetworkRequest) getResponse() ([]byte, error) { - r.respInit.Do(r.initChan) - <-r.respAvailable - return r.Response, r.Error -} - -func (r *NetworkRequest) initChan() { - r.respAvailable = make(chan struct{}) -} - -func (r *NetworkResponse) set(v []byte) { - r.Response = v -} - -func (r *NetworkResponse) get() []byte { - return r.Response -} - -// Init implements kayak.Transport.Init method. -func (t *NetworkTransport) Init() error { - go t.run() - return nil -} - -// Request implements kayak.Transport.Request method. 
-func (t *NetworkTransport) Request(ctx context.Context, nodeID proto.NodeID, - method string, log *kayak.Log) (response []byte, err error) { - conn, err := t.config.StreamLayer.Dial(ctx, nodeID) - - if err != nil { - return - } - - // check node id - if conn.GetPeerNodeID() != nodeID { - // err creating connection - return nil, kayak.ErrInvalidRequest - } - - client := rpc.NewClientWithCodec(t.config.ClientCodec(conn)) - req := NewRequest(t.config.NodeID, method, log) - res := NewResponse() - err = client.Call("Service.Call", req, res) - - return res.get(), err -} - -// Process implements kayak.Transport.Process method. -func (t *NetworkTransport) Process() <-chan kayak.Request { - return t.queue -} - -// Shutdown implements kayak.Transport.Shutdown method. -func (t *NetworkTransport) Shutdown() error { - select { - case <-t.shutdownCh: - default: - close(t.shutdownCh) - } - return nil -} - -func (t *NetworkTransport) enqueue(req *NetworkRequest) { - t.queue <- req -} - -// Call hand-off request from remote rpc server. 
-func (p *NetworkTransportRequestProxy) Call(req *NetworkRequest, res *NetworkResponse) error { - // verify node id - if p.conn.GetPeerNodeID() != req.NodeID { - return kayak.ErrInvalidRequest - } - - p.transport.enqueue(req) - obj, err := req.getResponse() - res.set(obj) - return err -} - -func (p *NetworkTransportRequestProxy) serve() { - p.server.ServeCodec(p.transport.config.ServerCodec(p.conn)) -} - -func (t *NetworkTransport) run() { - for { - select { - case <-t.shutdownCh: - return - default: - conn, err := t.config.StreamLayer.Accept() - if err != nil { - continue - } - - go t.handleConn(conn) - } - } -} - -func (t *NetworkTransport) handleConn(conn ConnWithPeerNodeID) { - NewRequestProxy(t, conn).serve() -} diff --git a/kayak/transport/network_transport_test.go b/kayak/transport/network_transport_test.go deleted file mode 100644 index e797032c0..000000000 --- a/kayak/transport/network_transport_test.go +++ /dev/null @@ -1,463 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "io/ioutil" - "os" - "sync" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/kayak" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/jordwest/mock-conn" - . 
"github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -type TestConn struct { - *mock_conn.End - peerNodeID proto.NodeID -} - -type TestStreamRouter struct { - sync.Mutex - streamMap map[proto.NodeID]*TestStream -} - -type TestStream struct { - nodeID proto.NodeID - router *TestStreamRouter - queue chan *TestConn -} - -func NewTestStreamRouter() *TestStreamRouter { - return &TestStreamRouter{ - streamMap: make(map[proto.NodeID]*TestStream), - } -} - -func NewTestStream(nodeID proto.NodeID, router *TestStreamRouter) *TestStream { - return &TestStream{ - nodeID: nodeID, - router: router, - queue: make(chan *TestConn), - } -} - -func NewSocketPair(fromNode proto.NodeID, toNode proto.NodeID) (clientConn *TestConn, serverConn *TestConn) { - conn := mock_conn.NewConn() - clientConn = NewTestConn(conn.Server, toNode) - serverConn = NewTestConn(conn.Client, fromNode) - return -} - -func NewTestConn(endpoint *mock_conn.End, peerNodeID proto.NodeID) *TestConn { - return &TestConn{ - End: endpoint, - peerNodeID: peerNodeID, - } -} - -func (r *TestStreamRouter) Get(id proto.NodeID) *TestStream { - r.Lock() - defer r.Unlock() - - if _, ok := r.streamMap[id]; !ok { - r.streamMap[id] = NewTestStream(id, r) - } - - return r.streamMap[id] -} - -func (c *TestConn) GetPeerNodeID() proto.NodeID { - return c.peerNodeID -} - -func (s *TestStream) Accept() (conn ConnWithPeerNodeID, err error) { - select { - case conn := <-s.queue: - return conn, nil - } -} - -func (s *TestStream) Dial(ctx context.Context, nodeID proto.NodeID) (conn ConnWithPeerNodeID, err error) { - clientConn, serverConn := NewSocketPair(s.nodeID, nodeID) - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case s.router.Get(nodeID).queue <- serverConn: - } - - return clientConn, nil -} - -// MockWorker is an autogenerated mock type for the Worker type -type MockWorker struct { - mock.Mock -} - -// Commit provides a mock function with given fields: ctx, wb -func (_m *MockWorker) 
Commit(ctx context.Context, wb twopc.WriteBatch) (interface{}, error) { - ret := _m.Called(context.Background(), wb) - - var r0 interface{} - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) interface{}); ok { - r0 = rf(ctx, wb) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, twopc.WriteBatch) error); ok { - r1 = rf(ctx, wb) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Prepare provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(context.Background(), wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Rollback provides a mock function with given fields: ctx, wb -func (_m *MockWorker) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - ret := _m.Called(context.Background(), wb) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, twopc.WriteBatch) error); ok { - r0 = rf(ctx, wb) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type CallCollector struct { - l sync.Mutex - callOrder []string -} - -func (c *CallCollector) Append(call string) { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = append(c.callOrder, call) -} - -func (c *CallCollector) Get() []string { - c.l.Lock() - defer c.l.Unlock() - return c.callOrder[:] -} - -func (c *CallCollector) Reset() { - c.l.Lock() - defer c.l.Unlock() - c.callOrder = c.callOrder[:0] -} - -func testPeersFixture(term uint64, servers []*kayak.Server) *kayak.Peers { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - - 
newServers := make([]*kayak.Server, 0, len(servers)) - var leaderNode *kayak.Server - - for _, s := range servers { - newS := &kayak.Server{ - Role: s.Role, - ID: s.ID, - PubKey: pubKey, - } - newServers = append(newServers, newS) - if newS.Role == proto.Leader { - leaderNode = newS - } - } - - peers := &kayak.Peers{ - Term: term, - Leader: leaderNode, - Servers: servers, - PubKey: pubKey, - } - - peers.Sign(privKey) - - return peers -} - -func testLogFixture(data []byte) (log *kayak.Log) { - log = &kayak.Log{ - Index: uint64(1), - Term: uint64(1), - Data: data, - } - - log.ComputeHash() - - return -} - -func TestConnPair(t *testing.T) { - Convey("test transport", t, FailureContinues, func(c C) { - router := NewTestStreamRouter() - stream1 := router.Get("id1") - stream2 := router.Get("id2") - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - var err error - clientConn, err := stream1.Dial(context.Background(), "id2") - c.So(err, ShouldBeNil) - _, err = clientConn.Write([]byte("test")) - c.So(err, ShouldBeNil) - clientConn.Close() - }() - - wg.Add(1) - go func() { - defer wg.Done() - var err error - serverConn, err := stream2.Accept() - c.So(err, ShouldBeNil) - buffer, err := ioutil.ReadAll(serverConn) - c.So(err, ShouldBeNil) - c.So(buffer, ShouldResemble, []byte("test")) - }() - - wg.Wait() - }) -} - -func TestTransport(t *testing.T) { - Convey("test transport", t, FailureContinues, func(c C) { - router := NewTestStreamRouter() - stream1 := router.Get("id1") - stream2 := router.Get("id2") - config1 := NewConfig("id1", stream1) - config2 := NewConfig("id2", stream2) - t1 := NewTransport(config1) - t2 := NewTransport(config2) - testLog := testLogFixture([]byte("test request")) - - var err error - - // init - err = t1.Init() - So(err, ShouldBeNil) - err = t2.Init() - So(err, ShouldBeNil) - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - res, err := t1.Request(context.Background(), "id2", "test method", testLog) - 
c.So(err, ShouldBeNil) - c.So(res, ShouldResemble, []byte("test response")) - }() - - wg.Add(1) - go func() { - defer wg.Done() - select { - case req := <-t2.Process(): - c.So(req.GetLog(), ShouldResemble, testLog) - c.So(req.GetMethod(), ShouldEqual, "test method") - c.So(req.GetPeerNodeID(), ShouldEqual, proto.NodeID("id1")) - req.SendResponse([]byte("test response"), nil) - } - }() - - wg.Wait() - - // shutdown transport - err = t1.Shutdown() - So(err, ShouldBeNil) - err = t2.Shutdown() - So(err, ShouldBeNil) - }) -} - -func TestIntegration(t *testing.T) { - type createMockRes struct { - runner *kayak.TwoPCRunner - transport *NetworkTransport - worker *MockWorker - config *kayak.TwoPCConfig - runtime *kayak.Runtime - } - - // router is a dummy channel based local rpc transport router - mockRouter := NewTestStreamRouter() - - // peers is a simple 3-node peer configuration - peers := testPeersFixture(1, []*kayak.Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - // create mock returns basic arguments to prepare for a server - createMock := func(nodeID proto.NodeID) (res *createMockRes) { - res = &createMockRes{} - log.SetLevel(log.FatalLevel) - d, _ := ioutil.TempDir("", "kayak_test") - - // runner instance - res.runner = kayak.NewTwoPCRunner() - // transport for this instance - res.transport = NewTransport(NewConfig(nodeID, mockRouter.Get(nodeID))) - // underlying worker - res.worker = &MockWorker{} - // runner config including timeout settings, commit log storage, local server id - res.config = &kayak.TwoPCConfig{ - RuntimeConfig: kayak.RuntimeConfig{ - RootDir: d, - LocalID: nodeID, - Runner: res.runner, - Transport: res.transport, - ProcessTimeout: time.Millisecond * 800, - }, - Storage: res.worker, - } - res.runtime, _ = kayak.NewRuntime(res.config, peers) - return - } - // cleanup log storage after execution - cleanupDir := func(c 
*createMockRes) { - os.RemoveAll(c.config.RuntimeConfig.RootDir) - } - - Convey("integration test", t, FailureContinues, func(c C) { - var err error - - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower2") - defer cleanupDir(lMock) - defer cleanupDir(f1Mock) - defer cleanupDir(f2Mock) - - // init - err = lMock.runtime.Init() - So(err, ShouldBeNil) - err = f1Mock.runtime.Init() - So(err, ShouldBeNil) - err = f2Mock.runtime.Init() - So(err, ShouldBeNil) - - // payload to send - testPayload := []byte("test data") - - // underlying worker mock, prepare/commit/rollback with be received the decoded data - callOrder := &CallCollector{} - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f1Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - f2Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - lMock.worker.On("Commit", mock.Anything, testPayload). 
- Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - - // process the encoded data - _, _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "commit", - "commit", - "commit", - }) - - // process the encoded data again - callOrder.Reset() - _, _, err = lMock.runtime.Apply(testPayload) - So(err, ShouldBeNil) - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "commit", - "commit", - "commit", - }) - - // shutdown - lMock.runtime.Shutdown() - f1Mock.runtime.Shutdown() - f2Mock.runtime.Shutdown() - }) -} diff --git a/kayak/twopc_runner.go b/kayak/twopc_runner.go deleted file mode 100644 index e49f27ff7..000000000 --- a/kayak/twopc_runner.go +++ /dev/null @@ -1,829 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kayak - -import ( - "bytes" - "context" - "fmt" - "runtime/trace" - "sync" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -var ( - // current term stored in local meta - keyCurrentTerm = []byte("CurrentTerm") - - // committed index store in local meta - keyCommittedIndex = []byte("CommittedIndex") -) - -// TwoPCConfig is a RuntimeConfig implementation organizing two phase commit mutation. -type TwoPCConfig struct { - RuntimeConfig - - // Storage is the underlying twopc Storage - Storage twopc.Worker -} - -type logProcessResult struct { - result interface{} - offset uint64 - err error -} - -// TwoPCRunner is a Runner implementation organizing two phase commit mutation. -type TwoPCRunner struct { - config *TwoPCConfig - peers *Peers - logStore LogStore - stableStore StableStore - transport Transport - - // Current term/log state - currentTerm uint64 - lastLogIndex uint64 - lastLogTerm uint64 - lastLogHash *hash.Hash - - // Server role - leader *Server - role proto.ServerRole - - // Shutdown channel to exit, protected to prevent concurrent exits - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex - - // Lock/events - processReq chan []byte - processRes chan logProcessResult - updatePeersLock sync.Mutex - updatePeersReq chan *Peers - updatePeersRes chan error - - currentState ServerState - stateLock sync.Mutex - currentContext context.Context - - // Tracks running goroutines - routinesGroup sync.WaitGroup -} - -// TwoPCWorkerWrapper wraps remote runner as worker. -type TwoPCWorkerWrapper struct { - runner *TwoPCRunner - nodeID proto.NodeID -} - -// NewTwoPCRunner create a two pc runner. 
-func NewTwoPCRunner() *TwoPCRunner { - return &TwoPCRunner{ - shutdownCh: make(chan struct{}), - processReq: make(chan []byte), - processRes: make(chan logProcessResult), - updatePeersReq: make(chan *Peers), - updatePeersRes: make(chan error), - } -} - -// GetRuntimeConfig implements Config.GetRuntimeConfig. -func (tpc *TwoPCConfig) GetRuntimeConfig() *RuntimeConfig { - return &tpc.RuntimeConfig -} - -// Init implements Runner.Init. -func (r *TwoPCRunner) Init(config Config, peers *Peers, logs LogStore, stable StableStore, transport Transport) error { - if _, ok := config.(*TwoPCConfig); !ok { - return ErrInvalidConfig - } - - if peers == nil || logs == nil || stable == nil || transport == nil { - return ErrInvalidConfig - } - - r.config = config.(*TwoPCConfig) - r.peers = peers - r.logStore = logs - r.stableStore = stable - r.transport = transport - r.setState(Idle) - - // restore from log/stable store - if err := r.tryRestore(); err != nil { - return err - } - - // set init peers and update term - if err := r.initState(); err != nil { - return err - } - - r.goFunc(r.run) - - return nil -} - -func (r *TwoPCRunner) tryRestore() error { - // Init term, committedIndex, storage - var err error - var lastTerm uint64 - - lastTerm, err = r.stableStore.GetUint64(keyCurrentTerm) - if err != nil && err != ErrKeyNotFound { - return fmt.Errorf("get last term failed: %s", err.Error()) - } - - if r.peers.Term < lastTerm { - // invalid config, term older than current context - // suggest rebuild local config - return ErrInvalidConfig - } - - var lastCommitted uint64 - lastCommitted, err = r.stableStore.GetUint64(keyCommittedIndex) - if err != nil && err != ErrKeyNotFound { - return fmt.Errorf("last committed index not found: %s", err.Error()) - } - - var lastCommittedLog Log - if lastCommitted > 0 { - if err = r.logStore.GetLog(lastCommitted, &lastCommittedLog); err != nil { - return fmt.Errorf("failed to get last log at index %d: %s", lastCommitted, err.Error()) - } - } - - // 
committed index term check - if r.peers.Term < lastCommittedLog.Term { - return fmt.Errorf("invalid last committed log term, peers: %d, local committed: %d", - r.peers.Term, lastCommittedLog.Term) - } - - // assert index related log validation - if lastCommitted != lastCommittedLog.Index { - // invalid log - return fmt.Errorf("invalid last committed log index, index: %d, log: %d", - lastCommitted, lastCommittedLog.Index) - } - - // get last index - var lastIndex uint64 - lastIndex, err = r.logStore.LastIndex() - if err != nil { - return fmt.Errorf("failed to get last index: %s", err.Error()) - } - - if lastIndex > lastCommitted { - // uncommitted log found, print warning - log.WithFields(log.Fields{ - "uncommitted": lastIndex, - "committed": lastCommitted, - }).Warning("truncating local uncommitted log") - - // truncate local uncommitted logs - r.logStore.DeleteRange(lastCommitted+1, lastIndex) - } - - if err = r.reValidateLocalLogs(); err != nil { - return err - } - - if err = r.restoreUnderlying(); err != nil { - return err - } - - r.currentTerm = r.peers.Term - r.lastLogTerm = lastCommittedLog.Term - r.lastLogIndex = lastCommitted - if lastCommittedLog.Index != 0 { - r.lastLogHash = &lastCommittedLog.Hash - } else { - r.lastLogHash = nil - } - - return nil -} - -func (r *TwoPCRunner) initState() error { - if !r.peers.Verify() { - return ErrInvalidConfig - } - - // set leader and node role - r.leader = r.peers.Leader - - for _, s := range r.peers.Servers { - if s.ID == r.config.LocalID { - r.role = s.Role - break - } - } - - // update peers term - return r.stableStore.SetUint64(keyCurrentTerm, r.peers.Term) -} - -func (r *TwoPCRunner) reValidateLocalLogs() error { - // TODO(xq262144): maybe re-validating local log hashes - return nil -} - -func (r *TwoPCRunner) restoreUnderlying() error { - // TODO(xq262144): restore underlying from snapshot and replaying local logs - return nil -} - -// UpdatePeers implements Runner.UpdatePeers. 
-func (r *TwoPCRunner) UpdatePeers(peers *Peers) error { - r.updatePeersLock.Lock() - defer r.updatePeersLock.Unlock() - - // wait for transaction completion - // TODO(xq262144): support transaction timeout - - if peers.Term == r.peers.Term { - // same term, ignore - return nil - } - - if peers.Term < r.peers.Term { - // lower term, maybe spoofing request - return ErrInvalidConfig - } - - // validate peers structure - if !peers.Verify() { - return ErrInvalidConfig - } - - r.updatePeersReq <- peers - return <-r.updatePeersRes -} - -// Apply implements Runner.Apply. -func (r *TwoPCRunner) Apply(data []byte) (result interface{}, offset uint64, err error) { - // check leader privilege - if r.role != proto.Leader { - return nil, 0, ErrNotLeader - } - - //TODO(auxten): need throughput optimization - r.processReq <- data - res := <-r.processRes - - return res.result, res.offset, res.err -} - -// Shutdown implements Runner.Shutdown. -func (r *TwoPCRunner) Shutdown(wait bool) error { - r.shutdownLock.Lock() - defer r.shutdownLock.Unlock() - - if !r.shutdown { - close(r.shutdownCh) - r.shutdown = true - r.setState(Shutdown) - if wait { - r.routinesGroup.Wait() - } - } - - return nil -} - -func (r *TwoPCRunner) run() { - for { - select { - case <-r.shutdownCh: - // TODO(xq262144): cleanup logic - return - case data := <-r.processReq: - r.processRes <- r.processNewLog(data) - case request := <-r.transport.Process(): - r.processRequest(request) - // TODO(xq262144): support timeout logic for auto rollback prepared transaction on leader change - case peersUpdate := <-r.safeForPeersUpdate(): - r.processPeersUpdate(peersUpdate) - } - } -} - -func (r *TwoPCRunner) safeForPeersUpdate() chan *Peers { - if r.getState() == Idle { - return r.updatePeersReq - } - - return nil -} - -func (r *TwoPCRunner) processNewLog(data []byte) (res logProcessResult) { - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "processNewLog") - defer task.End() - defer trace.StartRegion(ctx, 
"processNewLogRegion").End() - - // build Log - l := &Log{ - Index: r.lastLogIndex + 1, - Term: r.currentTerm, - Data: data, - LastHash: r.lastLogHash, - } - - // compute hash - l.ComputeHash() - - localPrepare := func(ctx context.Context) error { - // prepare local prepare node - if err := r.config.Storage.Prepare(ctx, l.Data); err != nil { - return err - } - - // write log to storage - return r.logStore.StoreLog(l) - } - - localRollback := func(ctx context.Context) error { - // prepare local rollback node - r.logStore.DeleteRange(r.lastLogIndex+1, l.Index) - return r.config.Storage.Rollback(ctx, l.Data) - } - - localCommit := func(ctx context.Context) (result interface{}, err error) { - result, err = r.config.Storage.Commit(ctx, l.Data) - - r.stableStore.SetUint64(keyCommittedIndex, l.Index) - r.lastLogHash = &l.Hash - r.lastLogIndex = l.Index - r.lastLogTerm = l.Term - - return - } - - // build 2PC workers - if len(r.peers.Servers) > 1 { - nodes := make([]twopc.Worker, 0, len(r.peers.Servers)) - nodes = append(nodes, newLocalWrapper( - localPrepare, - localRollback, - localCommit, - )) - - for _, s := range r.peers.Servers { - if s.ID != r.config.LocalID { - nodes = append(nodes, NewTwoPCWorkerWrapper(r, s.ID)) - } - } - - // start coordination - c := twopc.NewCoordinator(twopc.NewOptions(r.config.ProcessTimeout)) - res.result, res.err = c.Put(nodes, l) - res.offset = r.lastLogIndex - } else { - // single node short cut - // init context - ctx, cancel := context.WithTimeout(context.Background(), r.config.ProcessTimeout) - defer cancel() - - if err := localPrepare(ctx); err != nil { - localRollback(ctx) - res.err = err - return - } - - // Commit myself - // return commit err but still commit - res.result, res.err = localCommit(ctx) - res.offset = r.lastLogIndex - } - - return -} - -func (r *TwoPCRunner) setState(state ServerState) { - r.stateLock.Lock() - defer r.stateLock.Unlock() - r.currentState = state -} - -func (r *TwoPCRunner) getState() ServerState { - 
r.stateLock.Lock() - defer r.stateLock.Unlock() - return r.currentState -} - -func (r *TwoPCRunner) processRequest(req Request) { - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "processRequest") - defer task.End() - defer trace.StartRegion(ctx, "processRequestRegion").End() - - // verify call from leader - if err := r.verifyLeader(req); err != nil { - req.SendResponse(nil, err) - return - } - - switch req.GetMethod() { - case "Prepare": - r.processPrepare(req) - case "Commit": - r.processCommit(req) - case "Rollback": - r.processRollback(req) - default: - req.SendResponse(nil, ErrInvalidRequest) - } -} - -func (r *TwoPCRunner) processPeersUpdate(peersUpdate *Peers) { - // update peers - var err error - if err = r.stableStore.SetUint64(keyCurrentTerm, peersUpdate.Term); err == nil { - r.peers = peersUpdate - r.currentTerm = peersUpdate.Term - - // change role - r.leader = r.peers.Leader - - notFound := true - - for _, s := range r.peers.Servers { - if s.ID == r.config.LocalID { - r.role = s.Role - notFound = false - break - } - } - - if notFound { - // shutdown - r.Shutdown(false) - } - } - - r.updatePeersRes <- err -} - -func (r *TwoPCRunner) verifyLeader(req Request) error { - // TODO(xq262144): verify call from current leader or from new leader containing new peers info - if req.GetPeerNodeID() != r.peers.Leader.ID { - // not our leader - return ErrInvalidRequest - } - - return nil -} - -func (r *TwoPCRunner) verifyLog(req Request) (log *Log, err error) { - log = req.GetLog() - - if log == nil { - err = ErrInvalidLog - return - } - - if !log.VerifyHash() { - err = ErrInvalidLog - return - } - - return -} - -func (r *TwoPCRunner) processPrepare(req Request) { - req.SendResponse(nil, func() (err error) { - // already in transaction, try abort previous - if r.getState() != Idle { - // TODO(xq262144): has running transaction - // TODO(xq262144): abort previous or failed current - log.Warning("runner status not available for new prepare request") - } 
- - // init context - var cancelFunc context.CancelFunc - r.currentContext, cancelFunc = context.WithTimeout(context.Background(), r.config.ProcessTimeout) - _ = cancelFunc - - // get log - var l *Log - if l, err = r.verifyLog(req); err != nil { - log.WithError(err).Debug("verify log failed") - return - } - - // check log index existence - var lastIndex uint64 - if lastIndex, err = r.logStore.LastIndex(); err != nil || lastIndex >= l.Index { - // already prepared or failed - log.WithFields(log.Fields{ - "lastIndex": lastIndex, - "index": l.Index, - }).WithError(err).Debug("check log existence failed") - - return - } - - // check prepare hash with last log hash - if l.LastHash != nil && lastIndex == 0 { - // invalid - err = ErrInvalidLog - log.WithFields(log.Fields{ - "lastIndex": lastIndex, - "hash": l.LastHash, - }).WithError(err).Debug("invalid log parent hash") - return - } - - if lastIndex > 0 { - var lastLog Log - if err = r.logStore.GetLog(lastIndex, &lastLog); err != nil { - log.WithError(err).Debug("get last log failed") - return - } - - if !l.LastHash.IsEqual(&lastLog.Hash) { - err = ErrInvalidLog - log.WithFields(log.Fields{ - "expected": lastLog.Hash, - "actual": l.LastHash, - }).WithError(err).Debug("parent hash not matched") - return - } - } - - // prepare on storage - if err = r.config.Storage.Prepare(r.currentContext, l.Data); err != nil { - log.WithError(err).Debug("call storage prepare failed") - return - } - - // write log to storage - if err = r.logStore.StoreLog(l); err != nil { - log.WithError(err).Debug("record log to log storage failed") - return - } - - // set state to prepared - r.setState(Prepared) - - return nil - }()) -} - -func (r *TwoPCRunner) processCommit(req Request) { - // commit log - req.SendResponse(func() (resp []byte, err error) { - // TODO(xq262144): check current running transaction index - if r.getState() != Prepared { - // not prepared, failed directly - err = ErrInvalidRequest - log.WithError(err).Warning("runner status 
not prepared to commit") - return - } - - // get log - var l *Log - if l, err = r.verifyLog(req); err != nil { - log.WithError(err).Debug("verify log failed") - return - } - - var lastIndex uint64 - if lastIndex, err = r.logStore.LastIndex(); err != nil { - log.WithError(err).Debug("get last log index failed") - return - } else if lastIndex < l.Index { - // not logged, need re-prepare - err = ErrInvalidLog - log.WithFields(log.Fields{ - "lastIndex": lastIndex, - "index": l.Index, - }).WithError(err).Debug("check log index correctness failed") - return - } - - if r.lastLogIndex+1 != l.Index { - // not at the head of the commit position - err = ErrInvalidLog - log.WithFields(log.Fields{ - "lastLogIndex": r.lastLogIndex, - "index": l.Index, - }).WithError(err).Debug("check log index correctness failed") - return - } - - // get log - var lastLog Log - if err = r.logStore.GetLog(l.Index, &lastLog); err != nil { - log.WithError(err).Debug("get last log failed") - return - } - - // commit on storage - // return err but still commit local index - var respData interface{} - respData, err = r.config.Storage.Commit(r.currentContext, l.Data) - - // encode response - if err == nil { - var encodeBuf *bytes.Buffer - if encodeBuf, err = utils.EncodeMsgPack(respData); err == nil { - resp = encodeBuf.Bytes() - } else { - log.WithError(err).Warning("encode response failed") - // clear error - err = nil - } - } else { - log.WithError(err).Warning("call storage commit failed") - } - - // commit log - r.stableStore.SetUint64(keyCommittedIndex, l.Index) - r.lastLogHash = &lastLog.Hash - r.lastLogIndex = lastLog.Index - r.lastLogTerm = lastLog.Term - - // set state to idle - r.setState(Idle) - - return - }()) -} - -func (r *TwoPCRunner) processRollback(req Request) { - // rollback log - req.SendResponse(nil, func() (err error) { - // TODO(xq262144): check current running transaction index - if r.getState() != Prepared { - // not prepared, failed directly - err = ErrInvalidRequest - 
log.WithError(err).Warning("runner status not prepared to rollback") - return - } - - // get log - var l *Log - if l, err = r.verifyLog(req); err != nil { - log.WithError(err).Debug("verify log failed") - return - } - - var lastIndex uint64 - if lastIndex, err = r.logStore.LastIndex(); err != nil { - log.WithError(err).Debug("get last log index failed") - return - } else if lastIndex < l.Index { - // not logged, no rollback required, maybe previous initiated rollback - log.WithFields(log.Fields{ - "lastIndex": lastIndex, - "index": l.Index, - }).Debug("index beyond max index, rollback request ignored") - return - } - - if r.lastLogIndex+1 != l.Index { - // not at the head of the commit position - err = ErrInvalidLog - log.WithFields(log.Fields{ - "lastLogIndex": r.lastLogIndex, - "index": l.Index, - }).WithError(err).Debug("check log index correctness failed") - return - } - - // get log - var lastLog Log - if err = r.logStore.GetLog(l.Index, &lastLog); err != nil { - log.WithError(err).Debug("get last log failed") - return - } - - // rollback on storage - if err = r.config.Storage.Rollback(r.currentContext, l.Data); err != nil { - log.WithError(err).Warning("call storage rollback failed") - return - } - - // rewind log, can be failed, since committedIndex is not updated - r.logStore.DeleteRange(r.lastLogIndex+1, l.Index) - - // set state to idle - r.setState(Idle) - - return - }()) -} - -// Start a goroutine and properly handle the race between a routine -// starting and incrementing, and exiting and decrementing. 
-func (r *TwoPCRunner) goFunc(f func()) { - r.routinesGroup.Add(1) - go func() { - defer r.routinesGroup.Done() - f() - }() -} - -type localFunc func(context.Context) error -type localCommitFunc func(context.Context) (interface{}, error) - -type localWrapper struct { - prepare localFunc - rollback localFunc - commit localCommitFunc -} - -func newLocalWrapper(prepare localFunc, rollback localFunc, commit localCommitFunc) *localWrapper { - return &localWrapper{ - prepare: prepare, - rollback: rollback, - commit: commit, - } -} - -func (lw *localWrapper) Prepare(ctx context.Context, _ twopc.WriteBatch) error { - return lw.prepare(ctx) -} - -func (lw *localWrapper) Commit(ctx context.Context, _ twopc.WriteBatch) (interface{}, error) { - return lw.commit(ctx) -} - -func (lw *localWrapper) Rollback(ctx context.Context, _ twopc.WriteBatch) error { - return lw.rollback(ctx) -} - -// NewTwoPCWorkerWrapper returns a wrapper for remote worker. -func NewTwoPCWorkerWrapper(runner *TwoPCRunner, nodeID proto.NodeID) *TwoPCWorkerWrapper { - return &TwoPCWorkerWrapper{ - nodeID: nodeID, - runner: runner, - } -} - -// Prepare implements twopc.Worker.Prepare. -func (tpww *TwoPCWorkerWrapper) Prepare(ctx context.Context, wb twopc.WriteBatch) error { - // extract log - l, ok := wb.(*Log) - if !ok { - return ErrInvalidLog - } - - _, err := tpww.callRemote(ctx, "Prepare", l) - return err -} - -// Commit implements twopc.Worker.Commit. -func (tpww *TwoPCWorkerWrapper) Commit(ctx context.Context, wb twopc.WriteBatch) (interface{}, error) { - // extract log - l, ok := wb.(*Log) - if !ok { - return nil, ErrInvalidLog - } - - return tpww.callRemote(ctx, "Commit", l) -} - -// Rollback implements twopc.Worker.Rollback. 
-func (tpww *TwoPCWorkerWrapper) Rollback(ctx context.Context, wb twopc.WriteBatch) error { - // extract log - l, ok := wb.(*Log) - if !ok { - return ErrInvalidLog - } - - _, err := tpww.callRemote(ctx, "Rollback", l) - return err -} - -func (tpww *TwoPCWorkerWrapper) callRemote(ctx context.Context, method string, log *Log) (res []byte, err error) { - return tpww.runner.transport.Request(ctx, tpww.nodeID, method, log) -} - -var ( - _ Config = &TwoPCConfig{} - _ Runner = &TwoPCRunner{} - _ twopc.Worker = &TwoPCWorkerWrapper{} -) diff --git a/kayak/twopc_runner_test.go b/kayak/twopc_runner_test.go deleted file mode 100644 index 1423db66d..000000000 --- a/kayak/twopc_runner_test.go +++ /dev/null @@ -1,995 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "context" - "errors" - "sync" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" - "github.com/stretchr/testify/mock" -) - -func TestTwoPCRunner_Init(t *testing.T) { - // invalid config - Convey("test invalid config", t, func() { - runner := NewTwoPCRunner() - config := &MockConfig{} - err := runner.Init(config, nil, nil, nil, nil) - So(err, ShouldNotBeNil) - }) - - Convey("test nil parameters", t, func() { - runner := NewTwoPCRunner() - config := &TwoPCConfig{} - err := runner.Init(config, nil, nil, nil, nil) - So(err, ShouldNotBeNil) - }) - - Convey("test sign broken peers", t, func() { - runner := NewTwoPCRunner() - log.SetLevel(log.FatalLevel) - config := &TwoPCConfig{ - RuntimeConfig: RuntimeConfig{}, - } - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "happy", - }, - }) - // change term to invalidate signature - peers.Term = 2 - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - mockLogStore := &MockLogStore{} - mockStableStore := &MockStableStore{} - mockTransport := mockRouter.getTransport("happy") - testLog := &Log{ - Term: 1, - Index: 1, - } - testLog.ComputeHash() - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockStableStore.On("SetUint64", keyCurrentTerm, uint64(2)).Return(nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). 
- Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = *testLog - }) - mockLogStore.On("LastIndex").Return(uint64(2), nil) - mockLogStore.On("DeleteRange", - mock.AnythingOfType("uint64"), mock.AnythingOfType("uint64")).Return(nil) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("test log restore", t, func() { - runner := NewTwoPCRunner() - log.SetLevel(log.FatalLevel) - config := &TwoPCConfig{ - RuntimeConfig: RuntimeConfig{}, - } - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "happy", - }, - }) - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - mockLogStore := &MockLogStore{} - mockStableStore := &MockStableStore{} - mockTransport := mockRouter.getTransport("happy") - unknownErr := errors.New("unknown error") - - Convey("failed getting currentTerm from log", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(0), unknownErr) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("currentTerm in log older than term in peers", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(2), nil) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("get last committed index failed", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(0), unknownErr) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("get last committed log data failed", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockLogStore.On("GetLog", uint64(1), 
mock.Anything).Return(unknownErr) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("last committed log with higher term than peers", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = Log{ - Term: 2, - Index: 1, - } - }) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("last committed log not equal to index field", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = Log{ - Term: 1, - Index: 2, - } - }) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("get last index failed", func() { - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). 
- Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = Log{ - Term: 1, - Index: 1, - } - }) - mockLogStore.On("LastIndex").Return(uint64(0), unknownErr) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - So(err, ShouldNotBeNil) - }) - - Convey("last index overlaps committed index", func() { - testLog := &Log{ - Term: 1, - Index: 1, - } - testLog.ComputeHash() - mockStableStore.On("GetUint64", keyCurrentTerm).Return(uint64(1), nil) - mockStableStore.On("GetUint64", keyCommittedIndex).Return(uint64(1), nil) - mockStableStore.On("SetUint64", keyCurrentTerm, uint64(1)).Return(nil) - mockLogStore.On("GetLog", uint64(1), mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - arg := args.Get(1).(*Log) - *arg = *testLog - }) - mockLogStore.On("LastIndex").Return(uint64(2), nil) - mockLogStore.On("DeleteRange", - mock.AnythingOfType("uint64"), mock.AnythingOfType("uint64")).Return(nil) - - err := runner.Init(config, peers, mockLogStore, mockStableStore, mockTransport) - mockLogStore.AssertCalled(t, "DeleteRange", uint64(2), uint64(2)) - - So(err, ShouldBeNil) - So(runner.currentTerm, ShouldEqual, uint64(1)) - So(runner.lastLogTerm, ShouldEqual, uint64(1)) - So(runner.lastLogIndex, ShouldEqual, 1) - So(runner.lastLogHash, ShouldNotBeNil) - So(runner.lastLogHash.IsEqual(&testLog.Hash), ShouldBeTrue) - }) - }) -} - -func TestTwoPCRunner_Apply(t *testing.T) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - - type createMockRes struct { - runner *TwoPCRunner - transport *MockTransport - worker *MockWorker - config *TwoPCConfig - logStore *MockLogStore - stableStore *MockStableStore - } - - createMock := func(nodeID proto.NodeID) (res *createMockRes) { - res = &createMockRes{} - log.SetLevel(log.FatalLevel) - res.runner = NewTwoPCRunner() - res.transport = mockRouter.getTransport(nodeID) - res.worker = &MockWorker{} - res.config = 
&TwoPCConfig{ - RuntimeConfig: RuntimeConfig{ - RootDir: "test_dir", - LocalID: nodeID, - Runner: res.runner, - Transport: res.transport, - ProcessTimeout: time.Millisecond * 300, - }, - Storage: res.worker, - } - res.logStore = &MockLogStore{} - res.stableStore = &MockStableStore{} - - // init with no log and no term info - res.stableStore.On("GetUint64", keyCurrentTerm).Return(uint64(0), nil) - res.stableStore.On("GetUint64", keyCommittedIndex).Return(uint64(0), nil) - res.stableStore.On("SetUint64", keyCurrentTerm, uint64(1)).Return(nil) - res.logStore.On("LastIndex").Return(uint64(0), nil) - return - } - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower", - }, - }) - - Convey("call process on no leader", t, func() { - mockRouter.ResetAll() - mockRes := createMock("follower") - - err := mockRes.runner.Init(mockRes.config, peers, mockRes.logStore, mockRes.stableStore, mockRes.transport) - - So(err, ShouldBeNil) - So(mockRes.runner.role, ShouldEqual, proto.Follower) - So(mockRes.runner.leader.ID, ShouldEqual, proto.NodeID("leader")) - - // try call process - testPayload := []byte("test data") - _, _, err = mockRes.runner.Apply(testPayload) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrNotLeader) - }) - - Convey("call process on leader with single node", t, func() { - mockRouter.ResetAll() - - // change server id to leader and set peers to single node - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - }) - mockRes := createMock("leader") - - err := mockRes.runner.Init(mockRes.config, peers, mockRes.logStore, mockRes.stableStore, mockRes.transport) - - So(err, ShouldBeNil) - So(mockRes.runner.role, ShouldEqual, proto.Leader) - So(mockRes.runner.leader.ID, ShouldEqual, proto.NodeID("leader")) - - Convey("commit", func() { - testPayload := []byte("test data") - - // mock worker - callOrder := &CallCollector{} - mockRes.worker.On("Prepare", 
mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - mockRes.logStore.On("StoreLog", mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("store_log") - }) - mockRes.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - mockRes.stableStore.On("SetUint64", keyCommittedIndex, uint64(1)). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("update_committed") - }) - - // try call process - var offset uint64 - _, offset, err = mockRes.runner.Apply(testPayload) - So(err, ShouldBeNil) - So(offset, ShouldEqual, uint64(1)) - - // test call orders - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "store_log", - "commit", - "update_committed", - }) - }) - - Convey("rollback", func() { - testPayload := []byte("test data") - - // mock worker - callOrder := &CallCollector{} - unknownErr := errors.New("unknown error") - mockRes.worker.On("Prepare", mock.Anything, testPayload). - Return(unknownErr).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - mockRes.logStore.On("StoreLog", mock.AnythingOfType("*kayak.Log")). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("store_log") - }) - mockRes.worker.On("Rollback", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("rollback") - }) - mockRes.logStore.On("DeleteRange", uint64(1), uint64(1)). 
- Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("truncate_log") - }) - - // try call process - _, _, err = mockRes.runner.Apply(testPayload) - So(err, ShouldNotBeNil) - - // no log should be written to local log store after failed preparing - mockRes.logStore.AssertNotCalled(t, "StoreLog", mock.AnythingOfType("*kayak.Log")) - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "truncate_log", - "rollback", - }) - }) - - Convey("prepare timeout", FailureContinues, func(c C) { - testPayload := []byte("test data") - unknownErr := errors.New("unknown error") - mockRes.worker.On("Prepare", mock.Anything, testPayload). - Return(func(ctx context.Context, _ twopc.WriteBatch) error { - c.So(ctx.Err(), ShouldNotBeNil) - return unknownErr - }).After(time.Millisecond * 400) - mockRes.worker.On("Rollback", mock.Anything, testPayload).Return(nil) - mockRes.logStore.On("DeleteRange", uint64(1), uint64(1)).Return(nil) - - // try call process - _, _, err = mockRes.runner.Apply(testPayload) - - So(err, ShouldNotBeNil) - }) - - Convey("commit timeout", FailureContinues, func(c C) { - testPayload := []byte("test data") - unknownErr := errors.New("unknown error") - mockRes.worker.On("Prepare", mock.Anything, testPayload). - Return(nil) - mockRes.logStore.On("StoreLog", mock.AnythingOfType("*kayak.Log")). - Return(nil) - mockRes.worker.On("Commit", mock.Anything, testPayload). - Return(nil, func(ctx context.Context, _ twopc.WriteBatch) error { - c.So(ctx.Err(), ShouldNotBeNil) - return unknownErr - }).After(time.Millisecond * 400) - mockRes.stableStore.On("SetUint64", keyCommittedIndex, uint64(1)). - Return(nil) - - // try call process - _, _, err = mockRes.runner.Apply(testPayload) - - So(err, ShouldNotBeNil) - }) - - Convey("rollback timeout", FailureContinues, func(c C) { - testPayload := []byte("test data") - prepareErr := errors.New("prepare error") - rollbackErr := errors.New("rollback error") - mockRes.worker.On("Prepare", mock.Anything, testPayload). 
- Return(prepareErr) - mockRes.logStore.On("StoreLog", mock.AnythingOfType("*kayak.Log")). - Return(nil) - mockRes.worker.On("Rollback", mock.Anything, testPayload). - Return(func(ctx context.Context, _ twopc.WriteBatch) error { - c.So(ctx.Err(), ShouldNotBeNil) - return rollbackErr - }).After(time.Millisecond * 400) - mockRes.logStore.On("DeleteRange", uint64(1), uint64(1)).Return(nil) - - // try call process - _, _, err = mockRes.runner.Apply(testPayload) - - // rollback error is ignored - So(err, ShouldNotBeNil) - So(err, ShouldEqual, prepareErr) - }) - }) - - Convey("call process on leader with multiple nodes", t, func(c C) { - mockRouter.ResetAll() - - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - initMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - store := NewMockInmemStore() - err := r.runner.Init(r.config, peers, store, store, r.transport) - So(err, ShouldBeNil) - } - } - - Convey("commit", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower2") - - // init - initMock(lMock, f1Mock, f2Mock) - - testPayload := []byte("test data") - - callOrder := &CallCollector{} - f1Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f1Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - f2Mock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). 
- Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - lMock.worker.On("Commit", mock.Anything, testPayload). - Return(nil, nil).Run(func(args mock.Arguments) { - callOrder.Append("commit") - }) - - // try call process - _, _, err := lMock.runner.Apply(testPayload) - - So(err, ShouldBeNil) - - // test call orders - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "commit", - "commit", - "commit", - }) - - lastLogHash := lMock.runner.lastLogHash - lastLogIndex := lMock.runner.lastLogIndex - lastLogTerm := lMock.runner.lastLogTerm - - So(lastLogHash, ShouldNotBeNil) - So(lastLogIndex, ShouldEqual, uint64(1)) - So(lastLogTerm, ShouldEqual, uint64(1)) - - // check with log - var firstLog Log - err = lMock.runner.logStore.GetLog(1, &firstLog) - So(err, ShouldBeNil) - - So(firstLog.LastHash, ShouldBeNil) - So(lastLogHash.IsEqual(&firstLog.Hash), ShouldBeTrue) - So(lastLogIndex, ShouldResemble, firstLog.Index) - So(lastLogTerm, ShouldResemble, firstLog.Term) - - // commit second log - callOrder.Reset() - - _, _, err = lMock.runner.Apply(testPayload) - - So(err, ShouldBeNil) - - // test call orders - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "commit", - "commit", - "commit", - }) - - // check with log - var secondLog Log - err = lMock.runner.logStore.GetLog(2, &secondLog) - So(err, ShouldBeNil) - - So(secondLog.LastHash.IsEqual(lastLogHash), ShouldBeTrue) - }) - - Convey("rollback", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower2") - - // init - initMock(lMock, f1Mock, f2Mock) - - testPayload := []byte("test data") - - callOrder := &CallCollector{} - unknownErr := errors.New("unknown error") - // f1 prepare with error - f1Mock.worker.On("Prepare", mock.Anything, testPayload). 
- Return(unknownErr).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f1Mock.worker.On("Rollback", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("rollback") - }) - // f2 prepare with no error - f2Mock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - f2Mock.worker.On("Rollback", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("rollback") - }) - lMock.worker.On("Prepare", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("prepare") - }) - lMock.worker.On("Rollback", mock.Anything, testPayload). - Return(nil).Run(func(args mock.Arguments) { - callOrder.Append("rollback") - }) - - // try call process - origLevel := log.GetLevel() - log.SetLevel(log.DebugLevel) - _, _, err := lMock.runner.Apply(testPayload) - log.SetLevel(origLevel) - - So(err, ShouldNotBeNil) - So(err, ShouldEqual, unknownErr) - - // test call orders - // FIXME, one prepare error worker will not trigger rollback function on storage - So(callOrder.Get(), ShouldResemble, []string{ - "prepare", - "prepare", - "prepare", - "rollback", - "rollback", - //"rollback", - }) - }) - }) - - Convey("sybil test", t, func() { - mockRouter.ResetAll() - - peers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - initMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - err := r.runner.Init(r.config, peers, r.logStore, r.stableStore, r.transport) - So(err, ShouldBeNil) - } - } - - Convey("request from non-leader", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower1") - - // init - initMock(lMock, f1Mock, f2Mock) - - // fake request - testPayload := []byte("test data") - 
fakeLog := &Log{ - Term: 1, - Index: 1, - Data: testPayload, - } - - var err error - var rv []byte - rv, err = f1Mock.transport.Request( - context.Background(), - f2Mock.config.LocalID, - "Prepare", - fakeLog, - ) - - So(rv, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err.Error(), ShouldEqual, ErrInvalidRequest.Error()) - }) - - Convey("send invalid request", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - - // init - initMock(lMock, f1Mock) - - // fake request - var err error - var rv []byte - rv, err = lMock.transport.Request( - context.Background(), - f1Mock.config.LocalID, - "invalid request", - nil, - ) - - So(rv, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err.Error(), ShouldEqual, ErrInvalidRequest.Error()) - }) - - Convey("log could not be decoded", func() { - lMock := createMock("leader") - f1Mock := createMock("follower1") - - // init - initMock(lMock, f1Mock) - - var err error - var rv []byte - rv, err = lMock.transport.Request( - context.Background(), - f1Mock.config.LocalID, - "Prepare", - nil, - ) - - So(rv, ShouldBeNil) - So(err, ShouldNotBeNil) - So(err, ShouldEqual, ErrInvalidLog) - }) - }) -} - -func TestTwoPCRunner_UpdatePeers(t *testing.T) { - mockRouter := &MockTransportRouter{ - transports: make(map[proto.NodeID]*MockTransport), - } - - type createMockRes struct { - runner *TwoPCRunner - transport *MockTransport - worker *MockWorker - config *TwoPCConfig - logStore *MockLogStore - stableStore *MockStableStore - } - - createMock := func(nodeID proto.NodeID) (res *createMockRes) { - res = &createMockRes{} - log.SetLevel(log.FatalLevel) - res.runner = NewTwoPCRunner() - res.transport = mockRouter.getTransport(nodeID) - res.worker = &MockWorker{} - res.config = &TwoPCConfig{ - RuntimeConfig: RuntimeConfig{ - RootDir: "test_dir", - LocalID: nodeID, - Runner: res.runner, - Transport: res.transport, - ProcessTimeout: time.Millisecond * 800, - }, - Storage: res.worker, - } - res.logStore = &MockLogStore{} - 
res.stableStore = &MockStableStore{} - - // init with no log and no term info - res.stableStore.On("GetUint64", keyCurrentTerm).Return(uint64(0), nil) - res.stableStore.On("GetUint64", keyCommittedIndex).Return(uint64(0), nil) - res.stableStore.On("SetUint64", keyCurrentTerm, uint64(2)).Return(nil) - res.logStore.On("LastIndex").Return(uint64(0), nil) - return - } - peers := testPeersFixture(2, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - initMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - err := r.runner.Init(r.config, peers, r.logStore, r.stableStore, r.transport) - So(err, ShouldBeNil) - } - } - testMock := func(peers *Peers, testFunc func(*createMockRes, error), mocks ...*createMockRes) { - wg := new(sync.WaitGroup) - - for _, r := range mocks { - wg.Add(1) - go func(m *createMockRes) { - defer wg.Done() - err := m.runner.UpdatePeers(peers) - if testFunc != nil { - testFunc(m, err) - } - }(r) - } - - wg.Wait() - } - - Convey("update peers with invalid configuration", t, func() { - mockRouter.ResetAll() - - lMock := createMock("leader") - f1Mock := createMock("follower1") - f2Mock := createMock("follower2") - - // init - initMock(lMock, f1Mock, f2Mock) - - Convey("same peers term", FailureContinues, func(c C) { - newPeers := testPeersFixture(2, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - testFunc := func(_ *createMockRes, err error) { - c.So(err, ShouldBeNil) - } - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - }) - - Convey("invalid peers term", FailureContinues, func(c C) { - newPeers := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - testFunc 
:= func(_ *createMockRes, err error) { - c.So(err, ShouldNotBeNil) - c.So(err, ShouldEqual, ErrInvalidConfig) - } - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - }) - - Convey("invalid peers signature", FailureContinues, func(c C) { - newPeers := testPeersFixture(4, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - newPeers.Term = 3 - - testFunc := func(_ *createMockRes, err error) { - c.So(err, ShouldNotBeNil) - c.So(err, ShouldEqual, ErrInvalidConfig) - } - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - }) - - Convey("peers update success", FailureContinues, func(c C) { - updateMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - r.stableStore.On("SetUint64", keyCurrentTerm, uint64(3)).Return(nil) - } - } - - updateMock(lMock, f1Mock, f2Mock) - - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - testFunc := func(r *createMockRes, err error) { - c.So(err, ShouldBeNil) - c.So(r.runner.currentTerm, ShouldEqual, uint64(3)) - c.So(r.runner.peers, ShouldResemble, newPeers) - r.stableStore.AssertCalled(t, "SetUint64", keyCurrentTerm, uint64(3)) - } - - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - }) - - Convey("peers update include leader change", FailureContinues, func(c C) { - updateMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - r.stableStore.On("SetUint64", keyCurrentTerm, uint64(3)).Return(nil) - } - } - - updateMock(lMock, f1Mock, f2Mock) - - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Follower, - ID: "leader", - }, - { - Role: proto.Leader, - ID: "follower1", - }, - { - Role: proto.Follower, - ID: "follower2", - }, - }) - - testFunc := func(r *createMockRes, err error) { - c.So(err, ShouldBeNil) - 
c.So(r.runner.currentTerm, ShouldEqual, uint64(3)) - c.So(r.runner.peers, ShouldResemble, newPeers) - - switch r.config.LocalID { - case "leader": - c.So(r.runner.role, ShouldEqual, proto.Follower) - case "follower1": - c.So(r.runner.role, ShouldEqual, proto.Leader) - case "follower2": - c.So(r.runner.role, ShouldEqual, proto.Follower) - } - - r.stableStore.AssertCalled(t, "SetUint64", keyCurrentTerm, uint64(3)) - } - - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - - // test call process - testPayload := []byte("test data") - _, _, err := lMock.runner.Apply(testPayload) - - // no longer leader - So(err, ShouldNotBeNil) - }) - - Convey("peers update with shutdown", FailureContinues, func(c C) { - updateMock := func(mocks ...*createMockRes) { - for _, r := range mocks { - r.stableStore.On("SetUint64", keyCurrentTerm, uint64(3)).Return(nil) - } - } - - updateMock(lMock, f1Mock, f2Mock) - - newPeers := testPeersFixture(3, []*Server{ - { - Role: proto.Leader, - ID: "leader", - }, - { - Role: proto.Follower, - ID: "follower1", - }, - }) - - testFunc := func(r *createMockRes, err error) { - c.So(err, ShouldBeNil) - c.So(r.runner.currentTerm, ShouldEqual, uint64(3)) - c.So(r.runner.peers, ShouldResemble, newPeers) - r.stableStore.AssertCalled(t, "SetUint64", keyCurrentTerm, uint64(3)) - } - - testMock(newPeers, testFunc, lMock, f1Mock, f2Mock) - - So(f2Mock.runner.currentState, ShouldEqual, Shutdown) - }) - }) -} diff --git a/kayak/types.go b/kayak/types.go deleted file mode 100644 index fd0d2f6a5..000000000 --- a/kayak/types.go +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/binary" - "fmt" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -//go:generate hsp -//hsp:ignore RuntimeConfig - -// Log entries are replicated to all members of the Kayak cluster -// and form the heart of the replicated state machine. -type Log struct { - // Index holds the index of the log entry. - Index uint64 - - // Term holds the election term of the log entry. - Term uint64 - - // Data holds the log entry's type-specific data. - Data []byte - - // LastHash is log entry hash - LastHash *hash.Hash - - // Hash is current log entry hash - Hash hash.Hash -} - -// ComputeHash updates Hash. -func (l *Log) ComputeHash() { - l.Hash.SetBytes(hash.DoubleHashB(l.Serialize())) -} - -// VerifyHash validates hash field. -func (l *Log) VerifyHash() bool { - h := hash.DoubleHashH(l.Serialize()) - return h.IsEqual(&l.Hash) -} - -// Serialize transform log structure to bytes. 
-func (l *Log) Serialize() []byte { - if l == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, l.Index) - binary.Write(buf, binary.LittleEndian, l.Term) - binary.Write(buf, binary.LittleEndian, uint64(len(l.Data))) - buf.Write(l.Data) - if l.LastHash != nil { - buf.Write(l.LastHash[:]) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - -// LogStore is used to provide an interface for storing -// and retrieving logs in a durable fashion. -type LogStore interface { - // FirstIndex returns the first index written. 0 for no entries. - FirstIndex() (uint64, error) - - // LastIndex returns the last index written. 0 for no entries. - LastIndex() (uint64, error) - - // GetLog gets a log entry at a given index. - GetLog(index uint64, l *Log) error - - // StoreLog stores a log entry. - StoreLog(l *Log) error - - // StoreLogs stores multiple log entries. - StoreLogs(logs []*Log) error - - // DeleteRange deletes a range of log entries. The range is inclusive. - DeleteRange(min, max uint64) error -} - -// StableStore is used to provide stable storage -// of key configurations to ensure safety. -type StableStore interface { - Set(key []byte, val []byte) error - - // Get returns the value for key, or an empty byte slice if key was not found. - Get(key []byte) ([]byte, error) - - SetUint64(key []byte, val uint64) error - - // GetUint64 returns the uint64 value for key, or 0 if key was not found. - GetUint64(key []byte) (uint64, error) -} - -// ServerState define the state of node to be checked by commit/peers update logic. -type ServerState int - -// Note: Don't renumber these, since the numbers are written into the log. -const ( - // Idle indicates no running transaction. - Idle ServerState = iota - - // Prepared indicates in-flight transaction prepared. 
- Prepared - - // Shutdown state - Shutdown -) - -func (s ServerState) String() string { - switch s { - case Idle: - return "Idle" - case Prepared: - return "Prepared" - } - return "Unknown" -} - -// Server tracks the information about a single server in a configuration. -type Server struct { - // Suffrage determines whether the server gets a vote. - Role proto.ServerRole - // ID is a unique string identifying this server for all time. - ID proto.NodeID - // Public key - PubKey *asymmetric.PublicKey -} - -func (s *Server) String() string { - return fmt.Sprintf("Server id:%s role:%s pubKey:%s", - s.ID, s.Role, - base64.StdEncoding.EncodeToString(s.PubKey.Serialize())) -} - -// Serialize server struct to bytes. -func (s *Server) Serialize() []byte { - if s == nil { - return []byte{'\000'} - } - - buffer := new(bytes.Buffer) - binary.Write(buffer, binary.LittleEndian, s.Role) - binary.Write(buffer, binary.LittleEndian, uint64(len(s.ID))) - buffer.WriteString(string(s.ID)) - if s.PubKey != nil { - buffer.Write(s.PubKey.Serialize()) - } else { - buffer.WriteRune('\000') - } - - return buffer.Bytes() -} - -// Peers defines peer configuration. -type Peers struct { - Term uint64 - Leader *Server - Servers []*Server - PubKey *asymmetric.PublicKey - Signature *asymmetric.Signature -} - -// Clone makes a deep copy of a Peers. -func (c *Peers) Clone() (copy Peers) { - copy.Term = c.Term - copy.Leader = c.Leader - copy.Servers = append(copy.Servers, c.Servers...) - copy.PubKey = c.PubKey - copy.Signature = c.Signature - return -} - -// Serialize peers struct to bytes. 
-func (c *Peers) Serialize() []byte { - if c == nil { - return []byte{'\000'} - } - - buffer := new(bytes.Buffer) - binary.Write(buffer, binary.LittleEndian, c.Term) - binary.Write(buffer, binary.LittleEndian, c.Leader.Serialize()) - binary.Write(buffer, binary.LittleEndian, uint64(len(c.Servers))) - for _, s := range c.Servers { - binary.Write(buffer, binary.LittleEndian, s.Serialize()) - } - if c.PubKey != nil { - buffer.Write(c.PubKey.Serialize()) - } else { - buffer.WriteRune('\000') - } - return buffer.Bytes() -} - -// Sign generates signature. -func (c *Peers) Sign(signer *asymmetric.PrivateKey) error { - c.PubKey = signer.PubKey() - h := hash.THashB(c.Serialize()) - sig, err := signer.Sign(h) - - if err != nil { - return fmt.Errorf("sign peer configuration failed: %s", err.Error()) - } - - c.Signature = sig - - return nil -} - -// Verify verify signature. -func (c *Peers) Verify() bool { - h := hash.THashB(c.Serialize()) - - return c.Signature.Verify(h, c.PubKey) -} - -func (c *Peers) String() string { - return fmt.Sprintf("Peers term:%v nodesCnt:%v leader:%s signature:%s", - c.Term, len(c.Servers), c.Leader.ID, - base64.StdEncoding.EncodeToString(c.Signature.Serialize())) -} - -// Find finds the index of the server with the specified key in the server list. -func (c *Peers) Find(key proto.NodeID) (index int32, found bool) { - if c.Servers != nil { - for i, s := range c.Servers { - if s.ID == key { - index = int32(i) - found = true - break - } - } - } - - return -} - -// RuntimeConfig defines minimal configuration fields for consensus runner. -type RuntimeConfig struct { - // RootDir is the root dir for runtime - RootDir string - - // LocalID is the unique ID for this server across all time. 
- LocalID proto.NodeID - - // Runner defines the runner type - Runner Runner - - // Transport defines the dialer type - Transport Transport - - // ProcessTimeout defines whole process timeout - ProcessTimeout time.Duration - - // AutoBanCount defines how many times a nodes will be banned from execution - AutoBanCount uint32 -} - -// Config interface for abstraction. -type Config interface { - // Get config returns runtime config - GetRuntimeConfig() *RuntimeConfig -} - -// Request defines a transport request payload. -type Request interface { - GetPeerNodeID() proto.NodeID - GetMethod() string - GetLog() *Log - SendResponse([]byte, error) error -} - -// Transport adapter for abstraction. -type Transport interface { - Init() error - - // Request - Request(ctx context.Context, nodeID proto.NodeID, method string, log *Log) ([]byte, error) - - // Process - Process() <-chan Request - - Shutdown() error -} - -// Runner adapter for different consensus protocols including Eventual Consistency/2PC/3PC. -type Runner interface { - // Init defines setup logic. - Init(config Config, peers *Peers, logs LogStore, stable StableStore, transport Transport) error - - // UpdatePeers defines peer configuration update logic. - UpdatePeers(peers *Peers) error - - // Apply defines log replication and log commit logic - // and should be called by Leader role only. - Apply(data []byte) (interface{}, uint64, error) - - // Shutdown defines destruct logic. - Shutdown(wait bool) error -} diff --git a/kayak/types/config.go b/kayak/types/config.go new file mode 100644 index 000000000..0407a5e4a --- /dev/null +++ b/kayak/types/config.go @@ -0,0 +1,49 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/proto" +) + +// RuntimeConfig defines the runtime config of kayak. +type RuntimeConfig struct { + // underlying handler. + Handler Handler + // minimum rpc success node percent requirement for prepare operation. + PrepareThreshold float64 + // minimum rpc success node percent requirement for commit operation. + CommitThreshold float64 + // maximum allowed time for prepare operation. + PrepareTimeout time.Duration + // maximum allowed time for commit operation. + CommitTimeout time.Duration + // init peers of node. + Peers *proto.Peers + // wal for kayak. + Wal Wal + // current node id. + NodeID proto.NodeID + // current instance id. + InstanceID string + // mux service name. + ServiceName string + // mux service method. + MethodName string +} diff --git a/kayak/api/doc.go b/kayak/types/doc.go similarity index 86% rename from kayak/api/doc.go rename to kayak/types/doc.go index 6a9ec371c..28c02e239 100644 --- a/kayak/api/doc.go +++ b/kayak/types/doc.go @@ -14,7 +14,4 @@ * limitations under the License. */ -/* -Package api provides simplified kayak api with pre-defined practical options. -*/ -package api +package types diff --git a/kayak/types/errors.go b/kayak/types/errors.go new file mode 100644 index 000000000..baa5b824b --- /dev/null +++ b/kayak/types/errors.go @@ -0,0 +1,36 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import "github.com/pkg/errors" + +var ( + // ErrNotLeader represents current node is not a peer leader. + ErrNotLeader = errors.New("not leader") + // ErrNotFollower represents current node is not a peer follower. + ErrNotFollower = errors.New("not follower") + // ErrPrepareTimeout represents timeout failure for prepare operation. + ErrPrepareTimeout = errors.New("prepare timeout") + // ErrPrepareFailed represents failure for prepare operation. + ErrPrepareFailed = errors.New("prepare failed") + // ErrInvalidLog represents log is invalid. + ErrInvalidLog = errors.New("invalid log") + // ErrNotInPeer represents current node does not exists in peer list. + ErrNotInPeer = errors.New("node not in peer") + // ErrNeedRecovery represents current follower node needs recovery, back-off is required by leader. + ErrNeedRecovery = errors.New("need recovery") +) diff --git a/kayak/types/handler.go b/kayak/types/handler.go new file mode 100644 index 000000000..c74b053e2 --- /dev/null +++ b/kayak/types/handler.go @@ -0,0 +1,25 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +// Handler defines the main underlying fsm of kayak. +type Handler interface { + EncodePayload(req interface{}) (data []byte, err error) + DecodePayload(data []byte) (req interface{}, err error) + Check(request interface{}) error + Commit(request interface{}) (result interface{}, err error) +} diff --git a/kayak/types/log.go b/kayak/types/log.go new file mode 100644 index 000000000..a69ce2ef5 --- /dev/null +++ b/kayak/types/log.go @@ -0,0 +1,70 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "github.com/CovenantSQL/CovenantSQL/proto" +) + +// LogType defines the log type. +type LogType uint16 + +const ( + // LogPrepare defines the prepare phase of a commit. + LogPrepare LogType = iota + // LogRollback defines the rollback phase of a commit. + LogRollback + // LogCommit defines the commit phase of a commit. + LogCommit + // LogBarrier defines barrier log, all open windows should be waiting this operations to complete. + LogBarrier + // LogNoop defines noop log. 
+ LogNoop +) + +func (t LogType) String() (s string) { + switch t { + case LogPrepare: + return "LogPrepare" + case LogRollback: + return "LogRollback" + case LogCommit: + return "LogCommit" + case LogBarrier: + return "LogBarrier" + case LogNoop: + return "LogNoop" + default: + return + } +} + +// LogHeader defines the checksum header structure. +type LogHeader struct { + Index uint64 // log index + Version uint64 // log version + Type LogType // log type + Producer proto.NodeID // producer node + DataLength uint64 // data length +} + +// Log defines the log data structure. +type Log struct { + LogHeader + // Data could be detected and handle decode properly by log layer + Data []byte +} diff --git a/kayak/types/rpc.go b/kayak/types/rpc.go new file mode 100644 index 000000000..7b96f42aa --- /dev/null +++ b/kayak/types/rpc.go @@ -0,0 +1,26 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import "github.com/CovenantSQL/CovenantSQL/proto" + +// RPCRequest defines the RPC request entity. +type RPCRequest struct { + proto.Envelope + Instance string + Log *Log +} diff --git a/kayak/types/wal.go b/kayak/types/wal.go new file mode 100644 index 000000000..e955fa42f --- /dev/null +++ b/kayak/types/wal.go @@ -0,0 +1,27 @@ +/* + * Copyright 2018 The CovenantSQL Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +// Wal defines the log storage interface. +type Wal interface { + // sequential write + Write(*Log) error + // sequential read, return io.EOF if there is no more records to read + Read() (*Log, error) + // random access + Get(index uint64) (*Log, error) +} diff --git a/kayak/types_gen.go b/kayak/types_gen.go deleted file mode 100644 index 4ab3f6aff..000000000 --- a/kayak/types_gen.go +++ /dev/null @@ -1,190 +0,0 @@ -package kayak - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
- -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *Log) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 5 - o = append(o, 0x85, 0x85) - if z.LastHash == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.LastHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x85) - o = hsp.AppendBytes(o, z.Data) - o = append(o, 0x85) - if oTemp, err := z.Hash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x85) - o = hsp.AppendUint64(o, z.Index) - o = append(o, 0x85) - o = hsp.AppendUint64(o, z.Term) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Log) Msgsize() (s int) { - s = 1 + 9 - if z.LastHash == nil { - s += hsp.NilSize - } else { - s += z.LastHash.Msgsize() - } - s += 5 + hsp.BytesPrefixSize + len(z.Data) + 5 + z.Hash.Msgsize() + 6 + hsp.Uint64Size + 5 + hsp.Uint64Size - return -} - -// MarshalHash marshals for hash -func (z *Peers) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 5 - o = append(o, 0x85, 0x85) - if z.Leader == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Leader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x85) - if z.PubKey == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.PubKey.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x85) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x85) - o = hsp.AppendArrayHeader(o, uint32(len(z.Servers))) - 
for za0001 := range z.Servers { - if z.Servers[za0001] == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Servers[za0001].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - } - o = append(o, 0x85) - o = hsp.AppendUint64(o, z.Term) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Peers) Msgsize() (s int) { - s = 1 + 7 - if z.Leader == nil { - s += hsp.NilSize - } else { - s += z.Leader.Msgsize() - } - s += 7 - if z.PubKey == nil { - s += hsp.NilSize - } else { - s += z.PubKey.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 8 + hsp.ArrayHeaderSize - for za0001 := range z.Servers { - if z.Servers[za0001] == nil { - s += hsp.NilSize - } else { - s += z.Servers[za0001].Msgsize() - } - } - s += 5 + hsp.Uint64Size - return -} - -// MarshalHash marshals for hash -func (z *Server) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83, 0x83) - if z.PubKey == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.PubKey.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x83) - if oTemp, err := z.ID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - if oTemp, err := z.Role.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Server) Msgsize() (s int) { - s = 1 + 7 - if z.PubKey == nil { - s += hsp.NilSize - } else { - s += z.PubKey.Msgsize() - } - s += 3 + z.ID.Msgsize() + 5 + z.Role.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z ServerState) MarshalHash() (o []byte, err error) { 
- var b []byte - o = hsp.Require(b, z.Msgsize()) - o = hsp.AppendInt(o, int(z)) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z ServerState) Msgsize() (s int) { - s = hsp.IntSize - return -} diff --git a/kayak/types_test.go b/kayak/types_test.go deleted file mode 100644 index 57a47a831..000000000 --- a/kayak/types_test.go +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -import ( - "fmt" - "testing" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - . 
"github.com/smartystreets/goconvey/convey" -) - -func TestLog_ComputeHash(t *testing.T) { - log1 := &Log{ - Index: 1, - Term: 1, - Data: []byte("happy"), - } - - log2 := &Log{ - Index: 1, - Term: 1, - Data: []byte("happy"), - } - - log1.ComputeHash() - log2.ComputeHash() - - Convey("same hash result on identical field value", t, func() { - equalHash := log1.Hash.IsEqual(&log2.Hash) - So(equalHash, ShouldBeTrue) - }) -} - -func TestLog_VerifyHash(t *testing.T) { - // Test with no LastHash - log1 := &Log{ - Index: 1, - Term: 1, - Data: []byte("happy"), - } - - log1.ComputeHash() - - Convey("verify correct hash", t, func() { - So(log1.VerifyHash(), ShouldBeTrue) - }) - - // Test including LastHash - log2 := &Log{ - Index: 2, - Term: 1, - Data: []byte("happy2"), - LastHash: &log1.Hash, - } - - log2.ComputeHash() - - Convey("verify correct hash", t, func() { - So(log2.VerifyHash(), ShouldBeTrue) - }) - - log2.Hash.SetBytes(hash.HashB([]byte("test generation"))) - - Convey("verify incorrect hash", t, func() { - So(log2.VerifyHash(), ShouldBeFalse) - }) -} - -func TestServer_Serialize(t *testing.T) { - testKey := []byte{ - 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, - 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, - 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, - 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, - 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, - 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, - 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, - 0xb4, 0x12, 0xa3, - } - - pubKey, err := asymmetric.ParsePubKey(testKey) - - if err != nil { - t.Fatalf("parse pubkey failed: %v", err.Error()) - } - - s := &Server{ - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - } - data := s.Serialize() - - // try to load data from serialization - s2 := &Server{ - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - } - data2 := s2.Serialize() - - Convey("test serialization", t, func() { - So(data, ShouldResemble, data2) - }) - 
- Convey("test serialize with nil PubKey", t, func() { - s.PubKey = nil - So(s.Serialize(), ShouldNotResemble, data2) - }) -} - -func TestPeers_Clone(t *testing.T) { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - _, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - - samplePeersConf := &Peers{ - Term: 1, - Leader: &Server{ - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - }, - Servers: []*Server{ - { - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - }, - }, - PubKey: pubKey, - } - - Convey("clone peers", t, func() { - peers := samplePeersConf.Clone() - So(peers.Term, ShouldEqual, samplePeersConf.Term) - So(peers.Leader, ShouldResemble, samplePeersConf.Leader) - So(peers.Servers, ShouldResemble, samplePeersConf.Servers) - So(peers.PubKey, ShouldResemble, samplePeersConf.PubKey) - So(peers.Signature, ShouldResemble, samplePeersConf.Signature) - }) -} - -func TestPeers_Find(t *testing.T) { - samplePeersConf := &Peers{ - Servers: []*Server{ - {ID: "X1"}, - {ID: "X2"}, - {ID: "X3"}, - {ID: "X4"}, - {ID: "X5"}, - }, - } - - Convey("find server", t, func() { - index, found := samplePeersConf.Find("X1") - So(found, ShouldBeTrue) - So(index, ShouldEqual, 0) - index, found = samplePeersConf.Find("X6") - So(found, ShouldBeFalse) - samplePeersConf.Servers = nil - index, found = samplePeersConf.Find("X6") - So(found, ShouldBeFalse) - }) -} - -func TestPeers_Sign(t *testing.T) { - testPriv := []byte{ - 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, - 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, - 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, - 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, - } - privKey, pubKey := asymmetric.PrivKeyFromBytes(testPriv) - peers := &Peers{ - Term: 1, - Leader: &Server{ - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - }, - Servers: []*Server{ 
- { - Role: proto.Leader, - ID: "happy", - PubKey: pubKey, - }, - }, - PubKey: pubKey, - } - - if err := peers.Sign(privKey); err != nil { - t.Fatalf("sign peer conf failed: %v", err.Error()) - } - Convey("verify signed peers", t, func() { - So(peers.Verify(), ShouldBeTrue) - }) - Convey("verify corrupted peers", t, func() { - peers.Term = 2 - So(peers.Verify(), ShouldBeFalse) - }) -} - -func TestToString(t *testing.T) { - Convey("ServerRole", t, func() { - So(fmt.Sprint(proto.Leader), ShouldEqual, "Leader") - So(fmt.Sprint(proto.Follower), ShouldEqual, "Follower") - So(fmt.Sprint(proto.ServerRole(100)), ShouldEqual, "Unknown") - }) - Convey("ServerState", t, func() { - So(fmt.Sprint(Idle), ShouldEqual, "Idle") - So(fmt.Sprint(Prepared), ShouldEqual, "Prepared") - So(fmt.Sprint(ServerState(100)), ShouldEqual, "Unknown") - }) - Convey("Server", t, func() { - s := &Server{ - Role: proto.Leader, - ID: "test", - } - So(fmt.Sprint(s), ShouldNotBeEmpty) - }) - Convey("Peers", t, func() { - p := testPeersFixture(1, []*Server{ - { - Role: proto.Leader, - ID: "test", - }, - }) - So(fmt.Sprint(p), ShouldNotBeEmpty) - }) -} diff --git a/kayak/transport/doc.go b/kayak/wal/doc.go similarity index 84% rename from kayak/transport/doc.go rename to kayak/wal/doc.go index c9307e311..953b5f875 100644 --- a/kayak/transport/doc.go +++ b/kayak/wal/doc.go @@ -14,7 +14,4 @@ * limitations under the License. */ -/* -Package transport implements applicable transport implementations for kayak runtime. -*/ -package transport +package wal diff --git a/kayak/errors.go b/kayak/wal/errors.go similarity index 62% rename from kayak/errors.go rename to kayak/wal/errors.go index 0df42ab4c..f54332685 100644 --- a/kayak/errors.go +++ b/kayak/wal/errors.go @@ -14,17 +14,17 @@ * limitations under the License. 
*/ -package kayak +package wal -import "errors" +import "github.com/pkg/errors" var ( - // ErrInvalidConfig defines invalid config error - ErrInvalidConfig = errors.New("invalid configuration") - // ErrInvalidLog defines invalid log error + // ErrWalClosed represents the log file is closed. + ErrWalClosed = errors.New("wal is closed") + // ErrInvalidLog represents the log object is invalid. ErrInvalidLog = errors.New("invalid log") - // ErrNotLeader defines not leader on log processing - ErrNotLeader = errors.New("not leader") - // ErrInvalidRequest indicate inconsistent state - ErrInvalidRequest = errors.New("invalid request") + // ErrAlreadyExists represents the log already exists. + ErrAlreadyExists = errors.New("log already exists") + // ErrNotExists represents the log does not exists. + ErrNotExists = errors.New("log not exists") ) diff --git a/kayak/wal/leveldb_wal.go b/kayak/wal/leveldb_wal.go new file mode 100644 index 000000000..813b8c02d --- /dev/null +++ b/kayak/wal/leveldb_wal.go @@ -0,0 +1,272 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package wal + +import ( + "bytes" + "encoding/binary" + "io" + "sort" + "sync" + "sync/atomic" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/pkg/errors" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + // logHeaderKeyPrefix defines the leveldb header key prefix. + logHeaderKeyPrefix = []byte{'L', 'H'} + // logDataKeyPrefix defines the leveldb data key prefix. + logDataKeyPrefix = []byte{'L', 'D'} + // baseIndexKey defines the base index key. + baseIndexKey = []byte{'B', 'I'} +) + +// LevelDBWal defines a toy wal using leveldb as storage. +type LevelDBWal struct { + db *leveldb.DB + it iterator.Iterator + base uint64 + closed uint32 + readLock sync.Mutex + read uint32 + pending []uint64 + pendingLock sync.Mutex +} + +// NewLevelDBWal returns new leveldb wal instance. +func NewLevelDBWal(filename string) (p *LevelDBWal, err error) { + p = &LevelDBWal{} + if p.db, err = leveldb.OpenFile(filename, nil); err != nil { + err = errors.Wrap(err, "open database failed") + return + } + + // load current base + var baseValue []byte + if baseValue, err = p.db.Get(baseIndexKey, nil); err == nil { + // decode base + p.base = p.bytesToUint64(baseValue) + } else { + err = nil + } + + return +} + +// Write implements Wal.Write. +func (p *LevelDBWal) Write(l *kt.Log) (err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + + if l == nil { + err = ErrInvalidLog + return + } + + if l.Index < p.base { + // already exists + err = ErrAlreadyExists + return + } + + // build header headerKey + headerKey := append(append([]byte(nil), logHeaderKeyPrefix...), p.uint64ToBytes(l.Index)...) 
+ + if _, err = p.db.Get(headerKey, nil); err != nil && err != leveldb.ErrNotFound { + err = errors.Wrap(err, "access leveldb failed") + return + } else if err == nil { + err = ErrAlreadyExists + return + } + + dataKey := append(append([]byte(nil), logDataKeyPrefix...), p.uint64ToBytes(l.Index)...) + + // write data first + var enc *bytes.Buffer + if enc, err = utils.EncodeMsgPack(l.Data); err != nil { + err = errors.Wrap(err, "encode log data failed") + return + } + + if err = p.db.Put(dataKey, enc.Bytes(), nil); err != nil { + err = errors.Wrap(err, "write log data failed") + return + } + + // write header + l.DataLength = uint64(enc.Len()) + + if enc, err = utils.EncodeMsgPack(l.LogHeader); err != nil { + err = errors.Wrap(err, "encode log header failed") + return + } + + // save header + if err = p.db.Put(headerKey, enc.Bytes(), nil); err != nil { + err = errors.Wrap(err, "encode log header failed") + return + } + + p.updatePending(l.Index) + + return +} + +// Read implements Wal.Read. +func (p *LevelDBWal) Read() (l *kt.Log, err error) { + if atomic.LoadUint32(&p.read) == 1 { + err = io.EOF + return + } + + p.readLock.Lock() + defer p.readLock.Unlock() + + // start with base, use iterator to read + if p.it == nil { + keyRange := util.BytesPrefix(logHeaderKeyPrefix) + p.it = p.db.NewIterator(keyRange, nil) + } + + if p.it.Next() { + // load + l, err = p.load(p.it.Value()) + // update base and pending + if err == nil { + p.updatePending(l.Index) + } + return + } + + p.it.Release() + if err = p.it.Error(); err == nil { + err = io.EOF + } + p.it = nil + + // log read complete, could not read again + atomic.StoreUint32(&p.read, 1) + + return +} + +// Get implements Wal.Get. +func (p *LevelDBWal) Get(i uint64) (l *kt.Log, err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + + headerKey := append(append([]byte(nil), logHeaderKeyPrefix...), p.uint64ToBytes(i)...) 
+ + var headerData []byte + if headerData, err = p.db.Get(headerKey, nil); err == leveldb.ErrNotFound { + err = ErrNotExists + } else if err != nil { + err = errors.Wrap(err, "get log header failed") + return + } + + return p.load(headerData) +} + +// Close implements Wal.Close. +func (p *LevelDBWal) Close() { + if !atomic.CompareAndSwapUint32(&p.closed, 0, 1) { + return + } + + if p.it != nil { + p.it.Release() + p.it = nil + } + + if p.db != nil { + p.db.Close() + } +} + +func (p *LevelDBWal) updatePending(index uint64) { + p.pendingLock.Lock() + defer p.pendingLock.Unlock() + + if atomic.CompareAndSwapUint64(&p.base, index, index+1) { + // process pending + for len(p.pending) > 0 { + if !atomic.CompareAndSwapUint64(&p.base, p.pending[0], p.pending[0]+1) { + break + } + p.pending = p.pending[1:] + } + + // commit base index to database + _ = p.db.Put(baseIndexKey, p.uint64ToBytes(atomic.LoadUint64(&p.base)), nil) + } else { + i := sort.Search(len(p.pending), func(i int) bool { + return p.pending[i] >= index + }) + + if len(p.pending) == i || p.pending[i] != index { + p.pending = append(p.pending, 0) + copy(p.pending[i+1:], p.pending[i:]) + p.pending[i] = index + } + } +} + +func (p *LevelDBWal) load(logHeader []byte) (l *kt.Log, err error) { + l = new(kt.Log) + + if err = utils.DecodeMsgPack(logHeader, &l.LogHeader); err != nil { + err = errors.Wrap(err, "decode log header failed") + return + } + + dataKey := append(append([]byte(nil), logDataKeyPrefix...), p.uint64ToBytes(l.Index)...) 
+ + var encData []byte + if encData, err = p.db.Get(dataKey, nil); err != nil { + err = errors.Wrap(err, "get log data failed") + return + } + + // load data + if err = utils.DecodeMsgPack(encData, &l.Data); err != nil { + err = errors.Wrap(err, "decode log data failed") + } + + return +} + +func (p *LevelDBWal) uint64ToBytes(o uint64) (res []byte) { + res = make([]byte, 8) + binary.BigEndian.PutUint64(res, o) + return +} + +func (p *LevelDBWal) bytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} diff --git a/kayak/wal/leveldb_wal_test.go b/kayak/wal/leveldb_wal_test.go new file mode 100644 index 000000000..db21e5aca --- /dev/null +++ b/kayak/wal/leveldb_wal_test.go @@ -0,0 +1,91 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wal + +import ( + "os" + "testing" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestLevelDBWal_Write(t *testing.T) { + Convey("test leveldb wal write", t, func() { + var p *LevelDBWal + var err error + p, err = NewLevelDBWal("testWrite.ldb") + So(err, ShouldBeNil) + defer func() { + p.Close() + os.RemoveAll("testWrite.ldb") + }() + + l1 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 0, + Type: kt.LogPrepare, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), + }, + Data: []byte("happy1"), + } + + err = p.Write(l1) + So(err, ShouldBeNil) + err = p.Write(l1) + So(err, ShouldNotBeNil) + + // test get + var l *kt.Log + l, err = p.Get(l1.Index) + So(err, ShouldBeNil) + So(l, ShouldResemble, l1) + + // test consecutive writes + l2 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Type: kt.LogPrepare, + }, + Data: []byte("happy2"), + } + err = p.Write(l2) + So(err, ShouldBeNil) + + // test not consecutive writes + l4 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 3, + Type: kt.LogPrepare, + }, + Data: []byte("happy3"), + } + err = p.Write(l4) + So(err, ShouldBeNil) + + l3 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 2, + Type: kt.LogPrepare, + }, + Data: []byte("happy4"), + } + err = p.Write(l3) + So(err, ShouldBeNil) + }) +} diff --git a/kayak/wal/mem_wal.go b/kayak/wal/mem_wal.go new file mode 100644 index 000000000..48314ce47 --- /dev/null +++ b/kayak/wal/mem_wal.go @@ -0,0 +1,128 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wal + +import ( + "io" + "sync" + "sync/atomic" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" +) + +// MemWal defines a toy wal using memory as storage. +type MemWal struct { + sync.RWMutex + logs []*kt.Log + revIndex map[uint64]int + offset uint64 + closed uint32 +} + +// NewMemWal returns new memory wal instance. +func NewMemWal() (p *MemWal) { + p = &MemWal{ + revIndex: make(map[uint64]int), + } + + return +} + +// Write implements Wal.Write. +func (p *MemWal) Write(l *kt.Log) (err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + + if l == nil { + err = ErrInvalidLog + return + } + + p.Lock() + defer p.Unlock() + + if _, exists := p.revIndex[l.Index]; exists { + err = ErrAlreadyExists + return + } + + offset := atomic.AddUint64(&p.offset, 1) - 1 + p.logs = append(p.logs, nil) + copy(p.logs[offset+1:], p.logs[offset:]) + p.logs[offset] = l + p.revIndex[l.Index] = int(offset) + + return +} + +// Read implements Wal.Read. +func (p *MemWal) Read() (l *kt.Log, err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + + p.RLock() + defer p.RUnlock() + + if atomic.LoadUint64(&p.offset) >= uint64(len(p.logs)) { + err = io.EOF + return + } + + index := atomic.AddUint64(&p.offset, 1) - 1 + if index >= uint64(len(p.logs)) { + // error + err = io.EOF + return + } + + l = p.logs[index] + + return +} + +// Get implements Wal.Get. +func (p *MemWal) Get(index uint64) (l *kt.Log, err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + + p.RLock() + defer p.RUnlock() + + var i int + var exists bool + if i, exists = p.revIndex[index]; !exists { + err = ErrNotExists + return + } + + l = p.logs[i] + + return +} + +// Close implements Wal.Close. 
+func (p *MemWal) Close() { + if !atomic.CompareAndSwapUint32(&p.closed, 0, 1) { + return + } +} diff --git a/kayak/wal/mem_wal_test.go b/kayak/wal/mem_wal_test.go new file mode 100644 index 000000000..1a539ccb0 --- /dev/null +++ b/kayak/wal/mem_wal_test.go @@ -0,0 +1,172 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wal + +import ( + "sync" + "testing" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestMemWal_Write(t *testing.T) { + Convey("test mem wal write", t, func() { + var p *MemWal + p = NewMemWal() + + l1 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 0, + Type: kt.LogPrepare, + }, + Data: []byte("happy1"), + } + + var err error + err = p.Write(l1) + So(err, ShouldBeNil) + So(p.logs, ShouldResemble, []*kt.Log{l1}) + err = p.Write(l1) + So(err, ShouldNotBeNil) + So(p.revIndex, ShouldHaveLength, 1) + So(p.revIndex[l1.Index], ShouldEqual, 0) + So(p.offset, ShouldEqual, 1) + + // test get + var l *kt.Log + l, err = p.Get(l1.Index) + So(err, ShouldBeNil) + So(l, ShouldResemble, l1) + + // test consecutive writes + l2 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Type: kt.LogPrepare, + }, + Data: []byte("happy2"), + } + err = p.Write(l2) + So(err, ShouldBeNil) + So(p.revIndex, ShouldHaveLength, 2) + So(p.revIndex[l2.Index], ShouldEqual, 1) + So(p.offset, ShouldEqual, 2) + + // test not consecutive writes + l4 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 3, + Type: kt.LogPrepare, + }, + Data: []byte("happy3"), + } + err = p.Write(l4) + So(err, ShouldBeNil) + So(p.revIndex, ShouldHaveLength, 3) + So(p.revIndex[l4.Index], ShouldEqual, 2) + So(p.offset, ShouldEqual, 3) + + l3 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 2, + Type: kt.LogPrepare, + }, + Data: []byte("happy4"), + } + err = p.Write(l3) + So(err, ShouldBeNil) + So(p.revIndex, ShouldHaveLength, 4) + So(p.revIndex[l3.Index], ShouldEqual, 3) + So(p.offset, ShouldEqual, 4) + }) +} + +func TestMemWal_Write2(t *testing.T) { + Convey("test mem wal write", t, func() { + l1 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 0, + Type: kt.LogPrepare, + }, + Data: []byte("happy1"), + } + l2 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Type: kt.LogPrepare, + }, + Data: []byte("happy2"), + } + l3 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 2, + Type: kt.LogPrepare, + }, + Data: []byte("happy4"), + } + l4 := &kt.Log{ + LogHeader: 
kt.LogHeader{ + Index: 3, + Type: kt.LogPrepare, + }, + Data: []byte("happy3"), + } + l5 := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 4, + Type: kt.LogPrepare, + }, + Data: []byte("happy5"), + } + + var wg sync.WaitGroup + var p *MemWal + p = NewMemWal() + + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l1) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l2) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l3) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l4) + }() + wg.Add(1) + go func() { + defer wg.Done() + p.Write(l5) + }() + + wg.Wait() + + So(p.revIndex, ShouldHaveLength, 5) + So(p.offset, ShouldEqual, 5) + }) +} diff --git a/proto/errors.go b/proto/errors.go new file mode 100644 index 000000000..cee9c752c --- /dev/null +++ b/proto/errors.go @@ -0,0 +1,27 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proto + +import "github.com/pkg/errors" + +var ( + // ErrHashVerification indicates a failed hash verification. + ErrHashVerification = errors.New("hash verification failed") + + // ErrSignVerification indicates a failed signature verification. 
+ ErrSignVerification = errors.New("signature verification failed") +) diff --git a/proto/nodeinfo.go b/proto/nodeinfo.go index 019e32237..2fa826584 100644 --- a/proto/nodeinfo.go +++ b/proto/nodeinfo.go @@ -231,7 +231,6 @@ func (s *ServerRole) UnmarshalYAML(unmarshal func(interface{}) error) error { } func parseServerRole(roleStr string) (role ServerRole, err error) { - switch strings.ToLower(roleStr) { case "leader": role = Leader diff --git a/proto/servers.go b/proto/servers.go new file mode 100644 index 000000000..27d935fee --- /dev/null +++ b/proto/servers.go @@ -0,0 +1,103 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proto + +import ( + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" +) + +//go:generate hsp + +// PeersHeader defines the header for miner peers. +type PeersHeader struct { + Version uint64 + Term uint64 + Leader NodeID + Servers []NodeID +} + +// Peers defines the peers configuration. +type Peers struct { + PeersHeader + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature +} + +// Clone makes a deep copy of Peers. +func (p *Peers) Clone() (copy Peers) { + copy.Version = p.Version + copy.Leader = p.Leader + copy.Servers = append(copy.Servers, p.Servers...) + copy.Signee = p.Signee + copy.Signature = p.Signature + return +} + +// Sign generates signature. 
+func (p *Peers) Sign(signer *asymmetric.PrivateKey) (err error) { + var enc []byte + if enc, err = p.PeersHeader.MarshalHash(); err != nil { + return + } + + var h = hash.THashH(enc) + if p.Signature, err = signer.Sign(h[:]); err != nil { + return + } + + p.Hash = h + p.Signee = signer.PubKey() + return +} + +// Verify verify signature. +func (p *Peers) Verify() (err error) { + var enc []byte + if enc, err = p.PeersHeader.MarshalHash(); err != nil { + return + } + + var h = hash.THashH(enc) + if !p.Hash.IsEqual(&h) { + err = ErrHashVerification + return + } + + if !p.Signature.Verify(h[:], p.Signee) { + err = ErrSignVerification + return + } + + return +} + +// Find finds the index of the server with the specified key in the server list. +func (p *Peers) Find(key NodeID) (index int32, found bool) { + if p.Servers != nil { + for i, s := range p.Servers { + if key.IsEqual(&s) { + index = int32(i) + found = true + break + } + } + } + + return +} diff --git a/proto/servers_gen.go b/proto/servers_gen.go new file mode 100644 index 000000000..7357a4dfd --- /dev/null +++ b/proto/servers_gen.go @@ -0,0 +1,102 @@ +package proto + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Peers) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Signee == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if oTemp, err := z.PeersHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.Hash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Peers) Msgsize() (s int) { + s = 1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 12 + z.PeersHeader.Msgsize() + 5 + z.Hash.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *PeersHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if oTemp, err := z.Leader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.Servers))) + for za0001 := range z.Servers { + if oTemp, err := z.Servers[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.Version) + o = 
append(o, 0x84) + o = hsp.AppendUint64(o, z.Term) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *PeersHeader) Msgsize() (s int) { + s = 1 + 7 + z.Leader.Msgsize() + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Servers { + s += z.Servers[za0001].Msgsize() + } + s += 8 + hsp.Uint64Size + 5 + hsp.Uint64Size + return +} diff --git a/kayak/types_gen_test.go b/proto/servers_gen_test.go similarity index 60% rename from kayak/types_gen_test.go rename to proto/servers_gen_test.go index 17a02ec9d..3111a438c 100644 --- a/kayak/types_gen_test.go +++ b/proto/servers_gen_test.go @@ -1,4 +1,4 @@ -package kayak +package proto // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. @@ -9,43 +9,6 @@ import ( "testing" ) -func TestMarshalHashLog(t *testing.T) { - v := Log{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashLog(b *testing.B) { - v := Log{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgLog(b *testing.B) { - v := Log{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - func TestMarshalHashPeers(t *testing.T) { v := Peers{} binary.Read(rand.Reader, binary.BigEndian, &v) @@ -83,8 +46,8 @@ func BenchmarkAppendMsgPeers(b *testing.B) { } } -func TestMarshalHashServer(t *testing.T) { - v := Server{} +func TestMarshalHashPeersHeader(t *testing.T) { + v := PeersHeader{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -99,8 +62,8 @@ func TestMarshalHashServer(t *testing.T) { } } -func 
BenchmarkMarshalHashServer(b *testing.B) { - v := Server{} +func BenchmarkMarshalHashPeersHeader(b *testing.B) { + v := PeersHeader{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -108,8 +71,8 @@ func BenchmarkMarshalHashServer(b *testing.B) { } } -func BenchmarkAppendMsgServer(b *testing.B) { - v := Server{} +func BenchmarkAppendMsgPeersHeader(b *testing.B) { + v := PeersHeader{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) diff --git a/rpc/sharedsecret.go b/rpc/sharedsecret.go index e9166a1c3..86d150464 100644 --- a/rpc/sharedsecret.go +++ b/rpc/sharedsecret.go @@ -17,6 +17,7 @@ package rpc import ( + "fmt" "sync" "github.com/CovenantSQL/CovenantSQL/conf" @@ -70,8 +71,8 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe symmetricKeyCache.Store(nodeID, symmetricKey) log.WithFields(log.Fields{ "node": nodeID.String(), - "remotePub": remotePublicKey.Serialize(), - "sessionKey": symmetricKey, + "remotePub": fmt.Sprintf("%#x", remotePublicKey.Serialize()), + "sessionKey": fmt.Sprintf("%#x", symmetricKey), }).Debug("generated shared secret") } //log.Debugf("ECDH for %s Public Key: %x, Private Key: %x Session Key: %x", diff --git a/sqlchain/chain.go b/sqlchain/chain.go index ab0667468..1fae7f8ec 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -29,7 +29,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -300,11 +299,11 @@ func LoadChain(c *Config) (chain *Chain, err error) { } log.WithFields(log.Fields{ "height": h, - "header": resp.HeaderHash.String(), + "header": resp.Hash.String(), }).Debug("Loaded new resp header") err = chain.qi.addResponse(h, resp) if err != nil { - err = 
errors.Wrapf(err, "load resp, height %d, hash %s", h, resp.HeaderHash.String()) + err = errors.Wrapf(err, "load resp, height %d, hash %s", h, resp.Hash.String()) return } } @@ -326,11 +325,11 @@ func LoadChain(c *Config) (chain *Chain, err error) { } log.WithFields(log.Fields{ "height": h, - "header": ack.HeaderHash.String(), + "header": ack.Hash.String(), }).Debug("Loaded new ack header") err = chain.qi.addAck(h, ack) if err != nil { - err = errors.Wrapf(err, "load ack, height %d, hash %s", h, ack.HeaderHash.String()) + err = errors.Wrapf(err, "load ack, height %d, hash %s", h, ack.Hash.String()) return } } @@ -417,14 +416,14 @@ func (c *Chain) pushResponedQuery(resp *wt.SignedResponseHeader) (err error) { return } - tdbKey := utils.ConcatAll(metaResponseIndex[:], k, resp.HeaderHash[:]) + tdbKey := utils.ConcatAll(metaResponseIndex[:], k, resp.Hash[:]) if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put response %d %s", h, resp.HeaderHash.String()) + err = errors.Wrapf(err, "put response %d %s", h, resp.Hash.String()) return } if err = c.qi.addResponse(h, resp); err != nil { - err = errors.Wrapf(err, "add resp h %d hash %s", h, resp.HeaderHash) + err = errors.Wrapf(err, "add resp h %d hash %s", h, resp.Hash) return err } @@ -433,7 +432,7 @@ func (c *Chain) pushResponedQuery(resp *wt.SignedResponseHeader) (err error) { // pushAckedQuery pushes a acknowledged, signed and verified query into the chain. 
func (c *Chain) pushAckedQuery(ack *wt.SignedAckHeader) (err error) { - log.Debugf("push ack %s", ack.HeaderHash.String()) + log.Debugf("push ack %s", ack.Hash.String()) h := c.rt.getHeightFromTime(ack.SignedResponseHeader().Timestamp) k := heightToKey(h) var enc *bytes.Buffer @@ -442,15 +441,15 @@ func (c *Chain) pushAckedQuery(ack *wt.SignedAckHeader) (err error) { return } - tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.HeaderHash[:]) + tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.Hash[:]) if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put ack %d %s", h, ack.HeaderHash.String()) + err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash.String()) return } if err = c.qi.addAck(h, ack); err != nil { - err = errors.Wrapf(err, "add ack h %d hash %s", h, ack.HeaderHash) + err = errors.Wrapf(err, "add ack h %d hash %s", h, ack.Hash) return err } @@ -471,7 +470,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { SignedHeader: ct.SignedHeader{ Header: ct.Header{ Version: 0x01000000, - Producer: c.rt.getServer().ID, + Producer: c.rt.getServer(), GenesisHash: c.rt.genesisHash, ParentHash: c.rt.getHead().Head, // MerkleRoot: will be set by Block.PackAndSignBlock(PrivateKey) @@ -519,7 +518,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { wg := &sync.WaitGroup{} for _, s := range peers.Servers { - if s.ID != c.rt.getServer().ID { + if s != c.rt.getServer() { wg.Add(1) go func(id proto.NodeID) { defer wg.Done() @@ -535,7 +534,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { }).WithError(err).Error( "Failed to advise new block") } - }(s.ID) + }(s) } } @@ -565,14 +564,14 @@ func (c *Chain) syncHead() { succ := false for i, s := range peers.Servers { - if s.ID != c.rt.getServer().ID { + if s != c.rt.getServer() { if err = c.cl.CallNode( - s.ID, route.SQLCFetchBlock.String(), req, resp, + s, route.SQLCFetchBlock.String(), req, resp, ); err != nil || resp.Block == nil { log.WithFields(log.Fields{ 
"peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s.ID), + "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), @@ -583,7 +582,7 @@ func (c *Chain) syncHead() { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s.ID), + "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), @@ -918,7 +917,7 @@ func (c *Chain) syncAckedQuery(height int32, header *hash.Hash, id proto.NodeID) DatabaseID: c.rt.databaseID, FetchAckedQueryReq: FetchAckedQueryReq{ Height: height, - SignedAckedHeaderHash: header, + SignedAckedHash: header, }, } resp := &MuxFetchAckedQueryResp{} @@ -947,7 +946,7 @@ func (c *Chain) queryOrSyncAckedQuery(height int32, header *hash.Hash, id proto. ) { if ack, err = c.FetchAckedQuery( height, header, - ); (err == nil && ack != nil) || id == c.rt.getServer().ID { + ); (err == nil && ack != nil) || id == c.rt.getServer() { return } return c.syncAckedQuery(height, header, id) @@ -991,7 +990,7 @@ func (c *Chain) CheckAndPushNewBlock(block *ct.Block) (err error) { } // Short circuit the checking process if it's a self-produced block - if block.Producer() == c.rt.server.ID { + if block.Producer() == c.rt.server { return c.pushBlock(block) } @@ -1072,7 +1071,7 @@ func (c *Chain) VerifyAndPushAckedQuery(ack *wt.SignedAckHeader) (err error) { } // UpdatePeers updates peer list of the sql-chain. 
-func (c *Chain) UpdatePeers(peers *kayak.Peers) error { +func (c *Chain) UpdatePeers(peers *proto.Peers) error { return c.rt.updatePeers(peers) } @@ -1235,7 +1234,7 @@ func (c *Chain) collectBillingSignatures(billings *pt.BillingRequest) { }() for _, s := range peers.Servers { - if s.ID != c.rt.getServer().ID { + if s != c.rt.getServer() { rpcWG.Add(1) go func(id proto.NodeID) { defer rpcWG.Done() @@ -1250,7 +1249,7 @@ func (c *Chain) collectBillingSignatures(billings *pt.BillingRequest) { } respC <- &resp.SignBillingResp - }(s.ID) + }(s) } } } diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 5afd1f227..68b44f827 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -109,20 +109,20 @@ func TestMultiChain(t *testing.T) { } for i, p := range peers.Servers { - t.Logf("Peer #%d: %s", i, p.ID) + t.Logf("Peer #%d: %s", i, p) } // Create config info from created nodes bpinfo := &conf.BPInfo{ PublicKey: testPubKey, - NodeID: peers.Servers[testPeersNumber].ID, + NodeID: peers.Servers[testPeersNumber], Nonce: nis[testPeersNumber].Nonce, } knownnodes := make([]proto.Node, 0, testPeersNumber+1) for i, v := range peers.Servers { knownnodes = append(knownnodes, proto.Node{ - ID: v.ID, + ID: v, Role: func() proto.ServerRole { if i < testPeersNumber { return proto.Miner @@ -156,7 +156,11 @@ func TestMultiChain(t *testing.T) { defer server.Stop() // Create multiplexing service from RPC server - mux := NewMuxService(route.SQLChainRPCName, server) + mux, err := NewMuxService(route.SQLChainRPCName, server) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } // Create chain instance config := &Config{ @@ -326,7 +330,7 @@ func TestMultiChain(t *testing.T) { sC := make(chan struct{}) wg := &sync.WaitGroup{} wk := &nodeProfile{ - NodeID: peers.Servers[i].ID, + NodeID: peers.Servers[i], PrivateKey: testPrivKey, PublicKey: testPubKey, } diff --git a/sqlchain/config.go b/sqlchain/config.go index 627f0380a..dc6cb94a2 100644 --- a/sqlchain/config.go 
+++ b/sqlchain/config.go @@ -19,7 +19,6 @@ package sqlchain import ( "time" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" wt "github.com/CovenantSQL/CovenantSQL/worker/types" @@ -35,8 +34,8 @@ type Config struct { Tick time.Duration MuxService *MuxService - Peers *kayak.Peers - Server *kayak.Server + Peers *proto.Peers + Server proto.NodeID // Price sets query price in gases. Price map[wt.QueryType]uint64 diff --git a/sqlchain/mux.go b/sqlchain/mux.go index 9d0c8f060..09a215d81 100644 --- a/sqlchain/mux.go +++ b/sqlchain/mux.go @@ -30,13 +30,13 @@ type MuxService struct { } // NewMuxService creates a new multiplexing service and registers it to rpc server. -func NewMuxService(serviceName string, server *rpc.Server) (service *MuxService) { +func NewMuxService(serviceName string, server *rpc.Server) (service *MuxService, err error) { service = &MuxService{ ServiceName: serviceName, } - server.RegisterService(serviceName, service) - return service + err = server.RegisterService(serviceName, service) + return } func (s *MuxService) register(id proto.DatabaseID, service *ChainRPCService) { diff --git a/sqlchain/observer.go b/sqlchain/observer.go index 495c723d3..8b928427c 100644 --- a/sqlchain/observer.go +++ b/sqlchain/observer.go @@ -45,6 +45,7 @@ type observerReplicator struct { nodeID proto.NodeID height int32 triggerCh chan struct{} + stopOnce sync.Once stopCh chan struct{} replLock sync.Mutex c *Chain @@ -69,11 +70,13 @@ func (r *observerReplicator) setNewHeight(newHeight int32) { } func (r *observerReplicator) stop() { - select { - case <-r.stopCh: - default: - close(r.stopCh) - } + r.stopOnce.Do(func() { + select { + case <-r.stopCh: + default: + close(r.stopCh) + } + }) } func (r *observerReplicator) replicate() { diff --git a/sqlchain/queryindex.go b/sqlchain/queryindex.go index 27fa77ca9..d070ddf41 100644 --- a/sqlchain/queryindex.go +++ b/sqlchain/queryindex.go 
@@ -69,7 +69,7 @@ func (s *requestTracker) updateAck(ack *wt.SignedAckHeader) (isNew bool, err err } isNew = true - } else if !s.ack.HeaderHash.IsEqual(&ack.HeaderHash) { + } else if !s.ack.Hash.IsEqual(&ack.Hash) { // This may happen when a client sends multiple acknowledgements for a same query (same // response header hash) err = ErrMultipleAckOfResponse @@ -110,11 +110,11 @@ func (i seqIndex) ensure(k wt.QueryKey) (v *queryTracker) { // | ... | | | | | | +-... | // +--------+ +------------------+ | | | | | +-SeqNo: seq#0 | // | hash#3 |-----+ +->| queryTracker | | | | | | +-... | -// +--------+ | | | +-firstAck (nil) | | | | | +-HeaderHash = hash#0 | +// +--------+ | | | +-firstAck (nil) | | | | | +-Hash = hash#0 | // | ... | | | | +-queries | | | | | +-Signee ====> pubk#0 | // +--------+ | | | +-[0] |--+ | | | +-Signature => sign#0 | // | hash#6 |--+ | | | +-... | | | +-... | -// +--------+ | | | +------------------+ | +-HeaderHash = hash#1 | +// +--------+ | | | +------------------+ | +-Hash = hash#1 | // | ... | | | | | +-Signee ====> pubk#1 | // | | | | +-Signature => sign#1 | // | | | +---------------------------+ @@ -130,15 +130,15 @@ func (i seqIndex) ensure(k wt.QueryKey) (v *queryTracker) { // | ... | | | | | | | | | | | +-... | // | | | | | | | | | | +-SeqNo: seq#1 | // | | | | | | | | | | +-... | -// | | | | | | | | | +-HeaderHash = hash#2 | +// | | | | | | | | | +-Hash = hash#2 | // | | | | | | | | | +-Signee ====> pubk#2 | // | | | | | | | | | +-Signature => sign#2 | // seqIndex | | | | +----------------+ | | | | +-... | -// +------------------------------+->| requestTracker | | | | +-HeaderHash = hash#3 | +// +------------------------------+->| requestTracker | | | | +-Hash = hash#3 | // | ... | | | | | | +-response |---+ | | | +-signee ====> pubk#3 | // +--------+ | | | | | +-ack (nil) | | | | | +-Signature => sign#3 | // | seq#0 |--------+ | | | | +-... | | | | +-... 
| -// +--------+ | | | +----------------+ | | +-HeaderHash = hash#4 | +// +--------+ | | | +----------------+ | | +-Hash = hash#4 | // | ... | | | | | | +-Signee ====> pubk#2 | // +--------+ +--------------+ | | | | | +-Signature => sign#4 | // | seq#1 |---------->| queryTracker | | | | | +-------------------------------+ @@ -153,11 +153,11 @@ func (i seqIndex) ensure(k wt.QueryKey) (v *queryTracker) { // | | | | +-... | // | | | | +-SeqNo: seq#1 | // | | | | +-... | -// | | | +-HeaderHash = hash#5 | +// | | | +-Hash = hash#5 | // | | | +-Signee ====> pubk#5 | // | | | +-Signature => sign#5 | // | | +-... | -// | +-HeaderHash = hash#6 | +// | +-Hash = hash#6 | // | +-Signee ====> pubk#6 | // | +-Signature => sign#6 | // +---------------------------+ @@ -182,7 +182,7 @@ func (i *multiIndex) addResponse(resp *wt.SignedResponseHeader) (err error) { i.Lock() defer i.Unlock() - if v, ok := i.respIndex[resp.HeaderHash]; ok { + if v, ok := i.respIndex[resp.Hash]; ok { if v == nil || v.response == nil { // TODO(leventeliu): consider to panic. err = ErrCorruptedIndex @@ -202,7 +202,7 @@ func (i *multiIndex) addResponse(resp *wt.SignedResponseHeader) (err error) { response: resp, } - i.respIndex[resp.HeaderHash] = s + i.respIndex[resp.Hash] = s q := i.seqIndex.ensure(resp.Request.GetQueryKey()) q.queries = append(q.queries, s) @@ -217,7 +217,7 @@ func (i *multiIndex) addAck(ack *wt.SignedAckHeader) (err error) { var ok bool q := i.seqIndex.ensure(ack.SignedRequestHeader().GetQueryKey()) - if v, ok = i.respIndex[ack.ResponseHeaderHash()]; ok { + if v, ok = i.respIndex[ack.ResponseHash()]; ok { if v == nil || v.response == nil { // TODO(leventeliu): consider to panic. 
err = ErrCorruptedIndex @@ -226,7 +226,7 @@ func (i *multiIndex) addAck(ack *wt.SignedAckHeader) (err error) { // Add hash -> ack index anyway, so that we can find the request tracker later, even if // there is a earlier acknowledgement for the same request - i.ackIndex[ack.HeaderHash] = v + i.ackIndex[ack.Hash] = v // This also updates the item indexed by ackIndex and seqIndex var isNew bool @@ -245,8 +245,8 @@ func (i *multiIndex) addAck(ack *wt.SignedAckHeader) (err error) { ack: ack, } - i.respIndex[ack.ResponseHeaderHash()] = v - i.ackIndex[ack.HeaderHash] = v + i.respIndex[ack.ResponseHash()] = v + i.ackIndex[ack.Hash] = v q.queries = append(q.queries, v) } @@ -255,7 +255,7 @@ func (i *multiIndex) addAck(ack *wt.SignedAckHeader) (err error) { // We will keep the first ack counted anyway. But, should we report it to someone? if q.firstAck == nil { q.firstAck = v - } else if !q.firstAck.ack.HeaderHash.IsEqual(&ack.HeaderHash) { + } else if !q.firstAck.ack.Hash.IsEqual(&ack.Hash) { err = ErrMultipleAckOfSeqNo } @@ -345,7 +345,7 @@ func (i *multiIndex) checkAckFromBlock(b *hash.Hash, ack *hash.Hash) (isKnown bo qs := i.seqIndex[q.ack.SignedRequestHeader().GetQueryKey()] // Check it as a first acknowledgement - if i.respIndex[q.response.HeaderHash] != q || qs == nil || qs.firstAck == nil { + if i.respIndex[q.response.Hash] != q || qs == nil || qs.firstAck == nil { err = ErrCorruptedIndex return } @@ -384,7 +384,7 @@ func (i *multiIndex) markAndCollectUnsignedAcks(qs *[]*hash.Hash) { for _, q := range i.seqIndex { if ack := q.firstAck; ack != nil && ack.signedBlock == nil { ack.signedBlock = placeHolder - *qs = append(*qs, &ack.ack.HeaderHash) + *qs = append(*qs, &ack.ack.Hash) } } } diff --git a/sqlchain/queryindex_test.go b/sqlchain/queryindex_test.go index 609b55b35..55b1f0863 100644 --- a/sqlchain/queryindex_test.go +++ b/sqlchain/queryindex_test.go @@ -70,7 +70,7 @@ func TestCorruptedIndex(t *testing.T) { } // Test corrupted index - 
qi.heightIndex.mustGet(0).respIndex[resp.HeaderHash].response = nil + qi.heightIndex.mustGet(0).respIndex[resp.Hash].response = nil if err = qi.addResponse(0, resp); err != ErrCorruptedIndex { t.Fatalf("Unexpected error: %v", err) @@ -80,7 +80,7 @@ func TestCorruptedIndex(t *testing.T) { t.Fatalf("Unexpected error: %v", err) } - qi.heightIndex.mustGet(0).respIndex[resp.HeaderHash] = nil + qi.heightIndex.mustGet(0).respIndex[resp.Hash] = nil if err = qi.addResponse(0, resp); err != ErrCorruptedIndex { t.Fatalf("Unexpected error: %v", err) @@ -207,8 +207,8 @@ func TestCheckAckFromBlock(t *testing.T) { t.Fatalf("Error occurred: %v", err) } - b1.Queries[0] = &ack1.HeaderHash - b2.Queries[0] = &ack1.HeaderHash + b1.Queries[0] = &ack1.Hash + b2.Queries[0] = &ack1.Hash qi.setSignedBlock(height, b1) if _, err := qi.checkAckFromBlock( @@ -218,7 +218,7 @@ func TestCheckAckFromBlock(t *testing.T) { } // Test checking same ack signed by another block - b2.Queries[0] = &ack2.HeaderHash + b2.Queries[0] = &ack2.Hash if _, err = qi.checkAckFromBlock( height, b2.BlockHash(), b2.Queries[0], @@ -307,7 +307,7 @@ func TestQueryIndex(t *testing.T) { } log.Debugf("i = %d, j = %d, k = %d\n\tseqno = %+v, req = %v, resp = %v", i, j, k, - resp.Request.GetQueryKey(), &req.HeaderHash, &resp.HeaderHash) + resp.Request.GetQueryKey(), &req.Hash, &resp.Hash) if err = qi.addResponse(int32(i), resp); err != nil { t.Fatalf("Error occurred: %v", err) @@ -323,9 +323,9 @@ func TestQueryIndex(t *testing.T) { "req = %v, resp = %v, ack = %v", i, j, k, l, ack.SignedRequestHeader().GetQueryKey(), - &ack.SignedRequestHeader().HeaderHash, - &ack.SignedResponseHeader().HeaderHash, - &ack.HeaderHash, + &ack.SignedRequestHeader().Hash, + &ack.SignedResponseHeader().Hash, + &ack.Hash, ) if err != nil { @@ -347,12 +347,12 @@ func TestQueryIndex(t *testing.T) { if err == nil { hasFirstAck = true - block.PushAckedQuery(&ack.HeaderHash) + block.PushAckedQuery(&ack.Hash) } else { continue } - if rAck, err := 
qi.getAck(int32(i), &ack.HeaderHash); err != nil { + if rAck, err := qi.getAck(int32(i), &ack.Hash); err != nil { t.Fatalf("Error occurred: %v", err) } else if !reflect.DeepEqual(ack, rAck) { t.Fatalf("Unexpected result:\n\torigin = %+v\n\toutput = %+v", diff --git a/sqlchain/rpc.go b/sqlchain/rpc.go index b662d67ca..5bb7fcbdf 100644 --- a/sqlchain/rpc.go +++ b/sqlchain/rpc.go @@ -79,8 +79,8 @@ type FetchBlockResp struct { // FetchAckedQueryReq defines a request of the FetchAckedQuery RPC method. type FetchAckedQueryReq struct { - Height int32 - SignedAckedHeaderHash *hash.Hash + Height int32 + SignedAckedHash *hash.Hash } // FetchAckedQueryResp defines a request of the FetchAckedQuery RPC method. @@ -162,7 +162,7 @@ func (s *ChainRPCService) FetchBlock(req *FetchBlockReq, resp *FetchBlockResp) ( // FetchAckedQuery is the RPC method to fetch a known block from the target server. func (s *ChainRPCService) FetchAckedQuery(req *FetchAckedQueryReq, resp *FetchAckedQueryResp, ) (err error) { - resp.Ack, err = s.chain.FetchAckedQuery(req.Height, req.SignedAckedHeaderHash) + resp.Ack, err = s.chain.FetchAckedQuery(req.Height, req.SignedAckedHash) return } diff --git a/sqlchain/runtime.go b/sqlchain/runtime.go index c846bf2b9..2fbc8df53 100644 --- a/sqlchain/runtime.go +++ b/sqlchain/runtime.go @@ -18,11 +18,11 @@ package sqlchain import ( "fmt" + "github.com/CovenantSQL/CovenantSQL/utils/log" "sync" "time" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" wt "github.com/CovenantSQL/CovenantSQL/worker/types" @@ -58,9 +58,9 @@ type runtime struct { // peersMutex protects following peers-relative fields. peersMutex sync.Mutex // peers is the peer list of the sql-chain. - peers *kayak.Peers + peers *proto.Peers // server is the local peer service instance. 
- server *kayak.Server + server proto.NodeID // index is the index of the current server in the peer list. index int32 // total is the total peer number of the sql-chain. total int32 @@ -98,10 +98,15 @@ func newRunTime(c *Config) (r *runtime) { peers: c.Peers, server: c.Server, index: func() int32 { - if index, found := c.Peers.Find(c.Server.ID); found { + if index, found := c.Peers.Find(c.Server); found { return index } + log.WithFields(log.Fields{ + "node": c.Server, + "peers": c.Peers, + }).Warning("could not find server in peers") + return -1 }(), total: int32(len(c.Peers.Servers)), @@ -221,10 +226,10 @@ func (r *runtime) nextTick() (t time.Time, d time.Duration) { return } -func (r *runtime) updatePeers(peers *kayak.Peers) (err error) { +func (r *runtime) updatePeers(peers *proto.Peers) (err error) { r.peersMutex.Lock() defer r.peersMutex.Unlock() - index, found := peers.Find(r.server.ID) + index, found := peers.Find(r.server) if found { r.index = index @@ -259,7 +264,7 @@ func (r *runtime) getIndexTotal() (int32, int32) { return r.index, r.total } -func (r *runtime) getIndexTotalServer() (int32, int32, *kayak.Server) { +func (r *runtime) getIndexTotalServer() (int32, int32, proto.NodeID) { r.peersMutex.Lock() defer r.peersMutex.Unlock() return r.index, r.total, r.server @@ -267,10 +272,10 @@ func (r *runtime) getIndexTotalServer() (int32, int32, *kayak.Server) { func (r *runtime) getPeerInfoString() string { index, total, server := r.getIndexTotalServer() - return fmt.Sprintf("[%d/%d] %s", index, total, server.ID) + return fmt.Sprintf("[%d/%d] %s", index, total, server) } -func (r *runtime) getServer() *kayak.Server { +func (r *runtime) getServer() proto.NodeID { r.peersMutex.Lock() defer r.peersMutex.Unlock() return r.server @@ -298,7 +303,7 @@ func (r *runtime) isMyTurn() (ret bool) { return } -func (r *runtime) getPeers() *kayak.Peers { +func (r *runtime) getPeers() *proto.Peers { r.peersMutex.Lock() defer r.peersMutex.Unlock() peers := r.peers.Clone() diff --git
a/sqlchain/storage/storage.go b/sqlchain/storage/storage.go index 481e36b39..7e36bad0e 100644 --- a/sqlchain/storage/storage.go +++ b/sqlchain/storage/storage.go @@ -322,7 +322,7 @@ func (s *Storage) Query(ctx context.Context, queries []Query) (columns []string, } // Exec implements write query feature. -func (s *Storage) Exec(ctx context.Context, queries []Query) (rowsAffected int64, err error) { +func (s *Storage) Exec(ctx context.Context, queries []Query) (result ExecResult, err error) { if len(queries) == 0 { return } @@ -346,16 +346,16 @@ func (s *Storage) Exec(ctx context.Context, queries []Query) (rowsAffected int64 args[i] = v } - var result sql.Result - if result, err = tx.Exec(q.Pattern, args...); err != nil { + var r sql.Result + if r, err = tx.Exec(q.Pattern, args...); err != nil { log.WithError(err).Debug("execute query failed") return } var affected int64 - affected, err = result.RowsAffected() - - rowsAffected += affected + affected, _ = r.RowsAffected() + result.RowsAffected += affected + result.LastInsertID, _ = r.LastInsertId() } tx.Commit() diff --git a/sqlchain/storage/storage_test.go b/sqlchain/storage/storage_test.go index 0cb0a4459..0cda67128 100644 --- a/sqlchain/storage/storage_test.go +++ b/sqlchain/storage/storage_test.go @@ -260,19 +260,19 @@ func TestStorage(t *testing.T) { columns, types, data, err = st.Query(context.Background(), []Query{newQuery("DELETE FROM `kv` WHERE `value` IS NULL")}) - affected, err := st.Exec(context.Background(), + execResult, err := st.Exec(context.Background(), []Query{newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k4', 'v4')")}) - if err != nil || affected != 1 { + if err != nil || execResult.RowsAffected != 1 { t.Fatalf("Exec INSERT failed: %v", err) } // test with arguments - affected, err = st.Exec(context.Background(), []Query{newQuery("DELETE FROM `kv` WHERE `key`='k4'")}) - if err != nil || affected != 1 { + execResult, err = st.Exec(context.Background(), []Query{newQuery("DELETE FROM `kv` WHERE 
`key`='k4'")}) + if err != nil || execResult.RowsAffected != 1 { t.Fatalf("Exec DELETE failed: %v", err) } - affected, err = st.Exec(context.Background(), + execResult, err = st.Exec(context.Background(), []Query{newQuery("DELETE FROM `kv` WHERE `key`=?", "not_exist")}) - if err != nil || affected != 0 { + if err != nil || execResult.RowsAffected != 0 { t.Fatalf("Exec DELETE failed: %v", err) } diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go index 96870cc01..15c814c9b 100644 --- a/sqlchain/xxx_test.go +++ b/sqlchain/xxx_test.go @@ -28,7 +28,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" @@ -407,14 +406,14 @@ func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*wt.SignedAc } for _, ack := range acks { - b.PushAckedQuery(&ack.HeaderHash) + b.PushAckedQuery(&ack.Hash) } err = b.PackAndSignBlock(priv) return } -func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err error) { +func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *proto.Peers, err error) { if num <= 0 { return } @@ -439,29 +438,20 @@ func createTestPeers(num int) (nis []cpuminer.NonceInfo, p *kayak.Peers, err err return } - s := make([]*kayak.Server, num) + s := make([]proto.NodeID, num) h := &hash.Hash{} for i := range s { rand.Read(h[:]) - s[i] = &kayak.Server{ - Role: func() proto.ServerRole { - if i == 0 { - return proto.Leader - } - return proto.Follower - }(), - ID: proto.NodeID(nis[i].Hash.String()), - PubKey: pub, - } + s[i] = proto.NodeID(nis[i].Hash.String()) } - p = &kayak.Peers{ - Term: 0, - Leader: s[0], - Servers: s, - PubKey: pub, - Signature: nil, + p = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 0, + Leader: s[0], + 
Servers: s, + }, } if err = p.Sign(priv); err != nil { diff --git a/worker/db.go b/worker/db.go index 87f0b186f..1c95d9763 100644 --- a/worker/db.go +++ b/worker/db.go @@ -17,8 +17,8 @@ package worker import ( - "bytes" "context" + "database/sql" "io" "os" "path/filepath" @@ -30,12 +30,12 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" - ka "github.com/CovenantSQL/CovenantSQL/kayak/api" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/sqlchain" "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/CovenantSQL/sqlparser" @@ -46,11 +46,20 @@ const ( // StorageFileName defines storage file name of database instance. StorageFileName = "storage.db3" + // KayakWalFileName defines log pool name of database instance. + KayakWalFileName = "kayak.ldb" + // SQLChainFileName defines sqlchain storage file name. SQLChainFileName = "chain.db" // MaxRecordedConnectionSequences defines the max connection slots to anti reply attack. MaxRecordedConnectionSequences = 1000 + + // PrepareThreshold defines the prepare complete threshold. + PrepareThreshold = 1.0 + + // CommitThreshold defines the commit complete threshold. + CommitThreshold = 1.0 ) // Database defines a single database instance in worker runtime. 
@@ -58,15 +67,18 @@ type Database struct { cfg *DBConfig dbID proto.DatabaseID storage *storage.Storage + kayakWal *kl.LevelDBWal kayakRuntime *kayak.Runtime - kayakConfig kayak.Config + kayakConfig *kt.RuntimeConfig connSeqs sync.Map connSeqEvictCh chan uint64 chain *sqlchain.Chain + nodeID proto.NodeID + mux *DBKayakMuxService } // NewDatabase create a single database instance using config. -func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db *Database, err error) { +func NewDatabase(cfg *DBConfig, peers *proto.Peers, genesisBlock *ct.Block) (db *Database, err error) { // ensure dir exists if err = os.MkdirAll(cfg.DataDir, 0755); err != nil { return @@ -81,6 +93,7 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db db = &Database{ cfg: cfg, dbID: cfg.DatabaseID, + mux: cfg.KayakMux, connSeqEvictCh: make(chan uint64, 1), } @@ -120,9 +133,8 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db } // init chain - var nodeID proto.NodeID chainFile := filepath.Join(cfg.DataDir, SQLChainFileName) - if nodeID, err = kms.GetLocalNodeID(); err != nil { + if db.nodeID, err = kms.GetLocalNodeID(); err != nil { return } @@ -136,9 +148,7 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db // TODO(xq262144): should refactor server/node definition to conf/proto package // currently sqlchain package only use Server.ID as node id MuxService: cfg.ChainMux, - Server: &kayak.Server{ - ID: nodeID, - }, + Server: db.nodeID, // TODO(xq262144): currently using fixed period/resolution from sqlchain test case Period: 60 * time.Second, @@ -152,19 +162,37 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db } // init kayak config - options := ka.NewDefaultTwoPCOptions().WithTransportID(string(cfg.DatabaseID)) - db.kayakConfig = ka.NewTwoPCConfigWithOptions(cfg.DataDir, cfg.KayakMux, db, options) - - // create kayak runtime - if db.kayakRuntime, 
err = ka.NewTwoPCKayak(peers, db.kayakConfig); err != nil { + kayakWalPath := filepath.Join(cfg.DataDir, KayakWalFileName) + if db.kayakWal, err = kl.NewLevelDBWal(kayakWalPath); err != nil { + err = errors.Wrap(err, "init kayak log pool failed") return } - // init kayak runtime - if err = db.kayakRuntime.Init(); err != nil { + db.kayakConfig = &kt.RuntimeConfig{ + Handler: db, + PrepareThreshold: PrepareThreshold, + CommitThreshold: CommitThreshold, + PrepareTimeout: time.Second, + CommitTimeout: time.Second * 60, + Peers: peers, + Wal: db.kayakWal, + NodeID: db.nodeID, + InstanceID: string(db.dbID), + ServiceName: DBKayakRPCName, + MethodName: DBKayakMethodName, + } + + // create kayak runtime + if db.kayakRuntime, err = kayak.NewRuntime(db.kayakConfig); err != nil { return } + // register kayak runtime rpc + db.mux.register(db.dbID, db.kayakRuntime) + + // start kayak runtime + db.kayakRuntime.Start() + // init sequence eviction processor go db.evictSequences() @@ -172,7 +200,7 @@ func NewDatabase(cfg *DBConfig, peers *kayak.Peers, genesisBlock *ct.Block) (db } // UpdatePeers defines peers update query interface. 
-func (db *Database) UpdatePeers(peers *kayak.Peers) (err error) { +func (db *Database) UpdatePeers(peers *proto.Peers) (err error) { if err = db.kayakRuntime.UpdatePeers(peers); err != nil { return } @@ -215,6 +243,14 @@ func (db *Database) Shutdown() (err error) { if err = db.kayakRuntime.Shutdown(); err != nil { return } + + // unregister + db.mux.unregister(db.dbID) + } + + if db.kayakWal != nil { + // shutdown, stop kayak + db.kayakWal.Close() } if db.chain != nil { @@ -282,14 +318,9 @@ func (db *Database) writeQuery(request *wt.Request) (response *wt.Response, err } // call kayak runtime Process - var buf *bytes.Buffer - if buf, err = utils.EncodeMsgPack(request); err != nil { - return - } - var logOffset uint64 var result interface{} - result, logOffset, err = db.kayakRuntime.Apply(buf.Bytes()) + result, logOffset, err = db.kayakRuntime.Apply(context.Background(), request) if err != nil { return @@ -326,6 +357,19 @@ func (db *Database) readQuery(request *wt.Request) (response *wt.Response, err e return db.buildQueryResponse(request, 0, columns, types, data, 0, 0) } +func (db *Database) getLog(index uint64) (data interface{}, err error) { + var l *kt.Log + if l, err = db.kayakWal.Get(index); err != nil || l == nil { + err = errors.Wrap(err, "get log from kayak pool failed") + return + } + + // decode log + data, err = db.DecodePayload(l.Data) + + return +} + func (db *Database) buildQueryResponse(request *wt.Request, offset uint64, columns []string, types []string, data [][]interface{}, lastInsertID int64, affectedRows int64) (response *wt.Response, err error) { // build response @@ -431,9 +475,16 @@ func convertAndSanitizeQuery(inQuery []wt.Query) (outQuery []storage.Query, err originalQueries = append(originalQueries, query) } + // convert args + var args []sql.NamedArg + + for _, v := range q.Args { + args = append(args, sql.Named(v.Name, v.Value)) + } + outQuery[i] = storage.Query{ Pattern: strings.Join(originalQueries, "; "), - Args: q.Args, + Args: args, 
} } return diff --git a/worker/db_config.go b/worker/db_config.go index 22508513f..7a2b5b1dc 100644 --- a/worker/db_config.go +++ b/worker/db_config.go @@ -19,7 +19,6 @@ package worker import ( "time" - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/sqlchain" ) @@ -28,7 +27,7 @@ import ( type DBConfig struct { DatabaseID proto.DatabaseID DataDir string - KayakMux *kt.ETLSTransportService + KayakMux *DBKayakMuxService ChainMux *sqlchain.MuxService MaxWriteTimeGap time.Duration EncryptionKey string diff --git a/worker/db_storage.go b/worker/db_storage.go index 837783e6e..6c7ca1ac3 100644 --- a/worker/db_storage.go +++ b/worker/db_storage.go @@ -17,11 +17,11 @@ package worker import ( + "bytes" "container/list" "context" "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" - "github.com/CovenantSQL/CovenantSQL/twopc" "github.com/CovenantSQL/CovenantSQL/utils" wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/pkg/errors" @@ -29,75 +29,43 @@ import ( // Following contains storage related logic extracted from main database instance definition. -// Prepare implements twopc.Worker.Prepare. -func (db *Database) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) { - // wrap storage with signature check - var log *storage.ExecLog - if log, err = db.convertRequest(wb); err != nil { - return - } - return db.storage.Prepare(ctx, log) -} +// EncodePayload implements kayak.types.Handler.EncodePayload. +func (db *Database) EncodePayload(request interface{}) (data []byte, err error) { + var buf *bytes.Buffer -// Commit implements twopc.Worker.Commmit. 
-func (db *Database) Commit(ctx context.Context, wb twopc.WriteBatch) (result interface{}, err error) { - // wrap storage with signature check - var log *storage.ExecLog - if log, err = db.convertRequest(wb); err != nil { + if buf, err = utils.EncodeMsgPack(request); err != nil { + err = errors.Wrap(err, "encode request failed") return } - db.recordSequence(log) - return db.storage.Commit(ctx, log) -} -// Rollback implements twopc.Worker.Rollback. -func (db *Database) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) { - // wrap storage with signature check - var log *storage.ExecLog - if log, err = db.convertRequest(wb); err != nil { - return - } - db.recordSequence(log) - return db.storage.Rollback(ctx, log) -} - -func (db *Database) recordSequence(log *storage.ExecLog) { - db.connSeqs.Store(log.ConnectionID, log.SeqNo) + data = buf.Bytes() + return } -func (db *Database) verifySequence(log *storage.ExecLog) (err error) { - var data interface{} - var ok bool - var lastSeq uint64 - - if data, ok = db.connSeqs.Load(log.ConnectionID); ok { - lastSeq, _ = data.(uint64) +// DecodePayload implements kayak.types.Handler.DecodePayload. +func (db *Database) DecodePayload(data []byte) (request interface{}, err error) { + var req *wt.Request - if log.SeqNo <= lastSeq { - return ErrInvalidRequestSeq - } + if err = utils.DecodeMsgPack(data, &req); err != nil { + err = errors.Wrap(err, "decode request failed") + return } + request = req + return } -func (db *Database) convertRequest(wb twopc.WriteBatch) (log *storage.ExecLog, err error) { +// Check implements kayak.types.Handler.Check. 
+func (db *Database) Check(rawReq interface{}) (err error) { + var req *wt.Request var ok bool - - // type convert - var payloadBytes []byte - if payloadBytes, ok = wb.([]byte); !ok { + if req, ok = rawReq.(*wt.Request); !ok || req == nil { err = errors.Wrap(ErrInvalidRequest, "invalid request payload") return } - // decode - var req wt.Request - if err = utils.DecodeMsgPack(payloadBytes, &req); err != nil { - return - } - - // verify + // verify signature, check time/sequence only if err = req.Verify(); err != nil { return } @@ -112,22 +80,54 @@ func (db *Database) convertRequest(wb twopc.WriteBatch) (log *storage.ExecLog, e return } - // convert - log = new(storage.ExecLog) - log.ConnectionID = req.Header.ConnectionID - log.SeqNo = req.Header.SeqNo - log.Timestamp = req.Header.Timestamp.UnixNano() + // verify sequence + if err = db.verifySequence(req.Header.ConnectionID, req.Header.SeqNo); err != nil { + return + } + + // record sequence + db.recordSequence(req.Header.ConnectionID, req.Header.SeqNo) + + return +} - // sanitize dangerous query - if log.Queries, err = convertAndSanitizeQuery(req.Payload.Queries); err != nil { +// Commit implements kayak.types.Handler.Commit. 
+func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { + // convert query and check syntax + var req *wt.Request + var ok bool + if req, ok = rawReq.(*wt.Request); !ok || req == nil { + err = errors.Wrap(ErrInvalidRequest, "invalid request payload") return } - // verify connection sequence - if err = db.verifySequence(log); err != nil { + var queries []storage.Query + if queries, err = convertAndSanitizeQuery(req.Payload.Queries); err != nil { + // return original parser error return } + // execute + return db.storage.Exec(context.Background(), queries) +} + +func (db *Database) recordSequence(connID uint64, seqNo uint64) { + db.connSeqs.Store(connID, seqNo) +} + +func (db *Database) verifySequence(connID uint64, seqNo uint64) (err error) { + var data interface{} + var ok bool + var lastSeq uint64 + + if data, ok = db.connSeqs.Load(connID); ok { + lastSeq, _ = data.(uint64) + + if seqNo <= lastSeq { + return ErrInvalidRequestSeq + } + } + return } diff --git a/worker/db_test.go b/worker/db_test.go index 4e576bc5c..782dd1cde 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -35,8 +35,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" - ka "github.com/CovenantSQL/CovenantSQL/kayak/api" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" @@ -69,10 +67,14 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) // create mux service - service := ka.NewMuxService("DBKayak", server) + kayakMuxService, err := NewDBKayakMuxService("DBKayak", server) + So(err, ShouldBeNil) + + chainMuxService, err := sqlchain.NewMuxService("sqlchain", server) + So(err, ShouldBeNil) // create peers - var peers *kayak.Peers + var peers *proto.Peers peers, err = getPeers(1) So(err, ShouldBeNil) @@ -80,8 +82,8 @@ 
func TestSingleDatabase(t *testing.T) { cfg := &DBConfig{ DatabaseID: "TEST", DataDir: rootDir, - KayakMux: service, - ChainMux: sqlchain.NewMuxService("sqlchain", server), + KayakMux: kayakMuxService, + ChainMux: chainMuxService, MaxWriteTimeGap: time.Second * 5, } @@ -426,10 +428,14 @@ func TestInitFailed(t *testing.T) { defer os.RemoveAll(rootDir) // create mux service - service := ka.NewMuxService("DBKayak", server) + kayakMuxService, err := NewDBKayakMuxService("DBKayak", server) + So(err, ShouldBeNil) + + chainMuxService, err := sqlchain.NewMuxService("sqlchain", server) + So(err, ShouldBeNil) // create peers - var peers *kayak.Peers + var peers *proto.Peers peers, err = getPeers(1) So(err, ShouldBeNil) @@ -437,8 +443,8 @@ func TestInitFailed(t *testing.T) { cfg := &DBConfig{ DatabaseID: "TEST", DataDir: rootDir, - KayakMux: service, - ChainMux: sqlchain.NewMuxService("sqlchain", server), + KayakMux: kayakMuxService, + ChainMux: chainMuxService, MaxWriteTimeGap: time.Duration(5 * time.Second), } @@ -475,10 +481,14 @@ func TestDatabaseRecycle(t *testing.T) { So(err, ShouldBeNil) // create mux service - service := ka.NewMuxService("DBKayak", server) + kayakMuxService, err := NewDBKayakMuxService("DBKayak", server) + So(err, ShouldBeNil) + + chainMuxService, err := sqlchain.NewMuxService("sqlchain", server) + So(err, ShouldBeNil) // create peers - var peers *kayak.Peers + var peers *proto.Peers peers, err = getPeers(1) So(err, ShouldBeNil) @@ -486,8 +496,8 @@ func TestDatabaseRecycle(t *testing.T) { cfg := &DBConfig{ DatabaseID: "TEST", DataDir: rootDir, - KayakMux: service, - ChainMux: sqlchain.NewMuxService("sqlchain", server), + KayakMux: kayakMuxService, + ChainMux: chainMuxService, MaxWriteTimeGap: time.Duration(5 * time.Second), } @@ -629,7 +639,7 @@ func buildQueryEx(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift return } -func getPeers(term uint64) (peers *kayak.Peers, err error) { +func getPeers(term uint64) (peers *proto.Peers, err 
error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -637,24 +647,19 @@ func getPeers(term uint64) (peers *kayak.Peers, err error) { } // get private/public key - var pubKey *asymmetric.PublicKey var privateKey *asymmetric.PrivateKey - if privateKey, pubKey, err = getKeys(); err != nil { + if privateKey, _, err = getKeys(); err != nil { return } // generate peers and sign - server := &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - } - peers = &kayak.Peers{ - Term: term, - Leader: server, - Servers: []*kayak.Server{server}, - PubKey: pubKey, + peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: term, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, + }, } err = peers.Sign(privateKey) return @@ -847,11 +852,6 @@ func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitSer } func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { - var pubKey *asymmetric.PublicKey - if pubKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -863,21 +863,12 @@ func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.Se } instance.DatabaseID = proto.DatabaseID(dbID) - instance.Peers = &kayak.Peers{ - Term: 1, - Leader: &kayak.Server{ - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: nodeID, - PubKey: pubKey, - }, + instance.Peers = &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: 1, + Leader: nodeID, + Servers: []proto.NodeID{nodeID}, }, - PubKey: pubKey, } if err = instance.Peers.Sign(privKey); err != nil { return diff --git a/worker/dbms.go b/worker/dbms.go index 4eb6ff748..769c6d45e 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -23,10 +23,7 @@ import ( "path/filepath" "sync" - "github.com/pkg/errors" - - ka 
"github.com/CovenantSQL/CovenantSQL/kayak/api" - kt "github.com/CovenantSQL/CovenantSQL/kayak/transport" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -34,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" ) const ( @@ -48,7 +46,7 @@ const ( type DBMS struct { cfg *DBMSConfig dbMap sync.Map - kayakMux *kt.ETLSTransportService + kayakMux *DBKayakMuxService chainMux *sqlchain.MuxService rpc *DBMSRPCService } @@ -60,10 +58,16 @@ func NewDBMS(cfg *DBMSConfig) (dbms *DBMS, err error) { } // init kayak rpc mux - dbms.kayakMux = ka.NewMuxService(DBKayakRPCName, cfg.Server) + if dbms.kayakMux, err = NewDBKayakMuxService(DBKayakRPCName, cfg.Server); err != nil { + err = errors.Wrap(err, "register kayak mux service failed") + return + } // init sql-chain rpc mux - dbms.chainMux = sqlchain.NewMuxService(route.SQLChainRPCName, cfg.Server) + if dbms.chainMux, err = sqlchain.NewMuxService(route.SQLChainRPCName, cfg.Server); err != nil { + err = errors.Wrap(err, "register sqlchain mux service failed") + return + } // init service dbms.rpc = NewDBMSRPCService(route.DBRPCName, cfg.Server, dbms) @@ -293,19 +297,19 @@ func (dbms *DBMS) GetRequest(dbID proto.DatabaseID, offset uint64) (query *wt.Re return } - var reqBytes []byte - if reqBytes, err = db.kayakRuntime.GetLog(offset); err != nil { + var req interface{} + if req, err = db.getLog(offset); err != nil { + err = errors.Wrap(err, "get log failed") return } // decode requests - var q wt.Request - if err = utils.DecodeMsgPack(reqBytes, &q); err != nil { + var ok bool + if query, ok = req.(*wt.Request); !ok { + err = errors.Wrap(kt.ErrInvalidLog, "convert log to request failed") return } - query = &q - return } diff --git a/worker/dbms_mux.go 
b/worker/dbms_mux.go new file mode 100644 index 000000000..8209a5d20 --- /dev/null +++ b/worker/dbms_mux.go @@ -0,0 +1,65 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package worker + +import ( + "sync" + + "github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/pkg/errors" +) + +const ( + DBKayakMethodName = "Call" +) + +type DBKayakMuxService struct { + serviceName string + serviceMap sync.Map +} + +func NewDBKayakMuxService(serviceName string, server *rpc.Server) (s *DBKayakMuxService, err error) { + s = &DBKayakMuxService{ + serviceName: serviceName, + } + err = server.RegisterService(serviceName, s) + return +} + +func (s *DBKayakMuxService) register(id proto.DatabaseID, rt *kayak.Runtime) { + s.serviceMap.Store(id, rt) + +} + +func (s *DBKayakMuxService) unregister(id proto.DatabaseID) { + s.serviceMap.Delete(id) +} + +func (s *DBKayakMuxService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { + // call apply to specified kayak + // treat req.Instance as DatabaseID + id := proto.DatabaseID(req.Instance) + + if v, ok := s.serviceMap.Load(id); ok { + return v.(*kayak.Runtime).FollowerApply(req.Log) + } + + return errors.Wrapf(ErrUnknownMuxRequest, "instance %v", req.Instance) +} diff --git a/worker/dbms_test.go b/worker/dbms_test.go index 
a0f2875ae..7b8d43fd6 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -24,7 +24,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -66,7 +65,7 @@ func TestDBMS(t *testing.T) { // add database var req *wt.UpdateService var res wt.UpdateServiceResponse - var peers *kayak.Peers + var peers *proto.Peers var block *ct.Block dbID := proto.DatabaseID("db") @@ -110,7 +109,7 @@ func TestDBMS(t *testing.T) { err = queryRes.Verify() So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) - So(queryRes.Header.LogOffset, ShouldEqual, 1) + So(queryRes.Header.LogOffset, ShouldEqual, 0) var reqGetRequest wt.GetRequestReq var respGetRequest *wt.GetRequestResp @@ -119,7 +118,7 @@ func TestDBMS(t *testing.T) { reqGetRequest.LogOffset = queryRes.Header.LogOffset err = testRequest(route.DBSGetRequest, reqGetRequest, &respGetRequest) So(err, ShouldBeNil) - So(respGetRequest.Request.Header.HeaderHash, ShouldResemble, writeQuery.Header.HeaderHash) + So(respGetRequest.Request.Header.Hash, ShouldResemble, writeQuery.Header.Hash) // sending read query var readQuery *wt.Request diff --git a/worker/errors.go b/worker/errors.go index 439a8cb2e..02f832ea7 100644 --- a/worker/errors.go +++ b/worker/errors.go @@ -39,4 +39,7 @@ var ( // ErrSpaceLimitExceeded defines errors on disk space exceeding limit. ErrSpaceLimitExceeded = errors.New("space limit exceeded") + + // ErrUnknownMuxRequest indicates that a multiplexing request endpoint is not found. 
+ ErrUnknownMuxRequest = errors.New("unknown multiplexing request") ) diff --git a/worker/types/ack_type.go b/worker/types/ack_type.go index 564c70cf4..f386e7556 100644 --- a/worker/types/ack_type.go +++ b/worker/types/ack_type.go @@ -17,8 +17,6 @@ package types import ( - "bytes" - "encoding/binary" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -38,9 +36,9 @@ type AckHeader struct { // SignedAckHeader defines client signed ack entity. type SignedAckHeader struct { AckHeader - HeaderHash hash.Hash `json:"hh"` - Signee *asymmetric.PublicKey `json:"e"` - Signature *asymmetric.Signature `json:"s"` + Hash hash.Hash `json:"hh"` + Signee *asymmetric.PublicKey `json:"e"` + Signature *asymmetric.Signature `json:"s"` } // Ack defines a whole client ack request entity. @@ -52,57 +50,17 @@ type Ack struct { // AckResponse defines client ack response entity. type AckResponse struct{} -// Serialize structure to bytes. -func (h *AckHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(h.Response.Serialize()) - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedAckHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.AckHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in ack header. 
func (sh *SignedAckHeader) Verify() (err error) { // verify response if err = sh.Response.Verify(); err != nil { return } - if err = verifyHash(&sh.AckHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.AckHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -119,24 +77,17 @@ func (sh *SignedAckHeader) Sign(signer *asymmetric.PrivateKey, verifyReqHeader b } // build hash - buildHash(&sh.AckHeader, &sh.HeaderHash) + if err = buildHash(&sh.AckHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (a *Ack) Serialize() []byte { - if a == nil { - return []byte{'\000'} - } - - return a.Header.Serialize() -} - // Verify checks hash and signature in ack. func (a *Ack) Verify() error { return a.Header.Verify() @@ -148,9 +99,9 @@ func (a *Ack) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err err return a.Header.Sign(signer, verifyReqHeader) } -// ResponseHeaderHash returns the deep shadowed Response HeaderHash field. -func (sh *SignedAckHeader) ResponseHeaderHash() hash.Hash { - return sh.AckHeader.Response.HeaderHash +// ResponseHash returns the deep shadowed Response Hash field. +func (sh *SignedAckHeader) ResponseHash() hash.Hash { + return sh.AckHeader.Response.Hash } // SignedRequestHeader returns the deep shadowed Request reference. 
diff --git a/worker/types/ack_type_gen.go b/worker/types/ack_type_gen.go index 10e1687b0..47511dcee 100644 --- a/worker/types/ack_type_gen.go +++ b/worker/types/ack_type_gen.go @@ -116,7 +116,7 @@ func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { o = append(o, 0x83) o = hsp.AppendTime(o, z.AckHeader.Timestamp) o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -138,6 +138,6 @@ func (z *SignedAckHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 10 + 1 + 9 + z.AckHeader.Response.Msgsize() + 7 + z.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 11 + z.HeaderHash.Msgsize() + s += 10 + 1 + 9 + z.AckHeader.Response.Msgsize() + 7 + z.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 5 + z.Hash.Msgsize() return } diff --git a/worker/types/init_service_type.go b/worker/types/init_service_type.go index 8c97f1c5e..12d9a0786 100644 --- a/worker/types/init_service_type.go +++ b/worker/types/init_service_type.go @@ -17,15 +17,10 @@ package types import ( - "bytes" - "encoding/binary" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - "github.com/CovenantSQL/CovenantSQL/utils" ) //go:generate hsp @@ -37,17 +32,17 @@ type InitService struct { // ResourceMeta defines single database resource meta. 
type ResourceMeta struct { - Node uint16 // reserved node count - Space uint64 // reserved storage space in bytes - Memory uint64 // reserved memory in bytes - LoadAvgPerCPU uint64 // max loadAvg15 per CPU + Node uint16 // reserved node count + Space uint64 // reserved storage space in bytes + Memory uint64 // reserved memory in bytes + LoadAvgPerCPU uint64 // max loadAvg15 per CPU EncryptionKey string `hspack:"-"` // encryption key for database instance } // ServiceInstance defines single instance to be initialized. type ServiceInstance struct { DatabaseID proto.DatabaseID - Peers *kayak.Peers + Peers *proto.Peers ResourceMeta ResourceMeta GenesisBlock *ct.Block } @@ -60,9 +55,9 @@ type InitServiceResponseHeader struct { // SignedInitServiceResponseHeader defines signed worker service init response header. type SignedInitServiceResponseHeader struct { InitServiceResponseHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // InitServiceResponse defines worker service init response. @@ -70,90 +65,14 @@ type InitServiceResponse struct { Header SignedInitServiceResponseHeader } -// Serialize structure to bytes. -func (m *ResourceMeta) Serialize() []byte { - if m == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, m.Node) - binary.Write(buf, binary.LittleEndian, m.Space) - binary.Write(buf, binary.LittleEndian, m.Memory) - - return buf.Bytes() -} - -// Serialize structure to bytes. 
-func (i *ServiceInstance) Serialize() []byte { - if i == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.WriteString(string(i.DatabaseID)) - buf.Write(i.Peers.Serialize()) - buf.Write(i.ResourceMeta.Serialize()) - if i.GenesisBlock != nil { - genesisBlock, _ := utils.EncodeMsgPack(i.GenesisBlock) - buf.Write(genesisBlock.Bytes()) - } else { - buf.Write([]byte{'\000'}) - } - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (h *InitServiceResponseHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, uint64(len(h.Instances))) - for _, instance := range h.Instances { - buf.Write(instance.Serialize()) - } - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedInitServiceResponseHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.InitServiceResponseHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in init service response header. func (sh *SignedInitServiceResponseHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.InitServiceResponseHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.InitServiceResponseHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -162,24 +81,17 @@ func (sh *SignedInitServiceResponseHeader) Verify() (err error) { // Sign the request. 
func (sh *SignedInitServiceResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.InitServiceResponseHeader, &sh.HeaderHash) + if err = buildHash(&sh.InitServiceResponseHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (rs *InitServiceResponse) Serialize() []byte { - if rs == nil { - return []byte{'\000'} - } - - return rs.Header.Serialize() -} - // Verify checks hash and signature in init service response header. func (rs *InitServiceResponse) Verify() error { return rs.Header.Verify() diff --git a/worker/types/init_service_type_gen.go b/worker/types/init_service_type_gen.go index d250cd883..1fcce43a4 100644 --- a/worker/types/init_service_type_gen.go +++ b/worker/types/init_service_type_gen.go @@ -188,7 +188,7 @@ func (z *SignedInitServiceResponseHeader) MarshalHash() (o []byte, err error) { } } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -214,6 +214,6 @@ func (z *SignedInitServiceResponseHeader) Msgsize() (s int) { for za0001 := range z.InitServiceResponseHeader.Instances { s += z.InitServiceResponseHeader.Instances[za0001].Msgsize() } - s += 11 + z.HeaderHash.Msgsize() + s += 5 + z.Hash.Msgsize() return } diff --git a/worker/types/no_ack_report_type.go b/worker/types/no_ack_report_type.go index aa163176e..bda6aee91 100644 --- a/worker/types/no_ack_report_type.go +++ b/worker/types/no_ack_report_type.go @@ -17,13 +17,10 @@ package types import ( - "bytes" - "encoding/binary" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -39,9 +36,9 @@ type 
NoAckReportHeader struct { // SignedNoAckReportHeader defines worker worker issued/signed client no ack report. type SignedNoAckReportHeader struct { NoAckReportHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // NoAckReport defines whole worker no client ack report. @@ -55,15 +52,15 @@ type AggrNoAckReportHeader struct { NodeID proto.NodeID // aggregated report node id Timestamp time.Time // time in UTC zone Reports []SignedNoAckReportHeader // no-ack reports - Peers *kayak.Peers // serving peers during report + Peers *proto.Peers // serving peers during report } // SignedAggrNoAckReportHeader defines worker leader aggregated/signed client no ack report. type SignedAggrNoAckReportHeader struct { AggrNoAckReportHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // AggrNoAckReport defines whole worker leader no client ack report. @@ -72,46 +69,6 @@ type AggrNoAckReport struct { Header SignedAggrNoAckReportHeader } -// Serialize structure to bytes. -func (h *NoAckReportHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) - buf.Write(h.Response.Serialize()) - - return buf.Bytes() -} - -// Serialize structure to bytes. 
-func (sh *SignedNoAckReportHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.NoAckReportHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in signed no ack report header. func (sh *SignedNoAckReportHeader) Verify() (err error) { // verify original response @@ -119,11 +76,11 @@ func (sh *SignedNoAckReportHeader) Verify() (err error) { return } // verify hash - if err = verifyHash(&sh.NoAckReportHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.NoAckReportHeader, &sh.Hash); err != nil { return } // validate signature - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -137,24 +94,17 @@ func (sh *SignedNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err erro } // build hash - buildHash(&sh.NoAckReportHeader, &sh.HeaderHash) + if err = buildHash(&sh.NoAckReportHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (r *NoAckReport) Serialize() []byte { - if r == nil { - return []byte{'\000'} - } - - return r.Header.Serialize() -} - // Verify checks hash and signature in whole no ack report. func (r *NoAckReport) Verify() error { return r.Header.Verify() @@ -165,50 +115,6 @@ func (r *NoAckReport) Sign(signer *asymmetric.PrivateKey) error { return r.Header.Sign(signer) } -// Serialize structure to bytes. 
-func (h *AggrNoAckReportHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) - binary.Write(buf, binary.LittleEndian, uint64(len(h.Reports))) - for _, r := range h.Reports { - buf.Write(r.Serialize()) - } - buf.Write(h.Peers.Serialize()) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedAggrNoAckReportHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.AggrNoAckReportHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in aggregated no ack report. 
func (sh *SignedAggrNoAckReportHeader) Verify() (err error) { // verify original reports @@ -218,11 +124,11 @@ func (sh *SignedAggrNoAckReportHeader) Verify() (err error) { } } // verify hash - if err = verifyHash(&sh.AggrNoAckReportHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.AggrNoAckReportHeader, &sh.Hash); err != nil { return } // verify signature - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -237,24 +143,17 @@ func (sh *SignedAggrNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err } // verify hash - buildHash(&sh.AggrNoAckReportHeader, &sh.HeaderHash) + if err = buildHash(&sh.AggrNoAckReportHeader, &sh.Hash); err != nil { + return + } // verify signature - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (r *AggrNoAckReport) Serialize() []byte { - if r == nil { - return []byte{'\000'} - } - - return r.Header.Serialize() -} - // Verify the whole aggregation no ack report. 
func (r *AggrNoAckReport) Verify() (err error) { return r.Header.Verify() diff --git a/worker/types/no_ack_report_type_gen.go b/worker/types/no_ack_report_type_gen.go index f8c660233..8b1e57cd9 100644 --- a/worker/types/no_ack_report_type_gen.go +++ b/worker/types/no_ack_report_type_gen.go @@ -169,7 +169,7 @@ func (z *SignedAggrNoAckReportHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -191,7 +191,7 @@ func (z *SignedAggrNoAckReportHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 22 + z.AggrNoAckReportHeader.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 22 + z.AggrNoAckReportHeader.Msgsize() + 5 + z.Hash.Msgsize() return } @@ -236,7 +236,7 @@ func (z *SignedNoAckReportHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -258,6 +258,6 @@ func (z *SignedNoAckReportHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() + 5 + z.Hash.Msgsize() return } diff --git a/worker/types/request_type.go b/worker/types/request_type.go index 26101475b..01d94cb85 100644 --- a/worker/types/request_type.go +++ b/worker/types/request_type.go @@ -17,19 +17,14 @@ package types import ( - "bytes" - "database/sql" - "encoding/binary" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" 
"github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils" ) //go:generate hsp -//hsp:ignore Query Queries Payload RequestPayload Request // QueryType enumerates available query type, currently read/write. type QueryType int32 @@ -41,21 +36,16 @@ const ( WriteQuery ) +// NamedArg defines the named argument structure for database. +type NamedArg struct { + Name string + Value interface{} +} + // Query defines single query. type Query struct { Pattern string - Args []sql.NamedArg -} - -func (t QueryType) String() string { - switch t { - case ReadQuery: - return "read" - case WriteQuery: - return "write" - default: - return "unknown" - } + Args []NamedArg } // RequestPayload defines a queries payload. @@ -85,9 +75,9 @@ type QueryKey struct { // SignedRequestHeader defines a signed query request header. type SignedRequestHeader struct { RequestHeader - HeaderHash hash.Hash `json:"hh"` - Signee *asymmetric.PublicKey `json:"e"` - Signature *asymmetric.Signature `json:"s"` + Hash hash.Hash `json:"hh"` + Signee *asymmetric.PublicKey `json:"e"` + Signature *asymmetric.Signature `json:"s"` } // Request defines a complete query request. @@ -97,67 +87,25 @@ type Request struct { Payload RequestPayload `json:"p"` } -// Serialize returns byte based binary form of struct. -func (p *RequestPayload) Serialize() []byte { - // HACK(xq262144): currently use idiomatic serialization for hash generation - buf, _ := utils.EncodeMsgPack(p) - - return buf.Bytes() -} - -// Serialize returns bytes based binary form of struct. 
-func (h *RequestHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, h.QueryType) - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - buf.WriteString(string(h.DatabaseID)) - binary.Write(buf, binary.LittleEndian, h.ConnectionID) - binary.Write(buf, binary.LittleEndian, h.SeqNo) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) // use nanoseconds unix epoch - binary.Write(buf, binary.LittleEndian, h.BatchCount) - buf.Write(h.QueriesHash[:]) - - return buf.Bytes() -} - -// Serialize returns bytes based binary form of struct. -func (sh *SignedRequestHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.RequestHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') +func (t QueryType) String() string { + switch t { + case ReadQuery: + return "read" + case WriteQuery: + return "write" + default: + return "unknown" } - - return buf.Bytes() } // Verify checks hash and signature in request header. func (sh *SignedRequestHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.RequestHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.RequestHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return nil @@ -166,33 +114,21 @@ func (sh *SignedRequestHeader) Verify() (err error) { // Sign the request. 
func (sh *SignedRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // compute hash - buildHash(&sh.RequestHeader, &sh.HeaderHash) + if err = buildHash(&sh.RequestHeader, &sh.Hash); err != nil { + return + } if signer == nil { return ErrSignRequest } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize returns bytes based binary form of struct. -func (r *Request) Serialize() []byte { - if r == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(r.Header.Serialize()) - buf.Write(r.Payload.Serialize()) - - return buf.Bytes() -} - // Verify checks hash and signature in whole request. func (r *Request) Verify() (err error) { // verify payload hash in signed header @@ -209,7 +145,9 @@ func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { r.Header.BatchCount = uint64(len(r.Payload.Queries)) // compute payload hash - buildHash(&r.Payload, &r.Header.QueriesHash) + if err = buildHash(&r.Payload, &r.Header.QueriesHash); err != nil { + return + } return r.Header.Sign(signer) } diff --git a/worker/types/request_type_gen.go b/worker/types/request_type_gen.go index 542809bd5..c20cd58ba 100644 --- a/worker/types/request_type_gen.go +++ b/worker/types/request_type_gen.go @@ -6,6 +6,59 @@ import ( hsp "github.com/CovenantSQL/HashStablePack/marshalhash" ) +// MarshalHash marshals for hash +func (z NamedArg) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o, err = hsp.AppendIntf(o, z.Value) + if err != nil { + return + } + o = append(o, 0x82) + o = hsp.AppendString(o, z.Name) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z NamedArg) Msgsize() (s int) { + s = 1 + 6 + hsp.GuessSize(z.Value) + 5 + hsp.StringPrefixSize + len(z.Name) + return +} + +// MarshalHash 
marshals for hash +func (z *Query) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Args))) + for za0001 := range z.Args { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendString(o, z.Args[za0001].Name) + o = append(o, 0x82) + o, err = hsp.AppendIntf(o, z.Args[za0001].Value) + if err != nil { + return + } + } + o = append(o, 0x82) + o = hsp.AppendString(o, z.Pattern) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Query) Msgsize() (s int) { + s = 1 + 5 + hsp.ArrayHeaderSize + for za0001 := range z.Args { + s += 1 + 5 + hsp.StringPrefixSize + len(z.Args[za0001].Name) + 6 + hsp.GuessSize(z.Args[za0001].Value) + } + s += 8 + hsp.StringPrefixSize + len(z.Pattern) + return +} + // MarshalHash marshals for hash func (z *QueryKey) MarshalHash() (o []byte, err error) { var b []byte @@ -44,6 +97,59 @@ func (z QueryType) Msgsize() (s int) { return } +// MarshalHash marshals for hash +func (z *Request) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + // map header, size 1 + o = append(o, 0x83, 0x83, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries))) + for za0001 := range z.Payload.Queries { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendString(o, z.Payload.Queries[za0001].Pattern) + o = append(o, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries[za0001].Args))) + for za0002 := range z.Payload.Queries[za0001].Args { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendString(o, z.Payload.Queries[za0001].Args[za0002].Name) + o = append(o, 0x82) + o, err = hsp.AppendIntf(o, z.Payload.Queries[za0001].Args[za0002].Value) + if err != nil { + return + } + } + } + o = append(o, 0x83) + if oTemp, err := 
z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Request) Msgsize() (s int) { + s = 1 + 8 + 1 + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Payload.Queries { + s += 1 + 8 + hsp.StringPrefixSize + len(z.Payload.Queries[za0001].Pattern) + 5 + hsp.ArrayHeaderSize + for za0002 := range z.Payload.Queries[za0001].Args { + s += 1 + 5 + hsp.StringPrefixSize + len(z.Payload.Queries[za0001].Args[za0002].Name) + 6 + hsp.GuessSize(z.Payload.Queries[za0001].Args[za0002].Value) + } + } + s += 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + // MarshalHash marshals for hash func (z *RequestHeader) MarshalHash() (o []byte, err error) { var b []byte @@ -86,6 +192,32 @@ func (z *RequestHeader) Msgsize() (s int) { return } +// MarshalHash marshals for hash +func (z *RequestPayload) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Queries))) + for za0001 := range z.Queries { + if oTemp, err := z.Queries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RequestPayload) Msgsize() (s int) { + s = 1 + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Queries { + s += z.Queries[za0001].Msgsize() + } + return +} + // MarshalHash marshals for hash func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { var b []byte @@ -118,7 +250,7 @@ func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = 
append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -140,6 +272,6 @@ func (z *SignedRequestHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 14 + z.RequestHeader.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 14 + z.RequestHeader.Msgsize() + 5 + z.Hash.Msgsize() return } diff --git a/worker/types/request_type_gen_test.go b/worker/types/request_type_gen_test.go index a47471bfb..93e351222 100644 --- a/worker/types/request_type_gen_test.go +++ b/worker/types/request_type_gen_test.go @@ -9,6 +9,80 @@ import ( "testing" ) +func TestMarshalHashNamedArg(t *testing.T) { + v := NamedArg{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashNamedArg(b *testing.B) { + v := NamedArg{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgNamedArg(b *testing.B) { + v := NamedArg{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQuery(t *testing.T) { + v := Query{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQuery(b *testing.B) { + v := Query{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQuery(b *testing.B) { + v := Query{} + bts := make([]byte, 0, v.Msgsize()) + bts, 
_ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + func TestMarshalHashQueryKey(t *testing.T) { v := QueryKey{} binary.Read(rand.Reader, binary.BigEndian, &v) @@ -46,6 +120,43 @@ func BenchmarkAppendMsgQueryKey(b *testing.B) { } } +func TestMarshalHashRequest(t *testing.T) { + v := Request{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequest(b *testing.B) { + v := Request{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequest(b *testing.B) { + v := Request{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + func TestMarshalHashRequestHeader(t *testing.T) { v := RequestHeader{} binary.Read(rand.Reader, binary.BigEndian, &v) @@ -83,6 +194,43 @@ func BenchmarkAppendMsgRequestHeader(b *testing.B) { } } +func TestMarshalHashRequestPayload(t *testing.T) { + v := RequestPayload{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequestPayload(b *testing.B) { + v := RequestPayload{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequestPayload(b *testing.B) { + v := RequestPayload{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; 
i++ { + bts, _ = v.MarshalHash() + } +} + func TestMarshalHashSignedRequestHeader(t *testing.T) { v := SignedRequestHeader{} binary.Read(rand.Reader, binary.BigEndian, &v) diff --git a/worker/types/response_type.go b/worker/types/response_type.go index 093fe4749..de55945af 100644 --- a/worker/types/response_type.go +++ b/worker/types/response_type.go @@ -17,14 +17,11 @@ package types import ( - "bytes" - "encoding/binary" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils" "github.com/pkg/errors" ) @@ -57,9 +54,9 @@ type ResponseHeader struct { // SignedResponseHeader defines a signed query response header. type SignedResponseHeader struct { ResponseHeader - HeaderHash hash.Hash `json:"h"` - Signee *asymmetric.PublicKey `json:"e"` - Signature *asymmetric.Signature `json:"s"` + Hash hash.Hash `json:"h"` + Signee *asymmetric.PublicKey `json:"e"` + Signature *asymmetric.Signature `json:"s"` } // Response defines a complete query response. @@ -68,83 +65,6 @@ type Response struct { Payload ResponsePayload `json:"p"` } -// Serialize structure to bytes. -func (r *ResponseRow) Serialize() []byte { - // HACK(xq262144), currently use idiomatic serialization for hash generation - buf, _ := utils.EncodeMsgPack(r) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (r *ResponsePayload) Serialize() []byte { - if r == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, uint64(len(r.Columns))) - for _, c := range r.Columns { - buf.WriteString(c) - } - - binary.Write(buf, binary.LittleEndian, uint64(len(r.DeclTypes))) - for _, t := range r.DeclTypes { - buf.WriteString(t) - } - - binary.Write(buf, binary.LittleEndian, uint64(len(r.Rows))) - for _, row := range r.Rows { - buf.Write(row.Serialize()) - } - - return buf.Bytes() -} - -// Serialize structure to bytes. 
-func (h *ResponseHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(h.Request.Serialize()) - binary.Write(buf, binary.LittleEndian, uint64(len(h.NodeID))) - buf.WriteString(string(h.NodeID)) - binary.Write(buf, binary.LittleEndian, int64(h.Timestamp.UnixNano())) - binary.Write(buf, binary.LittleEndian, h.RowCount) - binary.Write(buf, binary.LittleEndian, h.LogOffset) - buf.Write(h.DataHash[:]) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedResponseHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.ResponseHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in response header. 
func (sh *SignedResponseHeader) Verify() (err error) { // verify original request header @@ -152,11 +72,11 @@ func (sh *SignedResponseHeader) Verify() (err error) { return } // verify hash - if err = verifyHash(&sh.ResponseHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.ResponseHeader, &sh.Hash); err != nil { return } // verify signature - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } @@ -172,29 +92,17 @@ func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) } // build our hash - buildHash(&sh.ResponseHeader, &sh.HeaderHash) + if err = buildHash(&sh.ResponseHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (sh *Response) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.Header.Serialize()) - buf.Write(sh.Payload.Serialize()) - - return buf.Bytes() -} - // Verify checks hash and signature in whole response. 
func (sh *Response) Verify() (err error) { // verify data hash in header @@ -211,7 +119,9 @@ func (sh *Response) Sign(signer *asymmetric.PrivateKey) (err error) { sh.Header.RowCount = uint64(len(sh.Payload.Rows)) // build hash in header - buildHash(&sh.Payload, &sh.Header.DataHash) + if err = buildHash(&sh.Payload, &sh.Header.DataHash); err != nil { + return + } // sign the request return sh.Header.Sign(signer) diff --git a/worker/types/response_type_gen.go b/worker/types/response_type_gen.go index 07a3d20fa..898cbfa6e 100644 --- a/worker/types/response_type_gen.go +++ b/worker/types/response_type_gen.go @@ -182,7 +182,7 @@ func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -204,6 +204,6 @@ func (z *SignedResponseHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 15 + z.ResponseHeader.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 15 + z.ResponseHeader.Msgsize() + 5 + z.Hash.Msgsize() return } diff --git a/worker/types/types_test.go b/worker/types/types_test.go index 9df73ff44..d095ecf46 100644 --- a/worker/types/types_test.go +++ b/worker/types/types_test.go @@ -17,16 +17,11 @@ package types import ( - "bytes" - "database/sql" "testing" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/kayak" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils" "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" ) @@ -41,35 +36,6 @@ func getCommKeys() (*asymmetric.PrivateKey, *asymmetric.PublicKey) { return asymmetric.PrivKeyFromBytes(testPriv) } -type myTestBytes []byte - -func (bytes myTestBytes) Serialize() (res []byte) { - res = make([]byte, len(bytes)) - copy(res, bytes[:]) - return -} - -func Test_buildHash(t *testing.T) { - Convey("build", t, func() { - var a, b hash.Hash - var tb myTestBytes = []byte("test") - buildHash(tb, &a) - b = hash.THashH([]byte("test")) - So(a, ShouldResemble, b) - }) - - Convey("test verify", t, func() { - var a, b hash.Hash - var tb myTestBytes = []byte("test") - var err error - buildHash(tb, &a) - err = verifyHash(tb, &a) - So(err, ShouldBeNil) - err = verifyHash(tb, &b) - So(err, ShouldNotBeNil) - }) -} - func TestSignedRequestHeader_Sign(t *testing.T) { privKey, _ := getCommKeys() @@ -126,7 +92,7 @@ func TestRequest_Sign(t *testing.T) { Queries: []Query{ { Pattern: "INSERT INTO test VALUES(?)", - Args: []sql.NamedArg{ + Args: []NamedArg{ { Value: 1, }, @@ -134,7 +100,7 @@ func TestRequest_Sign(t *testing.T) { }, { Pattern: "INSERT INTO test VALUES(?)", - Args: []sql.NamedArg{ + Args: []NamedArg{ { Value: "happy", }, @@ -155,28 +121,6 @@ func TestRequest_Sign(t *testing.T) { err = verifyHash(&req.Payload, &req.Header.QueriesHash) So(err, ShouldBeNil) - Convey("serialize", func() { - So(req.Serialize(), ShouldNotBeEmpty) - So((*Request)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*RequestHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*RequestPayload)(nil).Serialize(), ShouldNotBeEmpty) - So((*SignedRequestHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := req.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - req.Header.Signee = nil - req.Header.Signature = nil - - s, err = req.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(req.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { 
err = req.Verify() So(err, ShouldBeNil) @@ -192,7 +136,7 @@ func TestRequest_Sign(t *testing.T) { Convey("header change without signing", func() { req.Header.Timestamp = req.Header.Timestamp.Add(time.Second) - buildHash(&req.Header.RequestHeader, &req.Header.HeaderHash) + buildHash(&req.Header.RequestHeader, &req.Header.Hash) err = req.Verify() So(err, ShouldNotBeNil) }) @@ -268,9 +212,7 @@ func TestResponse_Sign(t *testing.T) { }, } - var data *bytes.Buffer var err error - var rres Response // sign directly, embedded original request is not filled err = res.Sign(privKey) @@ -292,35 +234,6 @@ func TestResponse_Sign(t *testing.T) { err = verifyHash(&res.Payload, &res.Header.DataHash) So(err, ShouldBeNil) - Convey("serialize", func() { - So(res.Serialize(), ShouldNotBeEmpty) - So((*Response)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*ResponseRow)(nil).Serialize(), ShouldNotBeEmpty) - So((*ResponseHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*ResponsePayload)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedResponseHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - data, err = utils.EncodeMsgPack(res.Header) - So(err, ShouldBeNil) - err = utils.DecodeMsgPack(data.Bytes(), &rres.Header) - So(err, ShouldBeNil) - So(&res.Header, ShouldResemble, &rres.Header) - - s, err := res.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - res.Header.Signee = nil - res.Header.Signature = nil - - s, err = res.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(res.Serialize(), ShouldNotBeEmpty) - }) - // verify Convey("verify", func() { err = res.Verify() @@ -346,7 +259,7 @@ func TestResponse_Sign(t *testing.T) { }) Convey("header change without signing", func() { res.Header.Timestamp = res.Header.Timestamp.Add(time.Second) - buildHash(&res.Header.ResponseHeader, &res.Header.HeaderHash) + buildHash(&res.Header.ResponseHeader, &res.Header.Hash) err = res.Verify() So(err, 
ShouldNotBeNil) @@ -385,9 +298,7 @@ func TestAck_Sign(t *testing.T) { }, } - var data *bytes.Buffer var err error - var rack Ack Convey("get query key", func() { key := ack.Header.SignedRequestHeader().GetQueryKey() @@ -416,33 +327,6 @@ func TestAck_Sign(t *testing.T) { err = ack.Sign(privKey, true) So(err, ShouldBeNil) - Convey("serialize", func() { - So(ack.Serialize(), ShouldNotBeEmpty) - So((*Ack)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*AckHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedAckHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - data, err = utils.EncodeMsgPack(ack.Header) - So(err, ShouldBeNil) - err = utils.DecodeMsgPack(data.Bytes(), &rack.Header) - So(err, ShouldBeNil) - So(&ack.Header, ShouldResemble, &rack.Header) - - s, err := ack.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - ack.Header.Signee = nil - ack.Header.Signature = nil - - s, err = ack.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(ack.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = ack.Verify() So(err, ShouldBeNil) @@ -468,7 +352,7 @@ func TestAck_Sign(t *testing.T) { Convey("header change without signing", func() { ack.Header.Timestamp = ack.Header.Timestamp.Add(time.Second) - buildHash(&ack.Header.AckHeader, &ack.Header.HeaderHash) + buildHash(&ack.Header.AckHeader, &ack.Header.Hash) err = ack.Verify() So(err, ShouldNotBeNil) @@ -525,27 +409,6 @@ func TestNoAckReport_Sign(t *testing.T) { err = noAck.Sign(privKey) So(err, ShouldBeNil) - Convey("serialize", func() { - So(noAck.Serialize(), ShouldNotBeEmpty) - So((*NoAckReport)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*NoAckReportHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedNoAckReportHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := noAck.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - noAck.Header.Signee = 
nil - noAck.Header.Signature = nil - - s, err = noAck.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(noAck.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = noAck.Verify() So(err, ShouldBeNil) @@ -574,7 +437,7 @@ func TestNoAckReport_Sign(t *testing.T) { Convey("header change without signing", func() { noAck.Header.Timestamp = noAck.Header.Timestamp.Add(time.Second) - buildHash(&noAck.Header.NoAckReportHeader, &noAck.Header.HeaderHash) + buildHash(&noAck.Header.NoAckReportHeader, &noAck.Header.Hash) err = noAck.Verify() So(err, ShouldNotBeNil) @@ -640,20 +503,13 @@ func TestAggrNoAckReport_Sign(t *testing.T) { }, }, }, - Peers: &kayak.Peers{ - Term: uint64(1), - Leader: &kayak.Server{ - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - { - Role: proto.Follower, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), }, }, }, @@ -687,27 +543,6 @@ func TestAggrNoAckReport_Sign(t *testing.T) { err = aggrNoAck.Sign(privKey) So(err, ShouldBeNil) - Convey("serialize", func() { - So(aggrNoAck.Serialize(), ShouldNotBeEmpty) - So((*AggrNoAckReport)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*AggrNoAckReportHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedAggrNoAckReportHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := aggrNoAck.MarshalHash() - So(err, ShouldBeNil) - So(s, 
ShouldNotBeEmpty) - - // test nils - aggrNoAck.Header.Signee = nil - aggrNoAck.Header.Signature = nil - - s, err = aggrNoAck.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(aggrNoAck.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = aggrNoAck.Verify() So(err, ShouldBeNil) @@ -743,7 +578,7 @@ func TestAggrNoAckReport_Sign(t *testing.T) { Convey("header change without signing", func() { aggrNoAck.Header.Timestamp = aggrNoAck.Header.Timestamp.Add(time.Second) - buildHash(&aggrNoAck.Header.AggrNoAckReportHeader, &aggrNoAck.Header.HeaderHash) + buildHash(&aggrNoAck.Header.AggrNoAckReportHeader, &aggrNoAck.Header.Hash) err = aggrNoAck.Verify() So(err, ShouldNotBeNil) @@ -753,7 +588,7 @@ func TestAggrNoAckReport_Sign(t *testing.T) { } func TestInitServiceResponse_Sign(t *testing.T) { - privKey, pubKey := getCommKeys() + privKey, _ := getCommKeys() Convey("sign", t, func() { var err error @@ -764,24 +599,15 @@ func TestInitServiceResponse_Sign(t *testing.T) { Instances: []ServiceInstance{ { DatabaseID: proto.DatabaseID("db1"), - Peers: &kayak.Peers{ - Term: uint64(1), - Leader: &kayak.Server{ - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - { - Role: proto.Follower, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), }, }, - PubKey: pubKey, - Signature: nil, }, // TODO(xq262144), should integrated with genesis block serialization 
test GenesisBlock: nil, @@ -794,28 +620,6 @@ func TestInitServiceResponse_Sign(t *testing.T) { // sign err = initServiceResponse.Sign(privKey) - Convey("serialize", func() { - So(initServiceResponse.Serialize(), ShouldNotBeEmpty) - So((*ServiceInstance)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*InitServiceResponse)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*InitServiceResponseHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedInitServiceResponseHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := initServiceResponse.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - // test nils - initServiceResponse.Header.Signee = nil - initServiceResponse.Header.Signature = nil - - s, err = initServiceResponse.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(initServiceResponse.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = initServiceResponse.Verify() So(err, ShouldBeNil) @@ -830,7 +634,7 @@ func TestInitServiceResponse_Sign(t *testing.T) { Convey("header change without signing", func() { initServiceResponse.Header.Instances[0].DatabaseID = proto.DatabaseID("db2") - buildHash(&initServiceResponse.Header.InitServiceResponseHeader, &initServiceResponse.Header.HeaderHash) + buildHash(&initServiceResponse.Header.InitServiceResponseHeader, &initServiceResponse.Header.Hash) s, err := initServiceResponse.Header.InitServiceResponseHeader.MarshalHash() So(err, ShouldBeNil) @@ -844,7 +648,7 @@ func TestInitServiceResponse_Sign(t *testing.T) { } func TestUpdateService_Sign(t *testing.T) { - privKey, pubKey := getCommKeys() + privKey, _ := getCommKeys() Convey("sign", t, func() { var err error @@ -855,24 +659,15 @@ func TestUpdateService_Sign(t *testing.T) { Op: CreateDB, Instance: ServiceInstance{ DatabaseID: proto.DatabaseID("db1"), - Peers: &kayak.Peers{ - Term: uint64(1), - Leader: &kayak.Server{ - Role: proto.Leader, - ID: 
proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - Servers: []*kayak.Server{ - { - Role: proto.Leader, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - }, - { - Role: proto.Follower, - ID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), }, }, - PubKey: pubKey, - Signature: nil, }, // TODO(xq262144), should integrated with genesis block serialization test GenesisBlock: nil, @@ -884,26 +679,6 @@ func TestUpdateService_Sign(t *testing.T) { // sign err = updateServiceReq.Sign(privKey) - Convey("serialize", func() { - So(updateServiceReq.Serialize(), ShouldNotBeEmpty) - So((*UpdateService)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*UpdateServiceHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - So((*SignedUpdateServiceHeader)(nil).Serialize(), ShouldResemble, []byte{'\000'}) - - s, err := updateServiceReq.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - updateServiceReq.Header.Signee = nil - updateServiceReq.Header.Signature = nil - - s, err = updateServiceReq.MarshalHash() - So(err, ShouldBeNil) - So(s, ShouldNotBeEmpty) - - So(updateServiceReq.Serialize(), ShouldNotBeEmpty) - }) - Convey("verify", func() { err = updateServiceReq.Verify() So(err, ShouldBeNil) @@ -917,7 +692,7 @@ func TestUpdateService_Sign(t *testing.T) { Convey("header change without signing", func() { updateServiceReq.Header.Instance.DatabaseID = proto.DatabaseID("db2") - buildHash(&updateServiceReq.Header.UpdateServiceHeader, &updateServiceReq.Header.HeaderHash) + 
buildHash(&updateServiceReq.Header.UpdateServiceHeader, &updateServiceReq.Header.Hash) err = updateServiceReq.Verify() So(err, ShouldNotBeNil) diff --git a/worker/types/update_service_type.go b/worker/types/update_service_type.go index ccea45e27..7d8689afa 100644 --- a/worker/types/update_service_type.go +++ b/worker/types/update_service_type.go @@ -17,9 +17,6 @@ package types import ( - "bytes" - "encoding/binary" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" @@ -48,9 +45,9 @@ type UpdateServiceHeader struct { // SignedUpdateServiceHeader defines signed service update header. type SignedUpdateServiceHeader struct { UpdateServiceHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + Hash hash.Hash + Signee *asymmetric.PublicKey + Signature *asymmetric.Signature } // UpdateService defines service update type. @@ -62,52 +59,14 @@ type UpdateService struct { // UpdateServiceResponse defines empty response entity. type UpdateServiceResponse struct{} -// Serialize structure to bytes. -func (h *UpdateServiceHeader) Serialize() []byte { - if h == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - binary.Write(buf, binary.LittleEndian, int32(h.Op)) - buf.Write(h.Instance.Serialize()) - - return buf.Bytes() -} - -// Serialize structure to bytes. -func (sh *SignedUpdateServiceHeader) Serialize() []byte { - if sh == nil { - return []byte{'\000'} - } - - buf := new(bytes.Buffer) - - buf.Write(sh.UpdateServiceHeader.Serialize()) - buf.Write(sh.HeaderHash[:]) - if sh.Signee != nil { - buf.Write(sh.Signee.Serialize()) - } else { - buf.WriteRune('\000') - } - if sh.Signature != nil { - buf.Write(sh.Signature.Serialize()) - } else { - buf.WriteRune('\000') - } - - return buf.Bytes() -} - // Verify checks hash and signature in update service header. 
func (sh *SignedUpdateServiceHeader) Verify() (err error) { // verify hash - if err = verifyHash(&sh.UpdateServiceHeader, &sh.HeaderHash); err != nil { + if err = verifyHash(&sh.UpdateServiceHeader, &sh.Hash); err != nil { return } // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.HeaderHash[:], sh.Signee) { + if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { return ErrSignVerification } return @@ -116,24 +75,17 @@ func (sh *SignedUpdateServiceHeader) Verify() (err error) { // Sign the request. func (sh *SignedUpdateServiceHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // build hash - buildHash(&sh.UpdateServiceHeader, &sh.HeaderHash) + if err = buildHash(&sh.UpdateServiceHeader, &sh.Hash); err != nil { + return + } // sign - sh.Signature, err = signer.Sign(sh.HeaderHash[:]) + sh.Signature, err = signer.Sign(sh.Hash[:]) sh.Signee = signer.PubKey() return } -// Serialize structure to bytes. -func (s *UpdateService) Serialize() []byte { - if s == nil { - return []byte{'\000'} - } - - return s.Header.Serialize() -} - // Verify checks hash and signature in update service. 
func (s *UpdateService) Verify() error { return s.Header.Verify() diff --git a/worker/types/update_service_type_gen.go b/worker/types/update_service_type_gen.go index 1d262f53a..c92211de3 100644 --- a/worker/types/update_service_type_gen.go +++ b/worker/types/update_service_type_gen.go @@ -41,7 +41,7 @@ func (z *SignedUpdateServiceHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) - if oTemp, err := z.HeaderHash.MarshalHash(); err != nil { + if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -63,7 +63,7 @@ func (z *SignedUpdateServiceHeader) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() + 11 + z.HeaderHash.Msgsize() + s += 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() + 5 + z.Hash.Msgsize() return } diff --git a/worker/types/util.go b/worker/types/util.go index fb6bbd152..a049bc07b 100644 --- a/worker/types/util.go +++ b/worker/types/util.go @@ -20,20 +20,27 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" ) -type canSerialize interface { - Serialize() []byte +type canMarshalHash interface { + MarshalHash() ([]byte, error) } -func verifyHash(data canSerialize, h *hash.Hash) (err error) { +func verifyHash(data canMarshalHash, h *hash.Hash) (err error) { var newHash hash.Hash - buildHash(data, &newHash) + if err = buildHash(data, &newHash); err != nil { + return + } if !newHash.IsEqual(h) { return ErrHashVerification } return } -func buildHash(data canSerialize, h *hash.Hash) { - newHash := hash.THashH(data.Serialize()) +func buildHash(data canMarshalHash, h *hash.Hash) (err error) { + var hashBytes []byte + if hashBytes, err = data.MarshalHash(); err != nil { + return + } + newHash := hash.THashH(hashBytes) copy(h[:], newHash[:]) + return } From 79ce086ddbd9e6bbade6995facb155f5bcacb7f1 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: 
Tue, 6 Nov 2018 01:18:09 +0800 Subject: [PATCH 02/32] Add logs --- kayak/runtime.go | 38 ++++++++++++++++++++++++++++++++++++++ kayak/tracker.go | 9 +++++++++ 2 files changed, 47 insertions(+) diff --git a/kayak/runtime.go b/kayak/runtime.go index ff6c69606..70457bb13 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -365,6 +365,8 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { return } + tm := time.Now() + r.peersLock.RLock() defer r.peersLock.RUnlock() @@ -394,6 +396,11 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { r.updateNextIndex(l) } + log.WithFields(log.Fields{ + "c": time.Now().Sub(tm).String(), + "t": l.Type, + }).Info("follower apply") + return } @@ -516,6 +523,18 @@ func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLo // decode log and send to commit channel to process res = make(chan *commitResult, 1) + var tm, tmDecode, tmEnqueue time.Time + + defer func(){ + log.WithFields(log.Fields{ + "d": tmDecode.Sub(tm).String(), + "q": tmEnqueue.Sub(tmDecode).String(), + "r": r.role.String(), + }).Info("commit result") + }() + + tm = time.Now() + if prepareLog == nil { res <- &commitResult{ err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), @@ -533,6 +552,8 @@ func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLo return } + tmDecode = time.Now() + req := &commitReq{ ctx: ctx, data: logReq, @@ -546,6 +567,8 @@ func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLo case r.commitCh <- req: } + tmEnqueue = time.Now() + return } @@ -588,6 +611,17 @@ func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result in return } + var tm, lm, fm time.Time + + defer func(){ + log.WithFields(log.Fields{ + "lc": lm.Sub(tm).String(), + "fc": fm.Sub(lm).String(), + }).Info("leader commit") + }() + + tm = time.Now() + // create leader log var l *kt.Log var logData []byte @@ -603,6 +637,8 @@ func (r *Runtime) leaderDoCommit(req *commitReq) 
(tracker *rpcTracker, result in // not wrapping underlying handler commit error result, err = r.sh.Commit(req.data) + lm = time.Now() + if err == nil { // mark last commit atomic.StoreUint64(&r.lastCommit, l.Index) @@ -614,6 +650,8 @@ func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result in tracker = r.rpc(l, r.minCommitFollowers) _, _, _ = tracker.get(commitCtx) + fm = time.Now() + // TODO(): text log for rpc errors // TODO(): mark uncommitted nodes and remove from peers diff --git a/kayak/tracker.go b/kayak/tracker.go index 7eef7ed6a..47117aab4 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -20,8 +20,11 @@ import ( "context" "sync" "sync/atomic" + "time" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" ) // rpcTracker defines the rpc call tracker @@ -89,7 +92,13 @@ func (t *rpcTracker) send() { } func (t *rpcTracker) callSingle(idx int) { + tm := time.Now() err := t.r.getCaller(t.nodes[idx]).Call(t.method, t.req, nil) + log.WithFields(log.Fields{ + "m": t.method, + "c": time.Now().Sub(tm).String(), + "r": t.req.(*kt.RPCRequest).Log.Index, + }).Info("call rpc") t.errLock.Lock() defer t.errLock.Unlock() t.errors[t.nodes[idx]] = err From da3308e5a1a2a7b5ebc7ee706be7b88262dcf6d0 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 6 Nov 2018 02:35:14 +0800 Subject: [PATCH 03/32] Increase commit window and support commit window in follower --- kayak/runtime.go | 94 +++++++++++++++++++++----------------- kayak/test/runtime_test.go | 2 +- kayak/tracker.go | 5 +- 3 files changed, 57 insertions(+), 44 deletions(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index 70457bb13..534351e2c 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -35,7 +35,7 @@ import ( const ( // commit channel window size - commitWindow = 1 + commitWindow = 200 // prepare window trackerWindow = 10 ) @@ -107,11 +107,12 @@ type Runtime struct { // commitReq defines 
the commit operation input. type commitReq struct { - ctx context.Context - data interface{} - index uint64 - log *kt.Log - result chan *commitResult + ctx context.Context + data interface{} + index uint64 + lastCommit uint64 + log *kt.Log + result chan *commitResult } // commitResult defines the commit operation result. @@ -242,19 +243,19 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ "r": logIndex, } if !tmLeaderPrepare.Before(tmStart) { - fields["lp"] = tmLeaderPrepare.Sub(tmStart) + fields["lp"] = tmLeaderPrepare.Sub(tmStart).Nanoseconds() } if !tmFollowerPrepare.Before(tmLeaderPrepare) { - fields["fp"] = tmFollowerPrepare.Sub(tmLeaderPrepare) + fields["fp"] = tmFollowerPrepare.Sub(tmLeaderPrepare).Nanoseconds() } if !tmLeaderRollback.Before(tmFollowerPrepare) { - fields["lr"] = tmLeaderRollback.Sub(tmFollowerPrepare) + fields["lr"] = tmLeaderRollback.Sub(tmFollowerPrepare).Nanoseconds() } if !tmRollback.Before(tmLeaderRollback) { - fields["fr"] = tmRollback.Sub(tmLeaderRollback) + fields["fr"] = tmRollback.Sub(tmLeaderRollback).Nanoseconds() } if !tmCommit.Before(tmFollowerPrepare) { - fields["c"] = tmCommit.Sub(tmFollowerPrepare) + fields["c"] = tmCommit.Sub(tmFollowerPrepare).Nanoseconds() } log.WithFields(fields).Debug("kayak leader apply") }() @@ -309,7 +310,7 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ tmFollowerPrepare = time.Now() select { - case cResult := <-r.commitResult(ctx, nil, prepareLog): + case cResult := <-r.commitResult(ctx, nil, prepareLog, 0): if cResult != nil { logIndex = prepareLog.Index result = cResult.result @@ -397,8 +398,9 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { } log.WithFields(log.Fields{ - "c": time.Now().Sub(tm).String(), + "c": time.Now().Sub(tm).Nanoseconds(), "t": l.Type, + "l": len(l.Data), }).Info("follower apply") return @@ -500,18 +502,9 @@ func (r *Runtime) followerCommit(l *kt.Log) (err error) { }).Warning("invalid last 
commit log") err = errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index") return - } else if lastCommit > myLastCommit { - // last log does not committed yet - // DO RECOVERY - log.WithFields(log.Fields{ - "expected": lastCommit, - "actual": myLastCommit, - }).Warning("DO RECOVERY, REQUIRED LAST COMMITTED DOES NOT COMMIT YET") - err = errors.Wrap(kt.ErrNeedRecovery, "last commit does not received, need recovery") - return } - cResult := <-r.commitResult(context.Background(), l, prepareLog) + cResult := <-r.commitResult(context.Background(), l, prepareLog, lastCommit) if cResult != nil { err = cResult.err } @@ -519,16 +512,16 @@ func (r *Runtime) followerCommit(l *kt.Log) (err error) { return } -func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLog *kt.Log) (res chan *commitResult) { +func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLog *kt.Log, lastCommit uint64) (res chan *commitResult) { // decode log and send to commit channel to process res = make(chan *commitResult, 1) var tm, tmDecode, tmEnqueue time.Time - defer func(){ + defer func() { log.WithFields(log.Fields{ - "d": tmDecode.Sub(tm).String(), - "q": tmEnqueue.Sub(tmDecode).String(), + "d": tmDecode.Sub(tm).Nanoseconds(), + "q": tmEnqueue.Sub(tmDecode).Nanoseconds(), "r": r.role.String(), }).Info("commit result") }() @@ -555,11 +548,12 @@ func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLo tmDecode = time.Now() req := &commitReq{ - ctx: ctx, - data: logReq, - index: prepareLog.Index, - result: res, - log: commitLog, + ctx: ctx, + data: logReq, + index: prepareLog.Index, + lastCommit: lastCommit, + result: res, + log: commitLog, } select { @@ -597,11 +591,10 @@ func (r *Runtime) doCommit(req *commitReq) { if r.role == proto.Leader { resp.rpc, resp.result, resp.err = r.leaderDoCommit(req) + req.result <- resp } else { - resp.err = r.followerDoCommit(req) + r.followerDoCommit(req) } - - req.result <- resp } func (r 
*Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result interface{}, err error) { @@ -613,10 +606,10 @@ func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result in var tm, lm, fm time.Time - defer func(){ + defer func() { log.WithFields(log.Fields{ - "lc": lm.Sub(tm).String(), - "fc": fm.Sub(lm).String(), + "lc": lm.Sub(tm).Nanoseconds(), + "fc": fm.Sub(lm).Nanoseconds(), }).Info("leader commit") }() @@ -645,10 +638,11 @@ func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result in } // send commit - commitCtx, commitCtxCancelFunc := context.WithTimeout(context.Background(), r.commitTimeout) - defer commitCtxCancelFunc() + //commitCtx, commitCtxCancelFunc := context.WithTimeout(context.Background(), r.commitTimeout) + //defer commitCtxCancelFunc() tracker = r.rpc(l, r.minCommitFollowers) - _, _, _ = tracker.get(commitCtx) + //_ = commitCtx + //_, _, _ = tracker.get(commitCtx) fm = time.Now() @@ -665,6 +659,22 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { return } + // check for last commit availability + myLastCommit := atomic.LoadUint64(&r.lastCommit) + if req.lastCommit != myLastCommit { + // wait for next round + log.WithFields(log.Fields{ + "expected": req.lastCommit, + "actual": myLastCommit, + }).Warning("new commit arrived too early, wait for real commit") + + // TODO(): need counter for retries, infinite commit re-order would cause troubles + go func(req *commitReq) { + r.commitCh <- req + }(req) + return + } + // write log first if err = r.wal.Write(req.log); err != nil { err = errors.Wrap(err, "write follower commit log failed") @@ -678,6 +688,8 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { atomic.StoreUint64(&r.lastCommit, req.log.Index) } + req.result <- &commitResult{err: err} + return } diff --git a/kayak/test/runtime_test.go b/kayak/test/runtime_test.go index 31f8d4b4c..be43976db 100644 --- a/kayak/test/runtime_test.go +++ b/kayak/test/runtime_test.go 
@@ -110,7 +110,7 @@ func (s *sqliteStorage) Commit(data interface{}) (result interface{}, err error) tm := time.Now() result, err = s.st.Exec(context.Background(), d.Queries) - log.WithField("c", time.Now().Sub(tm).String()).Info("db commit") + log.WithField("c", time.Now().Sub(tm).Nanoseconds()).Info("db commit") return } diff --git a/kayak/tracker.go b/kayak/tracker.go index 47117aab4..e7557fc03 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -22,9 +22,9 @@ import ( "sync/atomic" "time" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils/log" - kt "github.com/CovenantSQL/CovenantSQL/kayak/types" ) // rpcTracker defines the rpc call tracker @@ -96,8 +96,9 @@ func (t *rpcTracker) callSingle(idx int) { err := t.r.getCaller(t.nodes[idx]).Call(t.method, t.req, nil) log.WithFields(log.Fields{ "m": t.method, - "c": time.Now().Sub(tm).String(), + "c": time.Now().Sub(tm).Nanoseconds(), "r": t.req.(*kt.RPCRequest).Log.Index, + "t": t.req.(*kt.RPCRequest).Log.Type, }).Info("call rpc") t.errLock.Lock() defer t.errLock.Unlock() From a185ed9665d10bb13019f48c63f272e52dde04b6 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 6 Nov 2018 11:41:46 +0800 Subject: [PATCH 04/32] Update runtime --- kayak/runtime.go | 62 +++++++++----------------------------- kayak/test/runtime_test.go | 43 ++++++++++++++++++-------- 2 files changed, 44 insertions(+), 61 deletions(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index 534351e2c..191d02824 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -35,7 +35,7 @@ import ( const ( // commit channel window size - commitWindow = 200 + commitWindow = 10 // prepare window trackerWindow = 10 ) @@ -236,7 +236,9 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ r.peersLock.RLock() defer r.peersLock.RUnlock() - var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmLeaderRollback, tmRollback, tmCommit time.Time + var 
commitFuture <-chan *commitResult + + var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmCommitEnqueue, tmLeaderRollback, tmRollback, tmCommit time.Time defer func() { fields := log.Fields{ @@ -254,8 +256,11 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ if !tmRollback.Before(tmLeaderRollback) { fields["fr"] = tmRollback.Sub(tmLeaderRollback).Nanoseconds() } - if !tmCommit.Before(tmFollowerPrepare) { - fields["c"] = tmCommit.Sub(tmFollowerPrepare).Nanoseconds() + if !tmCommitEnqueue.Before(tmFollowerPrepare) { + fields["q"] = tmCommitEnqueue.Sub(tmFollowerPrepare).Nanoseconds() + } + if !tmCommit.Before(tmCommitEnqueue) { + fields["c"] = tmCommit.Sub(tmCommitEnqueue).Nanoseconds() } log.WithFields(fields).Debug("kayak leader apply") }() @@ -309,8 +314,12 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ tmFollowerPrepare = time.Now() + commitFuture = r.commitResult(ctx, nil, prepareLog, 0) + + tmCommitEnqueue = time.Now() + select { - case cResult := <-r.commitResult(ctx, nil, prepareLog, 0): + case cResult := <-commitFuture: if cResult != nil { logIndex = prepareLog.Index result = cResult.result @@ -366,8 +375,6 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { return } - tm := time.Now() - r.peersLock.RLock() defer r.peersLock.RUnlock() @@ -397,12 +404,6 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { r.updateNextIndex(l) } - log.WithFields(log.Fields{ - "c": time.Now().Sub(tm).Nanoseconds(), - "t": l.Type, - "l": len(l.Data), - }).Info("follower apply") - return } @@ -516,18 +517,6 @@ func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLo // decode log and send to commit channel to process res = make(chan *commitResult, 1) - var tm, tmDecode, tmEnqueue time.Time - - defer func() { - log.WithFields(log.Fields{ - "d": tmDecode.Sub(tm).Nanoseconds(), - "q": tmEnqueue.Sub(tmDecode).Nanoseconds(), - "r": r.role.String(), - }).Info("commit result") - 
}() - - tm = time.Now() - if prepareLog == nil { res <- &commitResult{ err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), @@ -545,8 +534,6 @@ func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLo return } - tmDecode = time.Now() - req := &commitReq{ ctx: ctx, data: logReq, @@ -561,8 +548,6 @@ func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLo case r.commitCh <- req: } - tmEnqueue = time.Now() - return } @@ -604,17 +589,6 @@ func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result in return } - var tm, lm, fm time.Time - - defer func() { - log.WithFields(log.Fields{ - "lc": lm.Sub(tm).Nanoseconds(), - "fc": fm.Sub(lm).Nanoseconds(), - }).Info("leader commit") - }() - - tm = time.Now() - // create leader log var l *kt.Log var logData []byte @@ -630,21 +604,13 @@ func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result in // not wrapping underlying handler commit error result, err = r.sh.Commit(req.data) - lm = time.Now() - if err == nil { // mark last commit atomic.StoreUint64(&r.lastCommit, l.Index) } // send commit - //commitCtx, commitCtxCancelFunc := context.WithTimeout(context.Background(), r.commitTimeout) - //defer commitCtxCancelFunc() tracker = r.rpc(l, r.minCommitFollowers) - //_ = commitCtx - //_, _, _ = tracker.get(commitCtx) - - fm = time.Now() // TODO(): text log for rpc errors diff --git a/kayak/test/runtime_test.go b/kayak/test/runtime_test.go index be43976db..0444d034f 100644 --- a/kayak/test/runtime_test.go +++ b/kayak/test/runtime_test.go @@ -57,7 +57,8 @@ func RandStringRunes(n int) string { } type sqliteStorage struct { - st *storage.Storage + st *storage.Storage + dsn string } type queryStructure struct { @@ -70,6 +71,7 @@ type queryStructure struct { func newSQLiteStorage(dsn string) (s *sqliteStorage, err error) { s = &sqliteStorage{} s.st, err = storage.New(dsn) + s.dsn = dsn return } @@ -110,7 +112,10 @@ func (s *sqliteStorage) 
Commit(data interface{}) (result interface{}, err error) tm := time.Now() result, err = s.st.Exec(context.Background(), d.Queries) - log.WithField("c", time.Now().Sub(tm).Nanoseconds()).Info("db commit") + log.WithFields(log.Fields{ + "c": time.Now().Sub(tm).Nanoseconds(), + "d": s.dsn, + }).Info("db commit") return } @@ -280,16 +285,24 @@ func BenchmarkNewRuntime(b *testing.B) { q1 := &queryStructure{ Queries: []storage.Query{ - {Pattern: "CREATE TABLE IF NOT EXISTS test (test string)"}, + {Pattern: "CREATE TABLE IF NOT EXISTS test (t1 text, t2 text, t3 text)"}, }, } So(err, ShouldBeNil) + r1 := RandStringRunes(333) + r2 := RandStringRunes(333) + r3 := RandStringRunes(333) + q2 := &queryStructure{ Queries: []storage.Query{ { - Pattern: "INSERT INTO test (test) VALUES(?)", - Args: []sql.NamedArg{sql.Named("", RandStringRunes(1024))}, + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, }, }, } @@ -312,8 +325,12 @@ func BenchmarkNewRuntime(b *testing.B) { q := &queryStructure{ Queries: []storage.Query{ { - Pattern: "INSERT INTO test (test) VALUES(?)", - Args: []sql.NamedArg{sql.Named("", RandStringRunes(1024))}, + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, }, }, } @@ -335,12 +352,12 @@ func BenchmarkNewRuntime(b *testing.B) { So(d1[0], ShouldHaveLength, 1) So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) - _, _, d2, _ := db2.Query(context.Background(), []storage.Query{ - {Pattern: "SELECT COUNT(1) FROM test"}, - }) - So(d2, ShouldHaveLength, 1) - So(d2[0], ShouldHaveLength, 1) - So(fmt.Sprint(d2[0][0]), ShouldResemble, fmt.Sprint(total)) + //_, _, d2, _ := db2.Query(context.Background(), []storage.Query{ + // {Pattern: "SELECT COUNT(1) FROM test"}, + //}) + //So(d2, ShouldHaveLength, 1) + //So(d2[0], ShouldHaveLength, 1) + //So(fmt.Sprint(d2[0][0]), 
ShouldResemble, fmt.Sprint(total)) b.StartTimer() }) From 5011864f6935cfb5f25905576cc9a8edda922dd3 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 6 Nov 2018 12:58:46 +0800 Subject: [PATCH 05/32] CallerHook disabled in Warning Info Debug --- kayak/runtime.go | 6 ------ utils/log/logwrapper.go | 6 +++--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index 191d02824..787b98bcb 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -628,12 +628,6 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { // check for last commit availability myLastCommit := atomic.LoadUint64(&r.lastCommit) if req.lastCommit != myLastCommit { - // wait for next round - log.WithFields(log.Fields{ - "expected": req.lastCommit, - "actual": myLastCommit, - }).Warning("new commit arrived too early, wait for real commit") - // TODO(): need counter for retries, infinite commit re-order would cause troubles go func(req *commitReq) { r.commitCh <- req diff --git a/utils/log/logwrapper.go b/utils/log/logwrapper.go index 3c32421f5..9bb99c5ff 100644 --- a/utils/log/logwrapper.go +++ b/utils/log/logwrapper.go @@ -102,9 +102,9 @@ func (hook *CallerHook) Levels() []logrus.Level { logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, + //logrus.WarnLevel, + //logrus.InfoLevel, + //logrus.DebugLevel, } } From 6de61a4b367cc8a7e1e2a0b86fe5a80613e71deb Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 6 Nov 2018 12:23:36 +0800 Subject: [PATCH 06/32] Remove log in tracker --- kayak/tracker.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/kayak/tracker.go b/kayak/tracker.go index e7557fc03..7eef7ed6a 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -20,11 +20,8 @@ import ( "context" "sync" "sync/atomic" - "time" - kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils/log" ) // 
rpcTracker defines the rpc call tracker @@ -92,14 +89,7 @@ func (t *rpcTracker) send() { } func (t *rpcTracker) callSingle(idx int) { - tm := time.Now() err := t.r.getCaller(t.nodes[idx]).Call(t.method, t.req, nil) - log.WithFields(log.Fields{ - "m": t.method, - "c": time.Now().Sub(tm).Nanoseconds(), - "r": t.req.(*kt.RPCRequest).Log.Index, - "t": t.req.(*kt.RPCRequest).Log.Type, - }).Info("call rpc") t.errLock.Lock() defer t.errLock.Unlock() t.errors[t.nodes[idx]] = err From 3f20b3f7b9bf09ca3f2cd87c50285083e9ef2c60 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 6 Nov 2018 13:14:44 +0800 Subject: [PATCH 07/32] No -traceFile in performance test --- cmd/cql-minerd/integration_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 60024948c..c438f0517 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -240,7 +240,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner0.profile"), - "-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), + //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), "-metricGraphiteServer", "192.168.2.100:2003", "-profileServer", "0.0.0.0:8080", "-metricLog", @@ -258,7 +258,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner1.profile"), - "-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), + //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), "-metricGraphiteServer", "192.168.2.100:2003", "-profileServer", "0.0.0.0:8081", "-metricLog", @@ -276,7 +276,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", 
FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner2.profile"), - "-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), + //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), "-metricGraphiteServer", "192.168.2.100:2003", "-profileServer", "0.0.0.0:8082", "-metricLog", From cadfa74874eaf4c59590224e128978b8c0a5fd57 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 6 Nov 2018 13:17:31 +0800 Subject: [PATCH 08/32] Comment single test --- cmd/cql-minerd/integration_test.go | 34 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index c438f0517..c05ceb1b0 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -415,23 +415,23 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { rand.Seed(time.Now().UnixNano()) start := (rand.Int31() % 100) * 10000 - b.Run("benchmark Single INSERT", func(b *testing.B) { - b.ResetTimer() - insertedCount = b.N - for i := 0; i < b.N; i++ { - _, err = db.Exec("INSERT INTO test ( indexedColumn, nonIndexedColumn ) VALUES"+ - "(?, ?)", int(start)+i, i, - ) - if err != nil { - b.Fatal(err) - } - } - }) - - if createDB { - prepareBenchTable(db) - } - + //b.Run("benchmark Single INSERT", func(b *testing.B) { + // b.ResetTimer() + // insertedCount = b.N + // for i := 0; i < b.N; i++ { + // _, err = db.Exec("INSERT INTO test ( indexedColumn, nonIndexedColumn ) VALUES"+ + // "(?, ?)", int(start)+i, i, + // ) + // if err != nil { + // b.Fatal(err) + // } + // } + //}) + // + //if createDB { + // prepareBenchTable(db) + //} + // b.Run("benchmark Multi INSERT", func(b *testing.B) { b.ResetTimer() insertedCount = b.N From dcc8104f069b837263fd115dc2b95c1f8f35597c Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 6 Nov 2018 16:15:41 +0800 Subject: [PATCH 09/32] Add more verbose logging --- kayak/runtime.go | 101 
++++++++++++++++++++++++++++--------- kayak/test/runtime_test.go | 5 -- kayak/wal/mem_wal.go | 3 +- 3 files changed, 80 insertions(+), 29 deletions(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index 787b98bcb..c160b1b16 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -115,8 +115,10 @@ type commitReq struct { result chan *commitResult } -// commitResult defines the commit operation result. +// followerCommitResult defines the commit operation result. type commitResult struct { + start time.Time + dbCost time.Duration result interface{} err error rpc *rpcTracker @@ -238,7 +240,9 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ var commitFuture <-chan *commitResult - var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmCommitEnqueue, tmLeaderRollback, tmRollback, tmCommit time.Time + var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmCommitEnqueue, tmLeaderRollback, + tmRollback, tmCommitDequeue, tmLeaderCommit, tmCommit time.Time + var dbCost time.Duration defer func() { fields := log.Fields{ @@ -257,12 +261,26 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ fields["fr"] = tmRollback.Sub(tmLeaderRollback).Nanoseconds() } if !tmCommitEnqueue.Before(tmFollowerPrepare) { - fields["q"] = tmCommitEnqueue.Sub(tmFollowerPrepare).Nanoseconds() + fields["eq"] = tmCommitEnqueue.Sub(tmFollowerPrepare).Nanoseconds() } - if !tmCommit.Before(tmCommitEnqueue) { - fields["c"] = tmCommit.Sub(tmCommitEnqueue).Nanoseconds() + if !tmCommitDequeue.Before(tmCommitEnqueue) { + fields["dq"] = tmCommitDequeue.Sub(tmCommitEnqueue).Nanoseconds() } - log.WithFields(fields).Debug("kayak leader apply") + if !tmLeaderCommit.Before(tmCommitDequeue) { + fields["lc"] = tmLeaderCommit.Sub(tmCommitDequeue).Nanoseconds() + } + if !tmCommit.Before(tmLeaderCommit) { + fields["fc"] = tmCommit.Sub(tmLeaderCommit).Nanoseconds() + } + if dbCost > 0 { + fields["dc"] = dbCost.Nanoseconds() + } + if !tmCommit.Before(tmStart) { + 
fields["t"] = tmCommit.Sub(tmStart).Nanoseconds() + } else if !tmRollback.Before(tmStart) { + fields["t"] = tmRollback.Sub(tmStart).Nanoseconds() + } + log.WithFields(fields).Info("kayak leader apply") }() if r.role != proto.Leader { @@ -314,7 +332,7 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ tmFollowerPrepare = time.Now() - commitFuture = r.commitResult(ctx, nil, prepareLog, 0) + commitFuture = r.leaderCommitResult(ctx, req, prepareLog) tmCommitEnqueue = time.Now() @@ -325,6 +343,10 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ result = cResult.result err = cResult.err + tmCommitDequeue = cResult.start + dbCost = cResult.dbCost + tmLeaderCommit = time.Now() + // wait until context deadline or commit done if cResult.rpc != nil { cResult.rpc.get(ctx) @@ -492,28 +514,42 @@ func (r *Runtime) followerCommit(l *kt.Log) (err error) { return } - myLastCommit := atomic.LoadUint64(&r.lastCommit) + cResult := <-r.followerCommitResult(context.Background(), l, prepareLog, lastCommit) + if cResult != nil { + err = cResult.err + } - // check committed index - if lastCommit < myLastCommit { - // leader pushed a early index before commit - log.WithFields(log.Fields{ - "head": myLastCommit, - "supplied": lastCommit, - }).Warning("invalid last commit log") - err = errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index") + return +} + +func (r *Runtime) leaderCommitResult(ctx context.Context, reqPayload interface{}, prepareLog *kt.Log) (res chan *commitResult) { + // decode log and send to commit channel to process + res = make(chan *commitResult, 1) + + if prepareLog == nil { + res <- &commitResult{ + err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), + } return } - cResult := <-r.commitResult(context.Background(), l, prepareLog, lastCommit) - if cResult != nil { - err = cResult.err + // decode prepare log + req := &commitReq{ + ctx: ctx, + data: reqPayload, + index: prepareLog.Index, 
+ result: res, + } + + select { + case <-ctx.Done(): + case r.commitCh <- req: } return } -func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLog *kt.Log, lastCommit uint64) (res chan *commitResult) { +func (r *Runtime) followerCommitResult(ctx context.Context, commitLog *kt.Log, prepareLog *kt.Log, lastCommit uint64) (res chan *commitResult) { // decode log and send to commit channel to process res = make(chan *commitResult, 1) @@ -524,6 +560,21 @@ func (r *Runtime) commitResult(ctx context.Context, commitLog *kt.Log, prepareLo return } + myLastCommit := atomic.LoadUint64(&r.lastCommit) + + // check committed index + if lastCommit < myLastCommit { + // leader pushed a early index before commit + log.WithFields(log.Fields{ + "head": myLastCommit, + "supplied": lastCommit, + }).Warning("invalid last commit log") + res <- &commitResult{ + err: errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index"), + } + return + } + // decode prepare log var logReq interface{} var err error @@ -572,17 +623,19 @@ func (r *Runtime) doCommit(req *commitReq) { r.peersLock.RLock() defer r.peersLock.RUnlock() - resp := &commitResult{} + resp := &commitResult{ + start: time.Now(), + } if r.role == proto.Leader { - resp.rpc, resp.result, resp.err = r.leaderDoCommit(req) + resp.dbCost, resp.rpc, resp.result, resp.err = r.leaderDoCommit(req) req.result <- resp } else { r.followerDoCommit(req) } } -func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result interface{}, err error) { +func (r *Runtime) leaderDoCommit(req *commitReq) (dbCost time.Duration, tracker *rpcTracker, result interface{}, err error) { if req.log != nil { // mis-use follower commit for leader log.Fatal("INVALID EXISTING LOG FOR LEADER COMMIT") @@ -602,7 +655,9 @@ func (r *Runtime) leaderDoCommit(req *commitReq) (tracker *rpcTracker, result in } // not wrapping underlying handler commit error + tmStartDB := time.Now() result, err = r.sh.Commit(req.data) + dbCost = 
time.Now().Sub(tmStartDB) if err == nil { // mark last commit diff --git a/kayak/test/runtime_test.go b/kayak/test/runtime_test.go index 0444d034f..5d86b43ee 100644 --- a/kayak/test/runtime_test.go +++ b/kayak/test/runtime_test.go @@ -110,12 +110,7 @@ func (s *sqliteStorage) Commit(data interface{}) (result interface{}, err error) return } - tm := time.Now() result, err = s.st.Exec(context.Background(), d.Queries) - log.WithFields(log.Fields{ - "c": time.Now().Sub(tm).Nanoseconds(), - "d": s.dsn, - }).Info("db commit") return } diff --git a/kayak/wal/mem_wal.go b/kayak/wal/mem_wal.go index 48314ce47..0b2e7d2ed 100644 --- a/kayak/wal/mem_wal.go +++ b/kayak/wal/mem_wal.go @@ -36,7 +36,8 @@ type MemWal struct { // NewMemWal returns new memory wal instance. func NewMemWal() (p *MemWal) { p = &MemWal{ - revIndex: make(map[uint64]int), + revIndex: make(map[uint64]int, 100000), + logs: make([]*kt.Log, 0, 100000), } return From 3a2bf5bc0f0f35b35f296cd3a156fa2572fa9402 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 6 Nov 2018 18:00:16 +0800 Subject: [PATCH 10/32] Remove bin/*.test from image --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 2f046e8a4..626ca9619 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,6 +4,7 @@ FROM golang:1.11-stretch as builder WORKDIR /go/src/github.com/CovenantSQL/CovenantSQL COPY . . 
RUN CGO_ENABLED=1 GOOS=linux GOLDFLAGS="-linkmode external -extldflags -static" ./build.sh +RUN rm -f bin/*.test # Stage: runner FROM alpine:3.7 From 1fbbe08b396205dd1771c7569b5263c5c6eaa9a7 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 6 Nov 2018 18:14:33 +0800 Subject: [PATCH 11/32] Use covenantsql instead of covenantsql.io for docker hub org name --- docker-compose.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 3f6ab07a3..7b815fe5a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: '3' services: covenantsql_bp_0: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_bp_0 restart: always ports: @@ -21,7 +21,7 @@ services: max-size: "1m" max-file: "10" covenantsql_bp_1: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_bp_1 restart: always ports: @@ -40,7 +40,7 @@ services: max-size: "1m" max-file: "10" covenantsql_bp_2: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_bp_2 restart: always ports: @@ -59,7 +59,7 @@ services: max-size: "1m" max-file: "10" covenantsql_miner_0: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_miner_0 restart: always ports: @@ -78,7 +78,7 @@ services: max-size: "1m" max-file: "10" covenantsql_miner_1: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_miner_1 restart: always ports: @@ -97,7 +97,7 @@ services: max-size: "1m" max-file: "10" covenantsql_miner_2: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_miner_2 restart: always ports: @@ -116,7 +116,7 @@ services: max-size: "1m" max-file: "10" covenantsql_adapter: - image: covenantsql.io/covenantsql:latest 
+ image: covenantsql/covenantsql:latest container_name: covenantsql_adapter restart: always ports: @@ -132,7 +132,7 @@ services: default: ipv4_address: 172.254.1.8 covenantsql_observer: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_observer restart: always ports: @@ -152,7 +152,7 @@ services: max-size: "1m" max-file: "10" covenantsql_mysql_adapter: - image: covenantsql.io/covenantsql:latest + image: covenantsql/covenantsql:latest container_name: covenantsql_mysql_adapter restart: always ports: From 0aa1739d86374d37dfd2d8db98f8a18a03b500e2 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Tue, 6 Nov 2018 20:27:38 +0800 Subject: [PATCH 12/32] Add covenantsql explorer to docker-compose --- docker-compose.yml | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 7b815fe5a..8274d4070 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -version: '3' +version: "3" services: covenantsql_bp_0: @@ -137,7 +137,15 @@ services: restart: always ports: - "11106:4663" - command: ["-database", "057e55460f501ad071383c95f691293f2f0a7895988e22593669ceeb52a6452a", "-reset", "oldest", "-listen", "0.0.0.0:4663"] + command: + [ + "-database", + "057e55460f501ad071383c95f691293f2f0a7895988e22593669ceeb52a6452a", + "-reset", + "oldest", + "-listen", + "0.0.0.0:4663", + ] environment: COVENANT_ROLE: observer COVENANT_CONF: ./node_observer/config.yaml @@ -171,6 +179,20 @@ services: options: max-size: "1m" max-file: "10" + covenantsql_explorer: + image: covenantsql/explorer:latest + container_name: covenantsql_explorer + depends_on: + - covenantsql_observer + restart: always + ports: + - "11108:80" + environment: + COVENANTSQL_EXPLORER_DOMAIN: localhost + COVENANTSQL_OBSERVER_ADDR: covenantsql_observer:4663 + logging: + options: + max-size: "5m" networks: default: From f45f3748cdf56daa3a470e411b8468ce2c848f99 Mon Sep 17 00:00:00 
2001 From: Qi Xiao Date: Tue, 6 Nov 2018 22:53:33 +0800 Subject: [PATCH 13/32] Update grep command to support binary log grepping --- cmd/cql-minerd/integration_test.go | 2 +- cmd/cql-observer/observation_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index c05ceb1b0..d11fb5f7b 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -299,7 +299,7 @@ func stopNodes() { defer wg.Done() thisCmd.Cmd.Process.Signal(syscall.SIGTERM) thisCmd.Cmd.Wait() - grepRace := exec.Command("/bin/sh", "-c", "grep -A 50 'DATA RACE' "+thisCmd.LogPath) + grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() if len(out) > 2 { log.Fatalf("DATA RACE in %s :\n%s", thisCmd.Cmd.Path, string(out)) diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index 8b7919d05..14457edb7 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -175,7 +175,7 @@ func stopNodes() { defer wg.Done() thisCmd.Cmd.Process.Signal(syscall.SIGTERM) thisCmd.Cmd.Wait() - grepRace := exec.Command("/bin/sh", "-c", "grep -A 50 'DATA RACE' "+thisCmd.LogPath) + grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() if len(out) > 2 { log.Fatal(string(out)) From 557b06cbaf4fcee86b7d6b68d91405ffa88dc555 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 6 Nov 2018 23:25:52 +0800 Subject: [PATCH 14/32] Fix makefile for docker build --- Makefile | 7 +++++-- docker-compose.yml | 6 +----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 2adee6581..3538bf37c 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ default: build -IMAGE := covenantsql.io/covenantsql +IMAGE := covenantsql/covenantsql GIT_COMMIT ?= $(shell git rev-parse --short HEAD) GIT_DIRTY ?= $(shell 
test -n "`git status --porcelain`" && echo "+CHANGES" || true) GIT_DESCRIBE ?= $(shell git describe --tags --always) @@ -38,4 +38,7 @@ start: logs: docker-compose logs -f --tail=10 -.PHONY: status build save start logs +push: + docker push $(IMAGE) + +.PHONY: status build save start logs push diff --git a/docker-compose.yml b/docker-compose.yml index 8274d4070..e401c83c0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -version: "3" +version: "v0.0.3" services: covenantsql_bp_0: @@ -139,10 +139,6 @@ services: - "11106:4663" command: [ - "-database", - "057e55460f501ad071383c95f691293f2f0a7895988e22593669ceeb52a6452a", - "-reset", - "oldest", "-listen", "0.0.0.0:4663", ] From 3a7ff27948b553c3dd0c36c4ed9108c73ce58d45 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 6 Nov 2018 23:46:38 +0800 Subject: [PATCH 15/32] Add docker push version --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3538bf37c..43e9dbbe4 100644 --- a/Makefile +++ b/Makefile @@ -39,6 +39,7 @@ logs: docker-compose logs -f --tail=10 push: - docker push $(IMAGE) + docker push $(IMAGE):$(VERSION) + docker push $(IMAGE):latest .PHONY: status build save start logs push From 8b7465f7b8c8d54e692a284d6b9f6f272ff4c3d0 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 6 Nov 2018 23:37:21 +0800 Subject: [PATCH 16/32] Fix compilation failure bug --- cmd/cql-minerd/node.go | 4 ++-- sqlchain/chain.go | 2 +- sqlchain/chain_test.go | 1 - sqlchain/queryindex.go | 2 +- sqlchain/queryindex_test.go | 2 +- sqlchain/runtime.go | 2 +- utils/bytes_test.go | 2 +- utils/log/logwrapper_test.go | 1 - worker/types/init_service_type.go | 8 ++++---- 9 files changed, 11 insertions(+), 13 deletions(-) diff --git a/cmd/cql-minerd/node.go b/cmd/cql-minerd/node.go index 2b7808e0a..3ea86d589 100644 --- a/cmd/cql-minerd/node.go +++ b/cmd/cql-minerd/node.go @@ -25,7 +25,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" 
"github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/kayak" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -112,7 +112,7 @@ func registerNodeToBP(timeout time.Duration) (err error) { ch <- id return } - if strings.Contains(err.Error(), kayak.ErrNotLeader.Error()) { + if strings.Contains(err.Error(), kt.ErrNotLeader.Error()) { log.Debug("stop ping non leader BP node") return } diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 1fae7f8ec..1c669263e 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -916,7 +916,7 @@ func (c *Chain) syncAckedQuery(height int32, header *hash.Hash, id proto.NodeID) }, DatabaseID: c.rt.databaseID, FetchAckedQueryReq: FetchAckedQueryReq{ - Height: height, + Height: height, SignedAckedHash: header, }, } diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 68b44f827..42a78df35 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -189,7 +189,6 @@ func TestMultiChain(t *testing.T) { chain: chain, } - } // Create a master BP for RPC test diff --git a/sqlchain/queryindex.go b/sqlchain/queryindex.go index d070ddf41..da30cc84e 100644 --- a/sqlchain/queryindex.go +++ b/sqlchain/queryindex.go @@ -19,13 +19,13 @@ package sqlchain // TODO(leventeliu): use pooled objects to speed up this index. 
import ( - "github.com/pkg/errors" "sync" "github.com/CovenantSQL/CovenantSQL/crypto/hash" ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" "github.com/CovenantSQL/CovenantSQL/utils/log" wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/pkg/errors" ) var ( diff --git a/sqlchain/queryindex_test.go b/sqlchain/queryindex_test.go index 55b1f0863..01bb5bc13 100644 --- a/sqlchain/queryindex_test.go +++ b/sqlchain/queryindex_test.go @@ -17,13 +17,13 @@ package sqlchain import ( - "github.com/pkg/errors" "math/rand" "reflect" "testing" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" ) const ( diff --git a/sqlchain/runtime.go b/sqlchain/runtime.go index 2fbc8df53..e3754024d 100644 --- a/sqlchain/runtime.go +++ b/sqlchain/runtime.go @@ -18,13 +18,13 @@ package sqlchain import ( "fmt" - "github.com/CovenantSQL/CovenantSQL/utils/log" "sync" "time" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) diff --git a/utils/bytes_test.go b/utils/bytes_test.go index 9276681de..1d056aaae 100644 --- a/utils/bytes_test.go +++ b/utils/bytes_test.go @@ -36,7 +36,7 @@ func TestNewLevelDBKey(t *testing.T) { So(ConcatAll([]byte{'0', '1', '2', '3'}, nil, []byte{'x', 'y', 'z'}), ShouldResemble, []byte{'0', '1', '2', '3', 'x', 'y', 'z'}) So(ConcatAll([]byte{'0', '1', '2', '3'}, []byte{}, []byte{'x', 'y', 'z'}), - ShouldResemble, []byte{'0', '1', '2', '3','x', 'y', 'z'}) + ShouldResemble, []byte{'0', '1', '2', '3', 'x', 'y', 'z'}) So(ConcatAll(nil, []byte{'0', '1', '2', '3'}, nil, []byte{'x', 'y', 'z'}), ShouldResemble, []byte{'0', '1', '2', '3', 'x', 'y', 'z'}) So(ConcatAll([]byte{}, []byte{'0', '1', '2', '3'}, nil, []byte{'x', 'y', 'z'}, nil), diff --git a/utils/log/logwrapper_test.go 
b/utils/log/logwrapper_test.go index acc6dfc0d..26fbfe485 100644 --- a/utils/log/logwrapper_test.go +++ b/utils/log/logwrapper_test.go @@ -22,7 +22,6 @@ import ( "time" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) diff --git a/worker/types/init_service_type.go b/worker/types/init_service_type.go index 12d9a0786..6007729db 100644 --- a/worker/types/init_service_type.go +++ b/worker/types/init_service_type.go @@ -32,10 +32,10 @@ type InitService struct { // ResourceMeta defines single database resource meta. type ResourceMeta struct { - Node uint16 // reserved node count - Space uint64 // reserved storage space in bytes - Memory uint64 // reserved memory in bytes - LoadAvgPerCPU uint64 // max loadAvg15 per CPU + Node uint16 // reserved node count + Space uint64 // reserved storage space in bytes + Memory uint64 // reserved memory in bytes + LoadAvgPerCPU uint64 // max loadAvg15 per CPU EncryptionKey string `hspack:"-"` // encryption key for database instance } From 443640094c64b10abb88e3aab92e1e3b3760a374 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 7 Nov 2018 00:09:53 +0800 Subject: [PATCH 17/32] Fix docker-compose version --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index e401c83c0..90ab2e8c2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -version: "v0.0.3" +version: "3" services: covenantsql_bp_0: From 39cc93e5728190dece727fb34c11183014bb58f8 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 7 Nov 2018 02:15:37 +0800 Subject: [PATCH 18/32] NodeID.UnmarshalBinary process 64 bytes NodeID for backward compatible --- proto/nodeinfo.go | 7 ++++++- proto/nodeinfo_test.go | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/proto/nodeinfo.go b/proto/nodeinfo.go index 2fa826584..a4822549d 100644 --- a/proto/nodeinfo.go +++ b/proto/nodeinfo.go @@ -141,9 +141,14 @@ func (id *NodeID) MarshalBinary() (keyBytes []byte, err error) { 
// UnmarshalBinary does the deserialization func (id *NodeID) UnmarshalBinary(keyBytes []byte) (err error) { + // for backward compatible + if len(keyBytes) == 64 { + *id = NodeID(keyBytes) + return + } h, err := hash.NewHash(keyBytes) if err != nil { - log.Error("nodeID bytes len should be 32") + log.Error("load 32 bytes nodeID failed") return } *id = NodeID(h.String()) diff --git a/proto/nodeinfo_test.go b/proto/nodeinfo_test.go index f033d7865..b0ac900ab 100644 --- a/proto/nodeinfo_test.go +++ b/proto/nodeinfo_test.go @@ -131,7 +131,7 @@ func TestNodeID_IsEmpty(t *testing.T) { func TestNodeID_MarshalBinary(t *testing.T) { Convey("NodeID MarshalBinary", t, func() { - var nodeID, nodeID2 NodeID + var nodeID, nodeID2, nodeID3 NodeID nb, err := nodeID.MarshalBinary() So(err, ShouldBeNil) @@ -147,5 +147,9 @@ func TestNodeID_MarshalBinary(t *testing.T) { err = nodeID2.UnmarshalBinary(nb) So(err, ShouldBeNil) So(nodeID2, ShouldResemble, nodeID) + + nodeID3.UnmarshalBinary([]byte("0000000000000000000000000000000000000000000000000000000000000000")) + So(err, ShouldBeNil) + So(nodeID3, ShouldResemble, nodeID) }) } From 3194c18db1e954327ecab82a01c72e96493b6fb9 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 7 Nov 2018 00:11:35 +0800 Subject: [PATCH 19/32] Update hash-upgrade binary logic --- cmd/hotfix/hash-upgrade/main.go | 69 +++++++++++++++++++++++++-------- 1 file changed, 52 insertions(+), 17 deletions(-) diff --git a/cmd/hotfix/hash-upgrade/main.go b/cmd/hotfix/hash-upgrade/main.go index 310e31b15..33b50fd88 100644 --- a/cmd/hotfix/hash-upgrade/main.go +++ b/cmd/hotfix/hash-upgrade/main.go @@ -97,14 +97,37 @@ type Peers_ struct { Signature *asymmetric.Signature } -// ServiceInstance defines the old service instance type before marshaller updates. -type ServiceInstance struct { +// ServiceInstancePlainOld defines the plain old service instance type before marshaller updates. 
+type ServiceInstancePlainOld struct { DatabaseID proto.DatabaseID Peers *Peers_ ResourceMeta wt.ResourceMeta GenesisBlock *Block_ } +// ServiceInstanceOld defines the old service instance type before marshaller updates. +type ServiceInstanceOld struct { + DatabaseID proto.DatabaseID + Peers *Peers_ + ResourceMeta wt.ResourceMeta + GenesisBlock *ct.Block +} + +func convertPeers(oldPeers *Peers_) (newPeers *proto.Peers) { + if oldPeers == nil { + return + } + + newPeers = new(proto.Peers) + for _, s := range oldPeers.Servers { + newPeers.Servers = append(newPeers.Servers, s.ID) + } + newPeers.Leader = oldPeers.Leader.ID + newPeers.Term = oldPeers.Term + + return +} + func main() { flag.Parse() @@ -157,8 +180,8 @@ func main() { } else { // detect if the genesis block is in old version if strings.Contains(fmt.Sprintf("%#v", testDecode), "\"GenesisBlock\":[]uint8") { - log.Info("detected old version") - var instance ServiceInstance + log.Info("detected plain old version (without msgpack tag and use custom serializer)") + var instance ServiceInstancePlainOld if err := utils.DecodeMsgPackPlain(rawInstance, &instance); err != nil { log.WithError(err).Fatal("decode msgpack failed") @@ -166,13 +189,25 @@ func main() { } newInstance.DatabaseID = instance.DatabaseID - // TODO: re-construct peers structure - // newInstance.Peers = instance.Peers + newInstance.Peers = convertPeers(instance.Peers) newInstance.ResourceMeta = instance.ResourceMeta newInstance.GenesisBlock = &ct.Block{ SignedHeader: instance.GenesisBlock.SignedHeader, Queries: instance.GenesisBlock.Queries, } + } else if strings.Contains(fmt.Sprintf("%#v", testDecode), "\"PubKey\"") { + log.Info("detected old version (old kayak implementation [called as kaar])") + var instance ServiceInstanceOld + + if err := utils.DecodeMsgPack(rawInstance, &instance); err != nil { + log.WithError(err).Fatal("decode msgpack failed") + return + } + + newInstance.DatabaseID = instance.DatabaseID + newInstance.Peers = 
convertPeers(instance.Peers) + newInstance.ResourceMeta = instance.ResourceMeta + newInstance.GenesisBlock = instance.GenesisBlock } else { log.Info("detected new version, need re-signature") @@ -180,20 +215,20 @@ func main() { log.WithError(err).Fatal("decode msgpack failed") return } + } - // set genesis block to now - newInstance.GenesisBlock.SignedHeader.Timestamp = time.Now().UTC() + // set genesis block to now + newInstance.GenesisBlock.SignedHeader.Timestamp = time.Now().UTC() - // sign peers again - if err := newInstance.Peers.Sign(privateKey); err != nil { - log.WithError(err).Fatal("sign peers failed") - return - } + // sign peers again + if err := newInstance.Peers.Sign(privateKey); err != nil { + log.WithError(err).Fatal("sign peers failed") + return + } - if err := newInstance.GenesisBlock.PackAndSignBlock(privateKey); err != nil { - log.WithError(err).Fatal("sign genesis block failed") - return - } + if err := newInstance.GenesisBlock.PackAndSignBlock(privateKey); err != nil { + log.WithError(err).Fatal("sign genesis block failed") + return } } From a0f79b39684f947b83c70b4be1fe37c22440d7ea Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 7 Nov 2018 00:26:10 +0800 Subject: [PATCH 20/32] Update hotfix binary to pretty print convert result --- cmd/hotfix/hash-upgrade/main.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/hotfix/hash-upgrade/main.go b/cmd/hotfix/hash-upgrade/main.go index 33b50fd88..689065d29 100644 --- a/cmd/hotfix/hash-upgrade/main.go +++ b/cmd/hotfix/hash-upgrade/main.go @@ -21,6 +21,7 @@ import ( "context" "database/sql" "encoding/binary" + "encoding/json" "flag" "fmt" "os/exec" @@ -232,7 +233,8 @@ func main() { } } - log.Infof("database is: %#v -> %#v", id, newInstance) + d, _ := json.MarshalIndent(newInstance, "", " ") + log.Infof("database is: %#v -> %s", id, d) // encode and put back to database rawInstanceBuffer, err := utils.EncodeMsgPack(newInstance) From 0f9c3d3c8f0528de510b03017b94ce741e4a9be6 Mon 
Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 7 Nov 2018 00:28:34 +0800 Subject: [PATCH 21/32] Update hash-upgrade binary to pretty print json instance config --- cmd/hotfix/hash-upgrade/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/hotfix/hash-upgrade/main.go b/cmd/hotfix/hash-upgrade/main.go index 689065d29..c4db07472 100644 --- a/cmd/hotfix/hash-upgrade/main.go +++ b/cmd/hotfix/hash-upgrade/main.go @@ -233,8 +233,8 @@ func main() { } } - d, _ := json.MarshalIndent(newInstance, "", " ") - log.Infof("database is: %#v -> %s", id, d) + d, _ := json.Marshal(newInstance) + log.Infof("database is: %#v -> %s", id, string(d)) // encode and put back to database rawInstanceBuffer, err := utils.EncodeMsgPack(newInstance) From 9ddf6de3622e981a3bf9e5e37e97895af2dcf42d Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 5 Nov 2018 11:59:26 +0800 Subject: [PATCH 22/32] Fix typo in log import --- client/_example/simple.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/_example/simple.go b/client/_example/simple.go index 1dd270aab..cc8c88464 100644 --- a/client/_example/simple.go +++ b/client/_example/simple.go @@ -22,7 +22,7 @@ import ( "fmt" "github.com/CovenantSQL/CovenantSQL/client" - log "github.com/sirupsen/logrus" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) func main() { From bdc401c2bb8fa8e22d5628e3dde1af3d1717c964 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 7 Nov 2018 03:03:31 +0800 Subject: [PATCH 23/32] Fix bad import in gdpaverage.go --- client/_example/gdpaverage.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/client/_example/gdpaverage.go b/client/_example/gdpaverage.go index 367f841c6..22878cb81 100644 --- a/client/_example/gdpaverage.go +++ b/client/_example/gdpaverage.go @@ -19,9 +19,8 @@ package main import ( "database/sql" "flag" - + "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/client" - log "github.com/Sirupsen/logrus" ) func 
main() { From 04b4cceb8ccdeef1f2278e4396240a93be695f22 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 7 Nov 2018 03:09:09 +0800 Subject: [PATCH 24/32] Add test service pem for docker-compose --- .../node_c/admin.test.covenantsql.io-key.pem | 51 +++++++++++++++++++ .../node_c/admin.test.covenantsql.io.pem | 33 ++++++++++++ .../node_c/read.test.covenantsql.io-key.pem | 51 +++++++++++++++++++ .../node_c/read.test.covenantsql.io.pem | 33 ++++++++++++ test/service/node_c/rootCA-key.pem | 51 +++++++++++++++++++ test/service/node_c/rootCA.pem | 39 ++++++++++++++ .../node_c/server.test.covenantsql.io-key.pem | 28 ++++++++++ .../node_c/server.test.covenantsql.io.pem | 25 +++++++++ .../node_c/write.test.covenantsql.io-key.pem | 51 +++++++++++++++++++ .../node_c/write.test.covenantsql.io.pem | 33 ++++++++++++ 10 files changed, 395 insertions(+) create mode 100644 test/service/node_c/admin.test.covenantsql.io-key.pem create mode 100644 test/service/node_c/admin.test.covenantsql.io.pem create mode 100644 test/service/node_c/read.test.covenantsql.io-key.pem create mode 100644 test/service/node_c/read.test.covenantsql.io.pem create mode 100644 test/service/node_c/rootCA-key.pem create mode 100644 test/service/node_c/rootCA.pem create mode 100644 test/service/node_c/server.test.covenantsql.io-key.pem create mode 100644 test/service/node_c/server.test.covenantsql.io.pem create mode 100644 test/service/node_c/write.test.covenantsql.io-key.pem create mode 100644 test/service/node_c/write.test.covenantsql.io.pem diff --git a/test/service/node_c/admin.test.covenantsql.io-key.pem b/test/service/node_c/admin.test.covenantsql.io-key.pem new file mode 100644 index 000000000..46206b1a4 --- /dev/null +++ b/test/service/node_c/admin.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAo/ktdmTGZAxZedA2vCVwXTb8iWNHg2D+EfI79oZ23qCBf09T +bp3RyRnAAogWh833Po+wsvAeKt5qtKQozKABjzyF0KJbwZhpDUcpf94SRX0FHha4 
+G60hCmB9I9XzL2qx5A18G+d/Y4l962KA2DTgEoxoN33HUhHafCmpdJH8npkGsNol +2q+LElNXnS07caRXFbjckFZzm1s0YBlyT60CaNhI/R4kJvnO0UHfJ05vLunBlnJ7 +WGFSxJkcdrPGTGP6TdkU0AZTVQMYnkTnkTp6cdwhVBj7FrLvzCJgPfzcEQvxkxpB +EQwureJUstLOarnMGCretg/dNszyoVzdMjAZtNxtM2u49TLIZstEgY1KEu4h3Gc8 +omIXC1F3EEf3X+3rrYd37gems1ki5q/ow6wblwwUtvbKCyggubHMFzSaRpmNueV7 +e6hKnKZU6GWZ3/Q21gj5Ma5d3eauHdurquCS/tzYf+GNbYZHmczIADrTRlemqfhB +5zMCbSSb8cZ5/APjoZtq30/WfrvmhHxdUnxgML0n6q3sq5oSnFZ55vyMEQOBX3z/ +Vx9jb1S+pxZywJlRHaJ8GTz+hhgr+ojXvMBtEsn99r2Ndu4R/FEgEJV/26GZP0JD +c/SJ3GkZQhZ7IjUZfpQJ6/VKW0yiu86doPNsoG2gWJ+fF5VeagICYE5iBFUCAwEA +AQKCAgB145pp+n4gRDi4OZiAoLIucnASHsy1ijBgmrW9wmMIIIG6FEA50UGYweio +aUs5jD1sP0ac/8HQtGQnR7cFlyxH3Q2gOHqbr4Ynw7f0dKbSStY5EcCANXMB0Oln +sFTNDHqlKYTHUyLlX16mswVLbIiFDWmIK+f3+1oH1rQ8WRE0vXRwBgcdOQRVwpHF +MVYBmFP8DBKXu3AWi/YV+XWUDyEiXA3t3ZPEaenlzOQxkFSjd/B0yA4iNqaZLjOm +rA2vslmtSpuKDGIxRq6Wa5fJdC/AWLGlkuhDI5cAPt7O8lMN9nZSepe5N/b/kS+v ++ZvqY3Z3EatZXgJ/ec4fcXKeuLJhjOLRg38pfr49nq+ewVKivbH7bvfeYSyJrIx6 +ZCiXHl1IvQmS1272gx7rTAvBUJa29sKLwo1hw5vcCi06R/6GxWnTqUXhcg6W4yA/ +ejpkUJduDh4drU9w9FZ7OPfP+AzqPP5yhdEcCvxI/9wOVHgdILA8aM8OLE1QdoEW +sRc/my0dG3rmtx2tNOKE0oghZknlRdOj6j1Uq6O2XHHjQJuTHO+bvqLe87kOwr4F +KOq9APYUs7hRhtUROgx3fygc6nfyG0qi7Khz/2cidosAmObb547d+5a827zQ4dYS +xQ2lZeEe6cabuAZg1Kz1roC4t4Vl6Xi++rkqPEms574ITSHdaQKCAQEA2H+xvTpE +PoIOEBvZ2ECVjwk05qCmyqZhJzIKRgpXRTwRYjAQsSuRvPuafxtRv/sUC1eXFbuV +zCDCP1NiO1YD1lxG1FFfjRdoeKHoZlKd/DNpjv9s1knPhMm7Nms408c9V1202ttS +zmQr5DM6o2K+f3V701cnuBPyKbFDA4IQ7sYKFr7mEs+O44cKVr5+NPdsCu/4dyj6 +9ailBoWy7nkjPUrsaFcTyszwZ9b1LMJ4NcT1InvJS1LuP1SWIczqUiv7iaj5i0Hf +lubNQ1tjHe0XYIj+tW0shg8e6oAu73yhf43iVKFDa3kZCMD5Ht1jnAUxg8Nowvvu +XGAbJIDAOIJoKwKCAQEAweQa4mZS0dR3Jtvz4sxrqxz+pS2mSQB8pqfekfQwE6mx +UobFPd6qztDBf5PmfprkDZBPFl8S9ZYyEiJyjVDqeUyANZdHq3NPYOUtbT8SiCrl +ymsP/OX1sf2vPsxdwJ48PET5iFrWbEHFXCkeNuwgZIM3EhaqE7cMC/Uj9DyZwatJ +j1er5w+3E5A5oLhPpy2XuM83wlXyKTWXH2bbDpdN1HRcujESiY+rSzLpixvLcwl4 +ejFr3T/MfQXC5fEDmQI0R4hG6BpzNfGznSyY1+J0uJ8gDqzJ911MyQyD9eMNTOZU 
+PhMqLmBt1VyMUz5ekcFxM5v5vgPmF+fn9A9M5baFfwKCAQEAng+ETVnHzzcWW05q +Gkb0qewX0jUB8LvN/Fa9R2tvUZ3MNzpORXtAuI+cuSXR5m7BsJIvPO+qKtDT4HXZ +JubigFL4ZzRNpW2smT3jtSimLSW/8GWtKTnUJuc9Jjrbz0oMD8fbLVmouARMQxvf +uL9zwwyb7a0Y03zEdQn0mhAQmrK9VOPkh2E/uf+yXahP7g2htM6EQUMLDeUlLoDY +JOEOCEa2GGtSiOJctgMrFpWYO/Fi4t0rFjIivNvdjCnV/U4dI+DY54GdYsd4nq+O +yp95TMJX608cjXdmo+AX5ELCiaSl1BG0bjeIPmrctlr9yT/FaaR1zL0vxgNobZsO +O3OB5wKCAQBDQu1sotCSSCF5ko4dnIqxVqKkDJ0F8CxN6ChW+53+BD0mguhD2U3p +5xNpPZaVTwhUCD7XZO3/0jXWgqq4iVx97eMANFXBjYP4+ifzIRE9uZvzx4ZJVkEQ +mQ/FOkI/wuTkh40FF3YRIhPkL8NyjCGEnNxq4v/nTPXZ5BWv8aHpRJGFL4XL53C8 +UakcLzQ6q59ZllEikowqbZPaaeUOP8DZNfDBCqsCm5txv9yyzFactqlbwm9H1o0K +xgfhmuWDm/ck5YqrlBlpmkqT+Neg9MdHELSfQqPhszUi/bt9fmGrzq9kxWM5qWwQ +u0VWz2khKTkrDS3rFBErM+EMko47lkDjAoIBAEdoQOdMnKn5hzbhxUhDit6I/NoX +K9xEc7VH0oBd9KLsINFzQyGYz857jSyCZ7L6o7JHTVLs/469lcjcuDJ/9JkNU7G0 +p3/h33sHN/w/cGh5OyWpaAt+m1PoP6fEoHomFAilAINCkXlT06+sLQo7dl7khJ7z +5qsogIVzeW1etFICikJHIHSsND21vCkVmRbrOA3MZxNpDwsTcK/LxmF3xq34PTS3 +1BKFZA872IuMf/xLGQ0RdEbLzxtSUppkMl2SWE1Vph1dV3xR+YUeYMziYq692cRE +6McNJpjK8RhdC9t3AlLrViyAphcU1v8T8YprQHMS/1xCbGZ/8nrCAnD81gU= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_c/admin.test.covenantsql.io.pem b/test/service/node_c/admin.test.covenantsql.io.pem new file mode 100644 index 000000000..1e3d7d608 --- /dev/null +++ b/test/service/node_c/admin.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFyDCCA7ACCQCofDYaBrdh6zANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIwNDFaFw0yODA3MjkwNDIwNDFaMIGoMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEiMCAGA1UEAxMZ +YWRtaW4udGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFz 
+dGVyQGNvdmVuYW50c3FsLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEAo/ktdmTGZAxZedA2vCVwXTb8iWNHg2D+EfI79oZ23qCBf09Tbp3RyRnAAogW +h833Po+wsvAeKt5qtKQozKABjzyF0KJbwZhpDUcpf94SRX0FHha4G60hCmB9I9Xz +L2qx5A18G+d/Y4l962KA2DTgEoxoN33HUhHafCmpdJH8npkGsNol2q+LElNXnS07 +caRXFbjckFZzm1s0YBlyT60CaNhI/R4kJvnO0UHfJ05vLunBlnJ7WGFSxJkcdrPG +TGP6TdkU0AZTVQMYnkTnkTp6cdwhVBj7FrLvzCJgPfzcEQvxkxpBEQwureJUstLO +arnMGCretg/dNszyoVzdMjAZtNxtM2u49TLIZstEgY1KEu4h3Gc8omIXC1F3EEf3 +X+3rrYd37gems1ki5q/ow6wblwwUtvbKCyggubHMFzSaRpmNueV7e6hKnKZU6GWZ +3/Q21gj5Ma5d3eauHdurquCS/tzYf+GNbYZHmczIADrTRlemqfhB5zMCbSSb8cZ5 +/APjoZtq30/WfrvmhHxdUnxgML0n6q3sq5oSnFZ55vyMEQOBX3z/Vx9jb1S+pxZy +wJlRHaJ8GTz+hhgr+ojXvMBtEsn99r2Ndu4R/FEgEJV/26GZP0JDc/SJ3GkZQhZ7 +IjUZfpQJ6/VKW0yiu86doPNsoG2gWJ+fF5VeagICYE5iBFUCAwEAATANBgkqhkiG +9w0BAQsFAAOCAgEARu5lULDZastDfdkkWhdBlPphbSksyNqT0tr/RZr0EtWjtNjq +IEYLuqOyVom3r3FiNjBO9u74BJxSqzbH2GH7qjZPnGfMgFQaxnS96T9HnXjZlPn5 +spcYA1m0W5TpF17N/rzxH+/c5VyIhwsVBdRF/uVow/6r+GkM+knC1K4Md27Wz0KU +jqOQ5eUm5KV4kyOQUg7MmTafqQcwt1Xh10kJ/52hAG53IznMgCo5ZSqYZroLlF8j +WXTlQtGr6SnsK8poSJW/JuidgBfwliL7OGFMnvWrCVk6FhAL3rlY/PmhDZ+OnG8x ++b5JuuxZcHnA0JVvK01eWAmcMixHlgtnZ+6Cgsx4CtUUo+PKuOZBBo4lWqw+/y5V +A0cvPy+8DadAndT/xd/NHUXgxrNjbaTaFuDeAJwN/i2wWh2wibEPhv25rCVQTvOP +HG9b2izWR4eYTqBSbTZjrfagnt3Ikx9os1C+/wuwGRMC/1GEwQ58bSuWHaKXdXSy +1syTvm+tt2Jg7shaKsfw+ZMY6iChUJ49yBB5W1F6VBHUgKqsGxnKlrEC4z6YoOkl +E9WNb6R/8ROF+OCYPgbisYaxIUFp6KJXK3Eh3J7s7XqW6Fn6nw5e0eMn1SZZIZNt +XeLTiv7tjmSREMVzABvaIaFQk0s5GmWkZvqQVkRLJRiHuCCgbIWMrZUZf24= +-----END CERTIFICATE----- diff --git a/test/service/node_c/read.test.covenantsql.io-key.pem b/test/service/node_c/read.test.covenantsql.io-key.pem new file mode 100644 index 000000000..4cbc33ec5 --- /dev/null +++ b/test/service/node_c/read.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA7cWb0RE4+hFRBhwJpAgQUSUOG+8H9evC85z5IgyrgwIPP7sl +6xz7VpiJ3O+1SP3Y+aHf1vNryp+AqW6Y2G9fPjPSusp4dFE19xC5hG7kYL2TLmtv 
+8B5ceLtQnI1XZd46TAiFqOg3rkB2X8oHOPfHY4zhPQ+4PC4EnIlFwiczoAbdomV2 +pQrKKM4F4ENwvthVr9uPg74pOEbJcy8NoW0l0WZlY82DVfC9ydvOLJH4nsncLTEY +BDUki9ETQIrWgqC03U21MZmDcyfZbi5M53aKky1iQOzNQZ3Rr/fFdNMcbLYKVlMH +hXQC05oXH8GvPYigqOzNyMngWdousKGiE/k3vncJPwVFhdovQXLfa/aaZGznBrP+ +dJe7lpBmAIpFd7LmdqVWPnuDwywhmVEneaI1aeBEXEl60/V6bFDUmSd62yBtFPcx +axUk8tABWDL7vN3kY3W4bUbNyvscQo8Q6waPjD5hTsbXAptQnXNsLzssDTnBX+Kk +ZNI1tTZ0suikRnbtvTDYd4hN6GFtmD6kF5J8F/e9iNZiBy2JnOIbvSckqkVhelyQ +o8zbr0k/rgNRXnV4UfJShYaWEaG74i+JcAVmp/P4Y9LyBwbiSPgH5oGg+eNdo6P/ +JOil0ArFvjBbCwDiHxKtd03jdSZ/B3pnwPGd9lHI0M8Tulp37LZsZm6bfI0CAwEA +AQKCAgB3I1rNyPlpo5+blhTmkfvLDOwi5wRwHq/SbUcP3pVZ0YBeiKGZSy5M16XM +hHermTZM7uU/yTyrjHxlaTtAx51Lh6ABZE4yyjZmE4VBbGcWaicDTWYLRMtE22aq +6s9uBYnkayi9141+zGID4TD5RH4tzXtWozfHP6+j18ySWh4uAwKuynRGgj+FbqXX +FzO5DKDyuusQMgppXl62Tk9gIVafs9T9yw4R08zlBjQqdQHEXpTqN/02roIfZKVm +46pUTb4SXUt7DNamrsLtyFlUaTtKP6VJrt2yESfuKhJQVS+a8SQA2R2dquF3sXAA +w4XRKVKHEhCSmUTHAOIAMx0JMQjSeYffbUR3dF8t8jK/RYHZ32oKuYgt6LLLMMUt +nfehiweMYKkjhLzW0WCyhuqlhk4T3x1Wgh6S+HiOHgvD8dW8wnmNL2k11h3STroM +g6Fc9+9KMBp97FrsCYFrIDeTY6uWCJxE5Dkb1Y7VXUdGMuIHztNnSKIHcNDOL2Mz +N6qr2smE1I5Wzm7CGv46AXTt0TOKnXgEyjNxp8LRkl+oYm/GlbqG1RrXHqWOcQEv +1Y6FSo1yP3SlOcPq3YLcZzTLH2hrrR1R4ie7hKRL6j19TnBR8R1CsP7LHOHJ4ahM +14SkS4srowtYsXJijoGh56K2H2sxnElVxQJ85qALdTBeR64eKQKCAQEA901lznD2 +5ZHFtYWLKhlqXaqgM/4Whu6cR0f3C9SLcQzAYjuaZjGmOKauackLRWSVpDnSR8zB +ol1QrRyY2upbVKRXxR9nkqamVtYZxSBS+8YjRBOvcESNY5HhjIIqwBQBTJY9v1DQ +kA5WSfivThQZGJDH1y2PrYFi5ZxmwRBnMMw+NXQy0ccRIagVMt4qQLEnA7diab9F +2ZYAgpk4o4d/tA4rF/22AcWX7pdk89zA99qoz0p2ko5/JrV+FZMGZ8PPbm5I4HsV +ahHXBXIUWExpOhonVjLNWXDvTHhkRS2zT2uhav2ohLf0+CouSA/aGoDBpOTzSndw +pL7yIQTu9B2W4wKCAQEA9iJnNuS8qfENZu+/5fzLlh7OaAerMN4JLdg251ESp4fs +LTOWFTlzU880/57SREcLO5RfYhMw2FUzyU+tXrc1cAZMNe8cGmPqDeUyQSgavs0M +WzSnFUk7z6jHH3GNCAPBC9A7M3oogyNiNm8fXZX51Fwv/EyAJX9lQhmXPMh/c53f +ulWCD28XKVEgsjEMMKZZVOvkRGBN9KLJ6DlLCtrixZSCfUyP04AjLxDeMGnhqi/P +nDADvVcxrRuMs5/8OQ7DVg4UxuOK4D/v3KcBq34hK13uOvupdxBHO1yYlUVa71G7 
+ZX4KhumUOZZQkoYSEzW1N6IZXzA8+nV/Ulh8u4WJzwKCAQEAn+9eN/S2uCFeS9bh ++YgWUh1XHkjlKL6IM1FHZE9BHwuwH9eMMytI5LpnceKjd21lmaALboPtdqQC2PH1 +qR6HkmX2nXWB9kXwrZgpcmNFR68Mf6p7e4/aINrnk4dbPn2xmWZQ6LnLKF8dTxmV +xlkZIdoAZBkDIqLa4sQTcCi7k8ODN+6+Lw0e9zVNAGjNyqjHIpAnBVy+P8nS6qNN +DfVDkZ7YH9vlKaAwcg1XLJ9H7QNsySLPLFkbwlz9/dXn/pOUQ0bvur3fS4neFZeB +sNk59GmVpxmT1JRFLp9tuY+kt2hULG1/3tVZiGU/KTuXQiyjD5FCBpbYMrOKw+/8 +2cOJIQKCAQBO6ub3Jc4MGxr190crIavRHV2G43aTO43r4hhwgIEfsCgcsh6b/Yip +xZUzpKO8ep7yYndWxdpycpchI+ftp4Z9vbcvz9PN7l08SVGcrJQuuyYMFEzCOXHw ++iemQE081Z9O/1wL/E4DBhRWabi/0/d/jHNiTNEFtNwtnnDsb0jWNDdo0kPaWP8v +IzD9kVZcPuoDnYLaHZrBJnTgfYY/G8F8IkrYi/TNlpcxXxIuqbROUfgaFxcL3Woz +G9M4QMKpNL+S1v74ajq7/iQVNoMFjnJqKjrZNJm4cEK2mNDfg5ZNh4IzX39WlIwP +DtAUuuIOwLiy9sl1yMy0bXn+WBreMUnnAoIBAFkchyvXwhi/l+rRiHAuFMTc3BsO +br1fVA5Avlv0MSBNRa2sISoslCDcqgLgdRckJplz2Q7YX0tL5aYZvZtMpiNmvyES +RL2hNqulrKJ/8Yuf04hUW14MhXizq7+NgMCTtOeLo3W40+EGswV9wvq/wTgdE5Yo +WgstDYvQ1YlqVXP1kWZDcFY1kO0zLIOWwFWbtmmtM2TDi09kZFNLGOoGXsJvKCWE +6vJ8xORPmmrVQ83hHIPqGlFkxts7R209RLWgGWSSOatdhDEd3uiuVS/XlNA3Q16l +70ME8P5a/MqEwmCF1sODndfqnc2A9n/XBM65IdFproaANOwsIcL2jW6T/3U= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_c/read.test.covenantsql.io.pem b/test/service/node_c/read.test.covenantsql.io.pem new file mode 100644 index 000000000..1fa09dd22 --- /dev/null +++ b/test/service/node_c/read.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFxzCCA68CCQCofDYaBrdh7DANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIwNTdaFw0yODA3MjkwNDIwNTdaMIGnMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEhMB8GA1UEAxMY +cmVhZC50ZXN0LmNvdmVuYW50c3FsLmlvMScwJQYJKoZIhvcNAQkBFhh3ZWJtYXN0 
+ZXJAY292ZW5hbnRzcWwuaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDtxZvRETj6EVEGHAmkCBBRJQ4b7wf168LznPkiDKuDAg8/uyXrHPtWmInc77VI +/dj5od/W82vKn4CpbpjYb18+M9K6ynh0UTX3ELmEbuRgvZMua2/wHlx4u1CcjVdl +3jpMCIWo6DeuQHZfygc498djjOE9D7g8LgSciUXCJzOgBt2iZXalCsoozgXgQ3C+ +2FWv24+Dvik4RslzLw2hbSXRZmVjzYNV8L3J284skfieydwtMRgENSSL0RNAitaC +oLTdTbUxmYNzJ9luLkzndoqTLWJA7M1BndGv98V00xxstgpWUweFdALTmhcfwa89 +iKCo7M3IyeBZ2i6woaIT+Te+dwk/BUWF2i9Bct9r9ppkbOcGs/50l7uWkGYAikV3 +suZ2pVY+e4PDLCGZUSd5ojVp4ERcSXrT9XpsUNSZJ3rbIG0U9zFrFSTy0AFYMvu8 +3eRjdbhtRs3K+xxCjxDrBo+MPmFOxtcCm1Cdc2wvOywNOcFf4qRk0jW1NnSy6KRG +du29MNh3iE3oYW2YPqQXknwX972I1mIHLYmc4hu9JySqRWF6XJCjzNuvST+uA1Fe +dXhR8lKFhpYRobviL4lwBWan8/hj0vIHBuJI+AfmgaD5412jo/8k6KXQCsW+MFsL +AOIfEq13TeN1Jn8HemfA8Z32UcjQzxO6Wnfstmxmbpt8jQIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQCq3FVZnp9HGItWlAXpViXrJx51D5W+bh83yKKlo23fo4u/6BM0 +H0gXTtl0XpG/nsp1oqINpc9+NXzEbs7Twx4utN29WyboacbLu5KPD6q17bWTdIH3 +VijHcyOchlru0nPhweNVtSR7+hmVMZrqHy+Ib2uzuDieD7ulvHTaX/JDkRvZYhYS +8qCptWk9VObeNnA3cyoZo5WyvRLXBQ5Q6LW5EMmXXQIKWyejX3vzwraZXFyhkLzz +GwY3h/ez4dm5Vgbf+lodAtslO5SEKcA6tSQLcdCO4J5+aZrbyIuzEGUra+Y2ZiRl +xtYzSkgaMRpMYZU7y96v7qoj2UOJw7KYj+3bN8rb3iTiXKXBG2XoH6Kn7IQb8pYD +k0+KGZmtZQ38St5UNmT0V2G1eoZA0F0FpuyVPe+ZOF3TxCq4BkvQC9puTrpHZiFm +mWw9xQsjOX34B88GckJsldUq86f+SNLhBFUBQOVRxWWjOV9R7PHHr+d28foTdPfU +gjf6Ff8XGoDw40peFLodsJfuI7xvZHa/4IoDnhEYHyDml++jskDypfNmSBn4m8fx +EtcwxUmsjHdW/mXqdFtgMsT+NGiGZ766KNS+JTWkv9ZJQMUS/714v3q/ymgzIIQ1 +BNhosSnSqa/eyAzggu6+US/FaG69xDBZGwoI+xw3kzQ+WoTQzjwoz57Enw== +-----END CERTIFICATE----- diff --git a/test/service/node_c/rootCA-key.pem b/test/service/node_c/rootCA-key.pem new file mode 100644 index 000000000..d4e545428 --- /dev/null +++ b/test/service/node_c/rootCA-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA0ry0EA1+Yeidf13L2GdhO1WiSEUKUDslVcYfzqbHh+NJyw4F +7J2cnolA2UcJ9zg7NoB5W3mYjoRBxr/SY3JVeDa7E3NnniNvO+n7ZD1sfCgjsJdB +89kMY4adzMbyJIvrXcllMawP2237jZ8LsZRl+NLJzWe296pfivWD1dI8RxnVk6Rr 
+Ub+VrcxFLp7W/NoCfW37xU3SYo4jFhVfCS5P9IMqI3w6aBK7mLcRj2HH8+M4xCxz +YLmRsHxkazZcn6KL/Tef5QyxM9a9yexwg8u16z4yxn74m6egQ/CNVtWcqg6zNpr4 +EI8WfgagJr1dus1OrpZrhdeNQDIQwMLa2RHhj72PF2qQ7bLkuWAY0UyuFvcwsGFi +EfxYtfNxnl2YPM5aVqrt31lQLi0AOLEvC6rYXd5sykDg9XAthuNe3cJAfqcGUpFr +3GrAwbPcuGUHPrgo6UjyNmwaBYLlGFmz3te8Pj2P1fLoLFXROtCg5hpYHsAqNZoV +zrR4/3uGvPn0eABzMR6BaNYl1m0mPkSY7bCDH2oulEmU/E1Ck0QuJ9+jd5vVrHPc +jV4B3jQsmK5UP7TXkcsJ5n8OozPahoItv5cwNYt4XGWpCqpEoqRmOSWZu6utX/nD +oByaw8mcEqPBHEotG7in8qpQLIboPO003e3lwq7mECwSz7UXFrIkfx8l7PMCAwEA +AQKCAgEAxfVjhCzYyqpTwMBga3F5Yd5oalEIhiN5R+CsIliC2MCGoksS/5ly9W3j +T/eugd/9p4358hcAWugcTdIFlUtixGFNTNE8xc1tgS+j6h7VGLAwDoOX/bOnMprT +Avjjn7ccKuazu3xxDOR8yCVeO7s2Kw3/aYeC1ZXi2EsXQ7WQ0A2RlnZ+JbW9qhxX +5JprQ+ybKC43srkO52uzw9vhgWNS0lKgM+NPjlICjUtzIGhvB0gsHAPRgkvvcoT3 +Y8sWKRLtQ7mL5wMMNrEDaXpEm1myE0BDPDkr2jQVlZyTeL2CxDC44pOicROowkwB +B0MdmAuiXNiKOpkoY+Rj3l9sazqj0cfzc1aFmUchAyb0Q+a2V3ubEUgRVtynRO75 +p41SrdB5Jo4rm83GmRoV2tbIK53rseRrXQ9VT72pu7D2XN6KhEgyUbc+4p9jbTY5 +GFGkWPbfp6ryoyiFWnwQyqlKQZnz+k74aweQQ0uroc5JUKgNxaS7kLIB2+4DrIRF +P0RwuUTR5wI9WjpdB4J17NzpBNgJ2s5eaQ40CCFHSictUX1a9kFk24nel7XI9br0 +F6tFwC9F3TdSxx5HyHna66WfOfG+vs6Kt6RC4Dzft08/jrQeQ8fnZcufjaeFG6Uy +xPZQQJ24krJ/SrsZiZmrR5bFCRFTE/n2N9npZpBHhajYhjbhs8kCggEBAP04RPKR +vw9knLkuqK78QVUBR4UzydqMDQpZFF9wM1x2lhg312K/og1Y8785SEHqsTgtXNvT +cleE9NhjUqsLfENfJov0ofCCXbjEUuRxCEZd+1R5XfX/SLOGmGmWqqiBMReHE/Cu +c0e8nBY/isGDtl5E9FPxdTUQDrPz61UAt94SThs0Jhq0oKT90QRm8/vxKkgOcYWf +s4D3BgGcvdDXA0zwH8RC36fAPvYLfi8i9OQ1upi9gNBs0EgYOtM3VLHZ3HQrZWTT +gUCwR+la0no19eZOgpbJQS2XzGLTVC3FFNQK6emOQ5g3h6bml8ukFQOHIWeHVOqJ +K0G7B/lT+S4WCEUCggEBANUNBunyt1/Y3+2JyRhF9C5Rq7Av5k+sedhsMLFHuQ8x +Cf/wAs8yKW09a0YrqX6laVmu7VcBHaMVY37lac/U6Slr11JnsHLTNPBgwwl20Z7U +QSG7/WdE/p0ylatKKg7dJ6iA2ctjYbjG0ML1XWuj6QbkvNDh/KR3cD/niNqXNCQ2 +KihJ62mQO1odKRRgBqImYtRVo7E6hgYvkYqK9TBgGQ5ZtX4tiMjHah/YR7AtEuOr +O3Yt4aaAww7w6JeRecIEg4JSW4KuK/ztJ7D5PNRg7sz2hECjELcFP6fTxF+qcEj5 +IzRgdTjs/bNUZz4H7ikH9ejBJdEvwPHlyDQHlFPsP9cCggEAKWGGsvVqecOBcSnU 
+2zPSIWgiHfyGojZ88xH3qFkXq6adhLurcTHL885zlu5vhoYqC/ot0KbPasoJkUs5 ++UXZOtFT5U9HH5zOYCGFQlvOdGFrbzSeTFM5uEzon2jF3t+t/CBQ++YmZLTH9ULR +FCrIJMO0AfvVoaRMItBbxvplEd9/8CYni/m0vwHTpJqGiMeyly/1EVc16H919dF9 +m6Fnoq0jI9mh3zIll+Ps7RsTVjAJnGhroqQFraJ4CohiSOZHhpyI519BIicsuU/k +UaB73PU7lhSxmBfUiNnsScaJTtWxwD9FgJyiiH3qlJbt8DOnG9ob4HAmJ4m/FdnJ +QOTM2QKCAQB1uesWH27A4eBrK/YZGZ6icbLDetRzNkVmF/KYI5/ZCyoRaRjEUV2e +5Y9/iOTx/IlIa2bu6sjrswf1uONNWsM0hkjHWlCgQqFAKtfbRPL0JymOcIjIJdHk +H22g5yxyZjZh4EF5KAN5zTLSaC8lKb+8dWz4p8epQe6fAVwYHfFMCTomZSJWhMKn +OvHWNnGz7C40UtZPOp2QkXyE5+AwyQlParblcFfjSn4T9rk2WtHTSG1lEllcXk5q +1ShRiKuVUFUzEDtM7N2Vt551JmQ8nwuV6qqN5Q15dMcF//jFPDMrv696Y8qimCJg +k8Uw+8TYm3OBGCnDe/XMNUL5rS6DaUqlAoIBAC8zFGOm1FFfSRAFOiKUEkGIYaBQ +Gd7GaBWzZFC2tDe6Dmp+WxFxqX7Dr/AG8nmCwOrbJOayhrEmIlYwjHmZNDSvphMp +L3dQYqVCqQRvCDx9ztXb+mus3iyhgD1vgWB/EwqhiK3S2n4rbaGU60h0YFC1JL0s +icrlRsZMkJV+l5O7gGFCVHCBZc9XZDeu6pqOjyMS0gx5IXyHGRBS7hS2HXD9QHid +/sufbNxzs2sCdwM/EwE8BlaKX0OiLGyxcQh7e5Ca4INuNzM5G+3ZEr2auVAkfTNF +u+sAmvfbC83U70HJakLGZuqq5F+xamj8dL/qnlYpo6D1wdnep1IeVvn83z8= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_c/rootCA.pem b/test/service/node_c/rootCA.pem new file mode 100644 index 000000000..1aa3ca429 --- /dev/null +++ b/test/service/node_c/rootCA.pem @@ -0,0 +1,39 @@ +-----BEGIN CERTIFICATE----- +MIIG1jCCBL6gAwIBAgIJAIMSiSlXKMA9MA0GCSqGSIb3DQEBCwUAMIGiMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEcMBoGA1UEAxMT +dGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFzdGVyQGNv +dmVuYW50c3FsLmlvMB4XDTE4MDgwMTA0MDc0OFoXDTI4MDcyOTA0MDc0OFowgaIx +CzAJBgNVBAYTAkNOMRAwDgYDVQQIEwdCZWlqaW5nMRAwDgYDVQQHEwdCZWlqaW5n +MRYwFAYDVQQKEw1NZXJpZGlhbiBMdGQuMRAwDgYDVQQLEwdEZXZlbG9wMRwwGgYD +VQQDExN0ZXN0LmNvdmVuYW50c3FsLmlvMScwJQYJKoZIhvcNAQkBFhh3ZWJtYXN0 +ZXJAY292ZW5hbnRzcWwuaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDSvLQQDX5h6J1/XcvYZ2E7VaJIRQpQOyVVxh/OpseH40nLDgXsnZyeiUDZRwn3 
+ODs2gHlbeZiOhEHGv9JjclV4NrsTc2eeI2876ftkPWx8KCOwl0Hz2Qxjhp3MxvIk +i+tdyWUxrA/bbfuNnwuxlGX40snNZ7b3ql+K9YPV0jxHGdWTpGtRv5WtzEUuntb8 +2gJ9bfvFTdJijiMWFV8JLk/0gyojfDpoEruYtxGPYcfz4zjELHNguZGwfGRrNlyf +oov9N5/lDLEz1r3J7HCDy7XrPjLGfvibp6BD8I1W1ZyqDrM2mvgQjxZ+BqAmvV26 +zU6ulmuF141AMhDAwtrZEeGPvY8XapDtsuS5YBjRTK4W9zCwYWIR/Fi183GeXZg8 +zlpWqu3fWVAuLQA4sS8Lqthd3mzKQOD1cC2G417dwkB+pwZSkWvcasDBs9y4ZQc+ +uCjpSPI2bBoFguUYWbPe17w+PY/V8ugsVdE60KDmGlgewCo1mhXOtHj/e4a8+fR4 +AHMxHoFo1iXWbSY+RJjtsIMfai6USZT8TUKTRC4n36N3m9Wsc9yNXgHeNCyYrlQ/ +tNeRywnmfw6jM9qGgi2/lzA1i3hcZakKqkSipGY5JZm7q61f+cOgHJrDyZwSo8Ec +Si0buKfyqlAshug87TTd7eXCruYQLBLPtRcWsiR/HyXs8wIDAQABo4IBCzCCAQcw +HQYDVR0OBBYEFFdgm7OKRRCg0gIK6kxGU4PuVhM7MIHXBgNVHSMEgc8wgcyAFFdg +m7OKRRCg0gIK6kxGU4PuVhM7oYGopIGlMIGiMQswCQYDVQQGEwJDTjEQMA4GA1UE +CBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQGA1UEChMNTWVyaWRpYW4g +THRkLjEQMA4GA1UECxMHRGV2ZWxvcDEcMBoGA1UEAxMTdGVzdC5jb3ZlbmFudHNx +bC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFzdGVyQGNvdmVuYW50c3FsLmlvggkA +gxKJKVcowD0wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAqEjzbVox +aaxCXs/lQuJE5/l/QSBs5MeE98zwINxCNmZYMsA9JmekyphP286fgdC7G2uRP89G +5lf9+UkHjfWK/N8l1t0NAA4LScMaD09SYCq9p/s7BfxfG0ZS5hfZ6MXuf6svYhL4 +gg7RQEUNZsaFSLvhMG0hGnBzKjDEPurrRnOx9tbtQF6/O6evN2Ig2ssqKjn/m1As +1mxGZy1ZCyREQvHEyj0p36LQtWJOYGRDncflJbLSMBrWq/bxQkATMYJuPPetHIJH +nQzbsbagUrTGZPM8B4LJXD8RtnXmH7zrU+JOunshxTfnl0vo+ezvKT0ig2q2M/t1 +DH0Em8EUgJUlUEOxUfA2hZ2Oq2RrLNz01oK06D0De5JL3CwUpSqbzqJ7F5M5os53 +I9FXSiKbjJUxZijH6NkTZ1gP6GpsEEWc6qOXXAYJWNrW12L7+QjnjgjWI176xO0y +VrvVGBgeOCoFAD/4FSzmCiee9v9sbdzd1GkfkXztPJKdeorRPyetob/zK+4btW4n +0dxfv6XahyBgoKVA7a0kn8ZqM/g4hmkfX4LujTK+C75d8p669zopQ3O76XRBsyJF +dM7J2DwRudG2NphtJyXWXdDSdK9s3iPUiS0y+j4gg9I/cFBQUjKD0R5ZPcRrdG4N +9zeN5A/Kg7vHsbpREm0YtLO9LvlLUp0HUS4= +-----END CERTIFICATE----- diff --git a/test/service/node_c/server.test.covenantsql.io-key.pem b/test/service/node_c/server.test.covenantsql.io-key.pem new file mode 100644 index 000000000..97b9d4b6b --- /dev/null +++ 
b/test/service/node_c/server.test.covenantsql.io-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDYqNT8V2PPMnWI +Ka2PxM7z2cf1WMrJq30EXFCboCJXyxLKFrBvb8LqgXpLonhug5kIVVaz3yHSph6d +lFEw4RCLzDm4PbvUxMbX3h0v/uirT8wCy8RvXYwQcisl2++bpO8IqFTiqg4O5Thr +O7BSqNXdS0/yi3PVN8UCzGckGXfPLsD7bCWPAFJq1YmvJ8XAqgAm7h3XPoWbUSEK +jOdLjU3jDq7/kCdZNN2DSkTDkE5JE2pf90BnALUIijggeLQn2080NfbFSW09j8kg +6BzsYfqBo+xxQ1MEP2N/0Zuqf+xW6jDYrHweznDXJdNHUN/yxp+64kxHgp2wyR4f +NxLYt7HvAgMBAAECggEBANNt9uMGGRWyxTWKjqBVTCx1o4fPDZ4+ZrLhr5wfakRI +nV5vQ+CLrSgSEJlMxL/8VlPmi8Teg/BAQnI+sfjEOdRjCRS90dXx7aXtUIhs9vtu +1MUJuvl+zdeiwm6gsbQvAUFum9/SWgO5NxSWXBxePM5G1472/aPeV7jCZgi5fczE +pC21VB7zzPG20UjWqVj2vAD8tS9/UQybc12/IOnS7z6pQP1wpn/2N99BEcEXWpDW +m7/jDbrZ6qJD18QmAoltMVfQF5Pi6qpLkU8qOYKFioO7GGNhapWz6lvgeLanux3l +mU71RAMANgmgdjs4RFdC0hfy0a/xPRfINCeVkSwC7mkCgYEA323IVoDaKSatFtS1 +W7QlX9tjtL1esuvQfHsR7B5EAqir6Lw4Hpn/gPlwNaTl+34GJy6dm4Shbn/A5pka +ow8bjWNPynTxuvlT5NXOW9TmgkrzATIhSrQfMHO7saDCo8JVqRZUvewFXXo4bitm +2bsHYh8Z1XClOz1fka97zEr3Wg0CgYEA+D5sEf9JdFU9LNuDC5JshG/Fc401mukg +AckE1Nmi1y4aa8XNEZYwUIlBb4r+OUlV/cMQvxEqnt0bl5oiL5myYW+sSnSCkHU6 +O3CQl1PO+uuiyNYl3QGNcq5Hw01joR/HejD+h0I5Mb642sXmUcba4fcLKBS1ZG6g +tCANeXBuKOsCgYEAzDYPMeE7hPkwovlveY3By0c+bmfXMjmOqPfjfah8COIZnfLK +aE3g1gUmpiE9gwvAm/djXk1vLwvdR+cQDZE1YZkvyJ/ygS55m2I/5ndE6DmQubsT +6q+PAj4Fg2in/f0VRiJ++cfLb5DSGv/YVZE4Qlqixg7bNrX1r7ZwtFygj9ECgYBA +S3qWFrahqMoVai1AvAXbL0/Go9Y0bxjZHYVg05V3gftZ2ntIiMuusD4Ac9FwaOwa +s4EM25dcWgwhccxU48vtrIzFI/QFEjeo2Xi5mP1Mw+b/eWeJHDPUdgskLFEXlDGI +FlR2F9LUbX9XOlZy67wZNnDvSp3Ii1aYEI0s3M/LTQKBgCadu59DWqTxHfzu/vRG +e7xIMuqXZ12zA/9Ks2pasw1Aa9ZWwgRpZmP4PiFn9tyXEsUXYVbNxWEu3ZUOMQEY +Pq4BeyADEWLDeoo1rHbAEv2X+cr7rm4Sobu2vxtfi0uMlUILtWyK3XuiRoTdlXOH +U9xfXHYXJp08l0Q2dXIHtEZl +-----END PRIVATE KEY----- diff --git a/test/service/node_c/server.test.covenantsql.io.pem b/test/service/node_c/server.test.covenantsql.io.pem new file mode 100644 index 000000000..1b9428afa --- /dev/null +++ 
b/test/service/node_c/server.test.covenantsql.io.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEQDCCAqigAwIBAgIQEKobji5n26kQYHutrsnlgjANBgkqhkiG9w0BAQsFADBt +MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExITAfBgNVBAsMGHhxMjYy +MTQ0QFFpcy1NYWNCb29rLVBybzEoMCYGA1UEAwwfbWtjZXJ0IHhxMjYyMTQ0QFFp +cy1NYWNCb29rLVBybzAeFw0xODA3MzExNTA5MDVaFw0yODA3MzExNTA5MDVaMEwx +JzAlBgNVBAoTHm1rY2VydCBkZXZlbG9wbWVudCBjZXJ0aWZpY2F0ZTEhMB8GA1UE +CwwYeHEyNjIxNDRAUWlzLU1hY0Jvb2stUHJvMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA2KjU/FdjzzJ1iCmtj8TO89nH9VjKyat9BFxQm6AiV8sSyhaw +b2/C6oF6S6J4boOZCFVWs98h0qYenZRRMOEQi8w5uD271MTG194dL/7oq0/MAsvE +b12MEHIrJdvvm6TvCKhU4qoODuU4azuwUqjV3UtP8otz1TfFAsxnJBl3zy7A+2wl +jwBSatWJryfFwKoAJu4d1z6Fm1EhCoznS41N4w6u/5AnWTTdg0pEw5BOSRNqX/dA +ZwC1CIo4IHi0J9tPNDX2xUltPY/JIOgc7GH6gaPscUNTBD9jf9Gbqn/sVuow2Kx8 +Hs5w1yXTR1Df8safuuJMR4KdsMkeHzcS2Lex7wIDAQABo30wezAOBgNVHQ8BAf8E +BAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAfBgNVHSME +GDAWgBSD0tobP0meocjRN1XBYqlSTOHglTAlBgNVHREEHjAcghpzZXJ2ZXIudGVz +dC5jb3ZlbmFudHNxbC5pbzANBgkqhkiG9w0BAQsFAAOCAYEARjlPL41xnYOUHz+k +Qrj/2figGRYGBwfnLVJrjkkSuWY1KRTLUlUYcc9ofkLzAcwRxVbdhcwLLHDA/ddZ +Yii7AY9Z/amzagu/btgvaWu1KMb8IKe6PKy1ZjzzpT6M9xGbW/YyxSWSfNXxD2t1 ++ThvFKZai+525IC2PjlOP8k9hKu4A55wNjvekleqQ+B944iXDRBVOHqgK3Fy3JQ5 +pcAGm9Q0Bn8xNZhEsVERPKeMOnxF/rfggEiCdPp6fexG9X+dUziPSXR8RGZDn16E +Ho8S4m3or0fMX2W2EsYkRY/ESxsE8Y5KFELh4RW2DrUfzibHaS3ZeXyJLAuBTUzj +s4BqXUwpKwqoQqv3d0Mi1RZanfVMWG470tuvGdmaW3HdZoIBmo44fVjx63/6wEGm +0A45avtOHRwQGObM446Q+Gs6zsZspLgEHjmPwr+0PsIjbR6weehXnAAOnr9RWX8n +UstyEkOSDZA8vJmSWSu8tXwky31ZF+cSC7DYZxBP7dhPWDCn +-----END CERTIFICATE----- diff --git a/test/service/node_c/write.test.covenantsql.io-key.pem b/test/service/node_c/write.test.covenantsql.io-key.pem new file mode 100644 index 000000000..1b0d20305 --- /dev/null +++ b/test/service/node_c/write.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEArwFtfbMdNu7m332+1KH/6hY7/zv+YhPu3NJ7WZC2wJlyc9nI 
+BzuD7SV8lcwv75w2n4aclr/KWFXYBPeqtfn4ebV/pvIZtyd+P4MGKbT3FuHdxPhI +7uTcw7LENXUKtmqO01OO1mx4+cbn14Hd8f1NUoxs5vEnohBoV7etI84fLAxglNAd +dtGTeN+jlSN+ipFKI74aPeEqnnJeJ3j35ZgvYb79hYEHXPngKOCsAa01cDrpxzuD +L+ukveP5SLee1lCYHpUKEMt+0SMBalVW6XltBnmMrLL5tjfA4RyAmgRxle8q+O2w +DxivStX1kInUL22kj6AjSrBpOPy12rwQiPa6pJt0pjk7+S71p69NJeoYyYz2+l6g +uWXqJ8b/jlwT4+CBOlOuDuvCvU1almQADKcv1eN9g0X2d8F+9CHgUdHaVkpGJ9ue +Ogrw5HWipnjt2B/YFRErh3125CDnGap+SCEoPCJFTxVG0GahzmWC1sQr6EsHdMDB +k6YTINY+4iNvoxlTUhAMWiYeLNY4PZaqL9q3tcjuHMxMcykAYpMiEQe0mGsWiRjz +DfnDtGcyeagqHkcjBE/w1qwGjLQBJFmjYmwpk5cDtS0OhhEIOastqKoacc+L4rnq +ALEM94uuPS8VMfu+d9rspfBMy/a/PjMrOO5pYLD0yIIYVHDJxyHT0p0QVZkCAwEA +AQKCAgBzaAueQwnW+gCCDVhUvGgZJIR4MkX0w5RXRu5VCBucMxTI1SsVqee78WaR +Gk/aQTe8R3bn6p4zVpjX3cNTsf5rtIbzvt+6am7Uz0C3LEFtc5FdnSXrdD0pSLAf +WImx9d8t+QJO4MV+Ye7trRSByjq9XyFJwmoSc6N7hQLGg90GnTrrp7pmappHsaMc +bIW8N0ee/nQrrlr+lgkFGr7PR2annN1utsH2TEnIazDDAkglNJSJ7/L5HPpMxxPT +IlO6nPdT45D5tlhw7ha22oQv/wUoqetcz8Hgqi+lw7gC2T9WUpwSAByEOBEQ1rvT +jzC//hvxIvdi/6bED9KU5kQ5Lgux/fWUvg/l6u7EebM6TeG/Er5Tq1D6j2+IjfkR +bPHLEk2Cv2oE7W1PhP+yinJnoxwHeic+nu7wsuevzvPhNZ7lLaWgHFvc1YVTmLDq +E/DGm5Qj6mh8SP2NcxW79m6fjdbgjw0OuPxdEo2sj74cxcifTy54GuSoZqGnw18g +28qXpDLkWgHQFrm6LvLqnaY3uvNMYLBWd4kqH0Y9XKI1N3j82ensAwH5e6Ol8Stw +I7GWT/1GggOEPbIwYtVBfbwghfktPttmHU4vs8IQnufqExgEjDhWspjJVZEuoO8V +8weDCQADS8/266GqRN9CUtNWMmbM0jAHgL7Bq9AFwgYSrP/BiQKCAQEA3GNqpUw8 +Ix3yYxvEXbceOguwqjzOW1vGBhJ4k6ptPG9kPI0GqMY6sw/RgRlCpfCq7Ig/Renj +6LPbSyjkylfavlLNODi/iGbnd6cLKtsvaeexWP1wYU4T367rF8ifqKl/C+l4XqBN +4j4KooyPF889PrgXJ73517jWwhxnncgDabJ2zTetjkvFbf3bAua5F7rufHi39Zng +Rt0gEmFg99XJjrtbS7iND5fhZpGK14hhkdwHptu6XS+yGIiVwbCwS0odtZDID3vk +s9CEUzhjnK8ld04RJ4vSMlxfzlub3e88Lvii79mmZgdH4aP0cPhmFJ3i0mefUVpw +cSmQSVMsxHkh6wKCAQEAy0i1KjX3k7LwWgsvwtRjnJMuEWJ/SoRiE4+Cho9BSCVg +onG4NyBOUgfQI9pBKf2CPWVDBA5VQrDN81ozmTPTgb7isDcFDSiowqyVSyRCorUB +AfjbpD7z6QMdBt15xHR3CXWwpio5NwBQqQ+I2AJ1koBYUVj5TupDOZzwVY8/BbqD +fmhtqLd4c2q5Go2ESK+EVAA1jvFmZUTjr9jC9a/8s/cn5Xqv7/s4BCmmfqQSKZS2 
+LPBA2Th1zsUrSW3Os9v+c6LUU92LVEZKKKZyRykTemQRH/oljGG9Dn/hUDcvaI2z +A2+T15rQd9p6ePySD8BuZzxwFvAJQPOYaqivrzsBiwKCAQAmd5fSuEa63mxDTkJt +FRxKh2XToP9nxNIAl1LCe3nLlanKQ9dIuCjgvj8UKIOQkTxUQsfAfT2RjWsWaFHe +24zLsYouaQFNXqDCKr7xQQa6ln1HCh2Gbmlbnp1cLmFnwAXz31FqOtK9TZTvoFcN +kdefzeQExM0KETIy+WBAkvu9hC/mS/SYJLOWKjwC+qCN+svLoAqD7NLPq6MAckzJ +lWAz8JHT2qeMdDccfwTb7+sP2XbgcfPKdhvA2n5BK4Tp70rWOSoiQb6+gAPIvsvs +Oknw1Ah8fZQ3xBXY3/aJu0sm67EM6lF394ddZA+zdDflG1XO4dVWDtIXfmi307O5 +q2b3AoIBAFkTMfceEK8SkIkUL8hyYnegcmZBv786RPOHRc2KhjOD1VU4+VyGdmsx +az3ajAVHRUN71KK5WRjQ+l2w37043WwT5acLZNZAQ7qR/xUe/WfoYlmn3y6YOy6W +I6j3cTzpP6PQgyg8hjeYlr+NxAvLABPC03BJyWyP8AcVwqXrD9WFxcqlHa/5PPlu +AVAmRJnI9vYL5WwOUSz8w7wxAjS/+b4uBbhjSyaf8Qq56W/CmwbHWBBW8kN8nvqM +oQwa5qEfO98VsW5SPJQf/KzVSmvuDs/peyuE4+EgjsQEuwj4NXjd5lwSDzlBaCms +fU/4dFQcoQPxkrgqVBO26cmKwvjIpUMCggEAIb85XP+bwnSOTjFbn61k8PdgyPBq +kBDkiofKiBuR2UzuYhVqxkBqWHtDUhqq1y7A0S/ya75bSv67q4ZlHWEjCEiA8thv +KZwn/8yRVFFKEgtB0afub62Zgc+pPXAr2JwwtZK5dg91QxPaKF20YEz6tOcZdjut +gcQ8Bt4dpRvoz2vOJQnqMIhQM9+HiE7XXV7fgUwT55nC+4wRhILd3xOZKYzDzgMJ +ShMUAb7QkLRujyQwcYPxjWiqRFGSodMoNE2OdofLmfwD1vQfZ/gAorLBH3BVyXAz +53zHfE7+kLgobJBYf7T7Jk246soVYOLSZbeVjAT0ajMKD4ay2jNdnSlxKA== +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_c/write.test.covenantsql.io.pem b/test/service/node_c/write.test.covenantsql.io.pem new file mode 100644 index 000000000..ccebf106c --- /dev/null +++ b/test/service/node_c/write.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFyDCCA7ACCQCofDYaBrdh7TANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIxMDZaFw0yODA3MjkwNDIxMDZaMIGoMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEiMCAGA1UEAxMZ 
+d3JpdGUudGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFz +dGVyQGNvdmVuYW50c3FsLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEArwFtfbMdNu7m332+1KH/6hY7/zv+YhPu3NJ7WZC2wJlyc9nIBzuD7SV8lcwv +75w2n4aclr/KWFXYBPeqtfn4ebV/pvIZtyd+P4MGKbT3FuHdxPhI7uTcw7LENXUK +tmqO01OO1mx4+cbn14Hd8f1NUoxs5vEnohBoV7etI84fLAxglNAddtGTeN+jlSN+ +ipFKI74aPeEqnnJeJ3j35ZgvYb79hYEHXPngKOCsAa01cDrpxzuDL+ukveP5SLee +1lCYHpUKEMt+0SMBalVW6XltBnmMrLL5tjfA4RyAmgRxle8q+O2wDxivStX1kInU +L22kj6AjSrBpOPy12rwQiPa6pJt0pjk7+S71p69NJeoYyYz2+l6guWXqJ8b/jlwT +4+CBOlOuDuvCvU1almQADKcv1eN9g0X2d8F+9CHgUdHaVkpGJ9ueOgrw5HWipnjt +2B/YFRErh3125CDnGap+SCEoPCJFTxVG0GahzmWC1sQr6EsHdMDBk6YTINY+4iNv +oxlTUhAMWiYeLNY4PZaqL9q3tcjuHMxMcykAYpMiEQe0mGsWiRjzDfnDtGcyeagq +HkcjBE/w1qwGjLQBJFmjYmwpk5cDtS0OhhEIOastqKoacc+L4rnqALEM94uuPS8V +Mfu+d9rspfBMy/a/PjMrOO5pYLD0yIIYVHDJxyHT0p0QVZkCAwEAATANBgkqhkiG +9w0BAQsFAAOCAgEAM+1j12Px/guyMnZFwwsRC9ITa5zJkAkfR++LzRZcT+Gz5du1 +FyQp+5L4Pws96OLFADKHVYE0EFlgdVbskVBErrEIQeZRw0bmp1zDEhfxr4c8fivY ++hW/AXjHsJuO8WVTlRctnefY1g6OdvfI6Sc2092GM9Nvquf1OhKIbPso1NxUUrnp +HQ4ffhQNAFsJk/PkPsjTBzP2iJrzynPdoIPK9jO6NbUg6XfZDQRwchvI7NduWq+x +nNTWV1D8oHvP0+FwHdRyctIVVjkxqd7wnenWl2mUr0SBf0FnfJPl9fz+YLVBLroF +4NGwGG/r6q9tRBAXATm+qbNlth589Tz8mMZMnq2+D6O4499I4MJLceuXw689rO05 +s9/BXWzjJThDnrFaQPyf/YTyMuFaf919F0UGLTLYLYf4vfuflUhaStmYyvArv229 +F4DJy/QDM+NWjo/pJH3ETeEA1stD7kQq7GGqy/MiB5YXqRLnGjpa9vqOECsMIm29 +1TUgdCVN9Gsk8JQPGm/lJUeJECq20LThSeXG+sY6RU+0rmOUJvR8Uv3kjkn0Xd+/ +p2xM/CboFXVcmU+fe9UfJar87MlPJcZP5SenVQuWZ3imI0kFeaObfHHKKJfNAoFl +agBFqnAc/EkYqekxGkxc3pVhBBiZ3D+FlinC2yRko9glPkRKA2WxINPVxm0= +-----END CERTIFICATE----- From e659592835900e81cbc4c82774d18a1b8a8d53f2 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 7 Nov 2018 03:19:07 +0800 Subject: [PATCH 25/32] Fix empty time encode/decode and hash verification bug --- Gopkg.lock | 41 +- .../HashStablePack/marshalhash/write_bytes.go | 3 + vendor/github.com/davecgh/go-spew/LICENSE | 15 - .../github.com/davecgh/go-spew/spew/bypass.go | 145 - 
.../davecgh/go-spew/spew/bypasssafe.go | 38 - .../github.com/davecgh/go-spew/spew/common.go | 341 --- .../github.com/davecgh/go-spew/spew/config.go | 306 -- vendor/github.com/davecgh/go-spew/spew/doc.go | 211 -- .../github.com/davecgh/go-spew/spew/dump.go | 509 ---- .../github.com/davecgh/go-spew/spew/format.go | 419 --- .../github.com/davecgh/go-spew/spew/spew.go | 148 - vendor/github.com/pmezard/go-difflib/LICENSE | 27 - .../pmezard/go-difflib/difflib/difflib.go | 772 ----- .../github.com/stretchr/objx/.codeclimate.yml | 13 - vendor/github.com/stretchr/objx/Gopkg.lock | 30 - vendor/github.com/stretchr/objx/Gopkg.toml | 8 - vendor/github.com/stretchr/objx/LICENSE | 22 - vendor/github.com/stretchr/objx/README.md | 80 - vendor/github.com/stretchr/objx/Taskfile.yml | 32 - vendor/github.com/stretchr/objx/accessors.go | 148 - vendor/github.com/stretchr/objx/constants.go | 13 - .../github.com/stretchr/objx/conversions.go | 108 - vendor/github.com/stretchr/objx/doc.go | 66 - vendor/github.com/stretchr/objx/map.go | 190 -- vendor/github.com/stretchr/objx/mutations.go | 77 - vendor/github.com/stretchr/objx/security.go | 12 - vendor/github.com/stretchr/objx/tests.go | 17 - .../stretchr/objx/type_specific_codegen.go | 2501 ----------------- vendor/github.com/stretchr/objx/value.go | 53 - vendor/github.com/stretchr/testify/LICENSE | 22 - .../testify/assert/assertion_format.go | 484 ---- .../testify/assert/assertion_format.go.tmpl | 5 - .../testify/assert/assertion_forward.go | 956 ------- .../testify/assert/assertion_forward.go.tmpl | 5 - .../stretchr/testify/assert/assertions.go | 1394 --------- .../github.com/stretchr/testify/assert/doc.go | 45 - .../stretchr/testify/assert/errors.go | 10 - .../testify/assert/forward_assertions.go | 16 - .../testify/assert/http_assertions.go | 143 - .../github.com/stretchr/testify/mock/doc.go | 44 - .../github.com/stretchr/testify/mock/mock.go | 885 ------ worker/types/types_test.go | 13 + 42 files changed, 19 insertions(+), 10348 
deletions(-) delete mode 100644 vendor/github.com/davecgh/go-spew/LICENSE delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go delete mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE delete mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go delete mode 100644 vendor/github.com/stretchr/objx/.codeclimate.yml delete mode 100644 vendor/github.com/stretchr/objx/Gopkg.lock delete mode 100644 vendor/github.com/stretchr/objx/Gopkg.toml delete mode 100644 vendor/github.com/stretchr/objx/LICENSE delete mode 100644 vendor/github.com/stretchr/objx/README.md delete mode 100644 vendor/github.com/stretchr/objx/Taskfile.yml delete mode 100644 vendor/github.com/stretchr/objx/accessors.go delete mode 100644 vendor/github.com/stretchr/objx/constants.go delete mode 100644 vendor/github.com/stretchr/objx/conversions.go delete mode 100644 vendor/github.com/stretchr/objx/doc.go delete mode 100644 vendor/github.com/stretchr/objx/map.go delete mode 100644 vendor/github.com/stretchr/objx/mutations.go delete mode 100644 vendor/github.com/stretchr/objx/security.go delete mode 100644 vendor/github.com/stretchr/objx/tests.go delete mode 100644 vendor/github.com/stretchr/objx/type_specific_codegen.go delete mode 100644 vendor/github.com/stretchr/objx/value.go delete mode 100644 vendor/github.com/stretchr/testify/LICENSE delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl delete 
mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl delete mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go delete mode 100644 vendor/github.com/stretchr/testify/assert/doc.go delete mode 100644 vendor/github.com/stretchr/testify/assert/errors.go delete mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go delete mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go delete mode 100644 vendor/github.com/stretchr/testify/mock/doc.go delete mode 100644 vendor/github.com/stretchr/testify/mock/mock.go diff --git a/Gopkg.lock b/Gopkg.lock index 128582fbf..1cee45a5b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,11 +3,11 @@ [[projects]] branch = "master" - digest = "1:13749560a469f9e2eb39a2798d9aae94c77a751254d8a48f4ed954dfe9e664a7" + digest = "1:341f9de25b320f45124840fa084aa804a6659cfeae2ea93a45424fd73f9d7da5" name = "github.com/CovenantSQL/HashStablePack" packages = ["marshalhash"] pruneopts = "UT" - revision = "1627b606c496aeafb4f0693c1ee5cd935b85dd73" + revision = "f5d7cc3bf3356c85eadcb0f66007f2f2b7ee81bc" [[projects]] branch = "develop" @@ -135,14 +135,6 @@ pruneopts = "UT" revision = "cbb64ac3d964b81592e64f957ad53df015803288" -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - [[projects]] digest = "1:72dc2b6056e7097f829260e4a2ff08d32fec6017df1982a66e110ab4128486f8" name = "github.com/dlclark/regexp2" @@ -338,14 +330,6 @@ revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = 
"792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - [[projects]] branch = "master" digest = "1:f696f304d2a14745859a153f1041b66e0e2cf150eff731beb6431e93e27ddc5c" @@ -474,25 +458,6 @@ revision = "9e8dc3f972df6c8fcc0375ef492c24d0bb204857" version = "1.6.3" -[[projects]] - digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "UT" - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - digest = "1:15a4a7e5afac3cea801fa24831fce3bf3b5bd3620cbf8355a07b7dbf06877883" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - ] - pruneopts = "UT" - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - [[projects]] branch = "master" digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c" @@ -677,8 +642,8 @@ "github.com/siddontang/go-mysql/server", "github.com/sirupsen/logrus", "github.com/smartystreets/goconvey/convey", - "github.com/stretchr/testify/mock", "github.com/syndtr/goleveldb/leveldb", + "github.com/syndtr/goleveldb/leveldb/iterator", "github.com/syndtr/goleveldb/leveldb/opt", "github.com/syndtr/goleveldb/leveldb/util", "github.com/tchap/go-patricia/patricia", diff --git a/vendor/github.com/CovenantSQL/HashStablePack/marshalhash/write_bytes.go b/vendor/github.com/CovenantSQL/HashStablePack/marshalhash/write_bytes.go index d4e9dde2f..584e93780 100644 --- a/vendor/github.com/CovenantSQL/HashStablePack/marshalhash/write_bytes.go +++ b/vendor/github.com/CovenantSQL/HashStablePack/marshalhash/write_bytes.go @@ -276,6 +276,9 @@ func AppendComplex128(b []byte, c complex128) []byte { // AppendTime appends a time.Time to the slice as a MessagePack extension func AppendTime(b []byte, t time.Time) []byte { + if t.IsZero() { + return AppendNil(b) + } o, n := ensure(b, TimeSize) t = t.UTC() o[n] = mext8 diff --git a/vendor/github.com/davecgh/go-spew/LICENSE 
b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96f2..000000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 792994785..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. 
-func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. 
- vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d68..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. 
- UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce945..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. 
-var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. 
However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. 
-func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. 
-func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f31..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. 
- DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. 
Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. 
See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. 
It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. 
The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. 
- -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6f1..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) 
- spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. 
- - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. 
- -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89fc1..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. 
It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. 
- nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. 
- case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. 
It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. 
-func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) 
-} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7d7..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. 
- for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. - if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. 
It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. 
- if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e3388..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad612..000000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99fad..000000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). -package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. 
The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "

" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. 
-func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. 
-// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. 
- for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. 
-// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). -func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. 
- if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. 
-func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). -func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. 
-// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. -func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, 
range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. 
-// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. -func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. 
-func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml deleted file mode 100644 index 010d4ccd5..000000000 --- a/vendor/github.com/stretchr/objx/.codeclimate.yml +++ /dev/null @@ -1,13 +0,0 @@ -engines: - gofmt: - enabled: true - golint: - enabled: true - govet: - enabled: true - -exclude_patterns: -- ".github/" -- "vendor/" -- "codegen/" -- "doc.go" diff --git a/vendor/github.com/stretchr/objx/Gopkg.lock b/vendor/github.com/stretchr/objx/Gopkg.lock deleted file mode 100644 index eebe342a9..000000000 --- a/vendor/github.com/stretchr/objx/Gopkg.lock +++ /dev/null @@ -1,30 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require" - ] - revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" - version = "v1.2.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/stretchr/objx/Gopkg.toml b/vendor/github.com/stretchr/objx/Gopkg.toml deleted file mode 100644 index d70f1570b..000000000 --- a/vendor/github.com/stretchr/objx/Gopkg.toml +++ /dev/null @@ -1,8 +0,0 @@ -[prune] - unused-packages = true - non-go = true - go-tests = true - -[[constraint]] - name = "github.com/stretchr/testify" - version = "~1.2.0" diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE deleted file mode 100644 index 44d4d9d5a..000000000 --- a/vendor/github.com/stretchr/objx/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Stretchr, Inc. -Copyright (c) 2017-2018 objx contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md deleted file mode 100644 index be5750c94..000000000 --- a/vendor/github.com/stretchr/objx/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Objx -[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx) -[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx) -[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) -[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) -[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) -[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) - -Objx - Go package for dealing with maps, slices, JSON and other data. 
- -Get started: - -- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) -- Check out the API Documentation http://godoc.org/github.com/stretchr/objx - -## Overview -Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. - -### Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: - - m, err := objx.FromJSON(json) - -NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. - -Use `Get` to access the value you're interested in. You can use dot and array -notation too: - - m.Get("places[0].latlng") - -Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - - if m.Get("code").IsStr() { // Your code... } - -Or you can just assume the type, and use one of the strong type methods to extract the real value: - - m.Get("code").Int() - -If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. - - Get("code").Int(-1) - -If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. 
- -### Reading data -A simple example of how to use Objx: - - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() - - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) - -### Ranging -Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } - -## Installation -To install Objx, use go get: - - go get github.com/stretchr/objx - -### Staying up to date -To update Objx to the latest version, run: - - go get -u github.com/stretchr/objx - -### Supported go versions -We support the lastest two major Go versions, which are 1.8 and 1.9 at the moment. - -## Contributing -Please feel free to submit issues, fork the repository and send pull requests! diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml deleted file mode 100644 index f8035641f..000000000 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ /dev/null @@ -1,32 +0,0 @@ -default: - deps: [test] - -dl-deps: - desc: Downloads cli dependencies - cmds: - - go get -u github.com/golang/lint/golint - - go get -u github.com/golang/dep/cmd/dep - -update-deps: - desc: Updates dependencies - cmds: - - dep ensure - - dep ensure -update - -lint: - desc: Runs golint - cmds: - - go fmt $(go list ./... | grep -v /vendor/) - - go vet $(go list ./... | grep -v /vendor/) - - golint $(ls *.go | grep -v "doc.go") - silent: true - -test: - desc: Runs go tests - cmds: - - go test -race . - -test-coverage: - desc: Runs go tests and calucates test coverage - cmds: - - go test -coverprofile=c.out . 
diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go deleted file mode 100644 index 204356a22..000000000 --- a/vendor/github.com/stretchr/objx/accessors.go +++ /dev/null @@ -1,148 +0,0 @@ -package objx - -import ( - "regexp" - "strconv" - "strings" -) - -// arrayAccesRegexString is the regex used to extract the array number -// from the access path -const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` - -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) - -// Get gets the value using the specified selector and -// returns it inside a new Obj object. -// -// If it cannot find the value, Get will return a nil -// value inside an instance of Obj. -// -// Get can only operate directly on map[string]interface{} and []interface. -// -// Example -// -// To access the title of the third chapter of the second book, do: -// -// o.Get("books[1].chapters[2].title") -func (m Map) Get(selector string) *Value { - rawObj := access(m, selector, nil, false) - return &Value{data: rawObj} -} - -// Set sets the value using the specified selector and -// returns the object on which Set was called. -// -// Set can only operate directly on map[string]interface{} and []interface -// -// Example -// -// To set the title of the third chapter of the second book, do: -// -// o.Set("books[1].chapters[2].title","Time to Go") -func (m Map) Set(selector string, value interface{}) Map { - access(m, selector, value, true) - return m -} - -// access accesses the object using the selector and performs the -// appropriate action. 
-func access(current, selector, value interface{}, isSet bool) interface{} { - switch selector.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - if array, ok := current.([]interface{}); ok { - index := intFromInterface(selector) - if index >= len(array) { - return nil - } - return array[index] - } - return nil - - case string: - selStr := selector.(string) - selSegs := strings.SplitN(selStr, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 - var err error - - if strings.Contains(thisSel, "[") { - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) - if len(arrayMatches) > 0 { - // Get the key into the map - thisSel = arrayMatches[1] - - // Get the index into the array at the key - index, err = strconv.Atoi(arrayMatches[2]) - - if err != nil { - // This should never happen. If it does, something has gone - // seriously wrong. Panic. - panic("objx: Array index is not an integer. Must use array[int].") - } - } - } - if curMap, ok := current.(Map); ok { - current = map[string]interface{}(curMap) - } - // get the object in question - switch current.(type) { - case map[string]interface{}: - curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { - curMSI[thisSel] = value - return nil - } - current = curMSI[thisSel] - default: - current = nil - } - // do we need to access the item of an array? 
- if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - current = nil - } - } - } - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet) - } - } - return current -} - -// intFromInterface converts an interface object to the largest -// representation of an unsigned integer using a type switch and -// assertions -func intFromInterface(selector interface{}) int { - var value int - switch selector.(type) { - case int: - value = selector.(int) - case int8: - value = int(selector.(int8)) - case int16: - value = int(selector.(int16)) - case int32: - value = int(selector.(int32)) - case int64: - value = int(selector.(int64)) - case uint: - value = int(selector.(uint)) - case uint8: - value = int(selector.(uint8)) - case uint16: - value = int(selector.(uint16)) - case uint32: - value = int(selector.(uint32)) - case uint64: - value = int(selector.(uint64)) - default: - return 0 - } - return value -} diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go deleted file mode 100644 index f9eb42a25..000000000 --- a/vendor/github.com/stretchr/objx/constants.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -const ( - // PathSeparator is the character used to separate the elements - // of the keypath. - // - // For example, `location.address.city` - PathSeparator string = "." - - // SignatureSeparator is the character that is used to - // separate the Base64 string from the security signature. 
- SignatureSeparator = "_" -) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go deleted file mode 100644 index 5e020f310..000000000 --- a/vendor/github.com/stretchr/objx/conversions.go +++ /dev/null @@ -1,108 +0,0 @@ -package objx - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" -) - -// JSON converts the contained object to a JSON string -// representation -func (m Map) JSON() (string, error) { - result, err := json.Marshal(m) - if err != nil { - err = errors.New("objx: JSON encode failed with: " + err.Error()) - } - return string(result), err -} - -// MustJSON converts the contained object to a JSON string -// representation and panics if there is an error -func (m Map) MustJSON() string { - result, err := m.JSON() - if err != nil { - panic(err.Error()) - } - return result -} - -// Base64 converts the contained object to a Base64 string -// representation of the JSON string representation -func (m Map) Base64() (string, error) { - var buf bytes.Buffer - - jsonData, err := m.JSON() - if err != nil { - return "", err - } - - encoder := base64.NewEncoder(base64.StdEncoding, &buf) - _, err = encoder.Write([]byte(jsonData)) - if err != nil { - return "", err - } - _ = encoder.Close() - - return buf.String(), nil -} - -// MustBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and panics -// if there is an error -func (m Map) MustBase64() string { - result, err := m.Base64() - if err != nil { - panic(err.Error()) - } - return result -} - -// SignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key. 
-func (m Map) SignedBase64(key string) (string, error) { - base64, err := m.Base64() - if err != nil { - return "", err - } - - sig := HashWithKey(base64, key) - return base64 + SignatureSeparator + sig, nil -} - -// MustSignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key and panics if there is an error -func (m Map) MustSignedBase64(key string) string { - result, err := m.SignedBase64(key) - if err != nil { - panic(err.Error()) - } - return result -} - -/* - URL Query - ------------------------------------------------ -*/ - -// URLValues creates a url.Values object from an Obj. This -// function requires that the wrapped object be a map[string]interface{} -func (m Map) URLValues() url.Values { - vals := make(url.Values) - for k, v := range m { - //TODO: can this be done without sprintf? - vals.Set(k, fmt.Sprintf("%v", v)) - } - return vals -} - -// URLQuery gets an encoded URL query representing the given -// Obj. This function requires that the wrapped object be a -// map[string]interface{} -func (m Map) URLQuery() (string, error) { - return m.URLValues().Encode(), nil -} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go deleted file mode 100644 index 6d6af1a83..000000000 --- a/vendor/github.com/stretchr/objx/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Objx - Go package for dealing with maps, slices, JSON and other data. - -Overview - -Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -a powerful `Get` method (among others) that allows you to easily and quickly get -access to data within the map, without having to worry too much about type assertions, -missing data, default values etc. - -Pattern - -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. 
-Call one of the `objx.` functions to create your `objx.Map` to get going: - - m, err := objx.FromJSON(json) - -NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -the rest will be optimistic and try to figure things out without panicking. - -Use `Get` to access the value you're interested in. You can use dot and array -notation too: - - m.Get("places[0].latlng") - -Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - - if m.Get("code").IsStr() { // Your code... } - -Or you can just assume the type, and use one of the strong type methods to extract the real value: - - m.Get("code").Int() - -If there's no value there (or if it's the wrong type) then a default value will be returned, -or you can be explicit about the default value. - - Get("code").Int(-1) - -If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, -manipulating and selecting that data. You can find out more by exploring the index below. - -Reading data - -A simple example of how to use Objx: - - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() - - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) - -Ranging - -Since `objx.Map` is a `map[string]interface{}` you can treat it as such. -For example, to `range` the data, do what you would expect: - - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... 
- } -*/ -package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go deleted file mode 100644 index 406bc8926..000000000 --- a/vendor/github.com/stretchr/objx/map.go +++ /dev/null @@ -1,190 +0,0 @@ -package objx - -import ( - "encoding/base64" - "encoding/json" - "errors" - "io/ioutil" - "net/url" - "strings" -) - -// MSIConvertable is an interface that defines methods for converting your -// custom types to a map[string]interface{} representation. -type MSIConvertable interface { - // MSI gets a map[string]interface{} (msi) representing the - // object. - MSI() map[string]interface{} -} - -// Map provides extended functionality for working with -// untyped data, in particular map[string]interface (msi). -type Map map[string]interface{} - -// Value returns the internal value instance -func (m Map) Value() *Value { - return &Value{data: m} -} - -// Nil represents a nil Map. -var Nil = New(nil) - -// New creates a new Map containing the map[string]interface{} in the data argument. -// If the data argument is not a map[string]interface, New attempts to call the -// MSI() method on the MSIConvertable interface to create one. -func New(data interface{}) Map { - if _, ok := data.(map[string]interface{}); !ok { - if converter, ok := data.(MSIConvertable); ok { - data = converter.MSI() - } else { - return nil - } - } - return Map(data.(map[string]interface{})) -} - -// MSI creates a map[string]interface{} and puts it inside a new Map. -// -// The arguments follow a key, value pattern. -// -// -// Returns nil if any key argument is non-string or if there are an odd number of arguments. 
-// -// Example -// -// To easily create Maps: -// -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) -// -// // creates an Map equivalent to -// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} -func MSI(keyAndValuePairs ...interface{}) Map { - newMap := Map{} - keyAndValuePairsLen := len(keyAndValuePairs) - if keyAndValuePairsLen%2 != 0 { - return nil - } - for i := 0; i < keyAndValuePairsLen; i = i + 2 { - key := keyAndValuePairs[i] - value := keyAndValuePairs[i+1] - - // make sure the key is a string - keyString, keyStringOK := key.(string) - if !keyStringOK { - return nil - } - newMap[keyString] = value - } - return newMap -} - -// ****** Conversion Constructors - -// MustFromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Panics if the JSON is invalid. -func MustFromJSON(jsonString string) Map { - o, err := FromJSON(jsonString) - if err != nil { - panic("objx: MustFromJSON failed with error: " + err.Error()) - } - return o -} - -// FromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Returns an error if the JSON is invalid. -func FromJSON(jsonString string) (Map, error) { - var data interface{} - err := json.Unmarshal([]byte(jsonString), &data) - if err != nil { - return Nil, err - } - return New(data), nil -} - -// FromBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by Base64 -func FromBase64(base64String string) (Map, error) { - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - decoded, err := ioutil.ReadAll(decoder) - if err != nil { - return nil, err - } - return FromJSON(string(decoded)) -} - -// MustFromBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. 
-// -// The string is an encoded JSON string returned by Base64 -func MustFromBase64(base64String string) Map { - result, err := FromBase64(base64String) - if err != nil { - panic("objx: MustFromBase64 failed with error: " + err.Error()) - } - return result -} - -// FromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by SignedBase64 -func FromSignedBase64(base64String, key string) (Map, error) { - parts := strings.Split(base64String, SignatureSeparator) - if len(parts) != 2 { - return nil, errors.New("objx: Signed base64 string is malformed") - } - - sig := HashWithKey(parts[0], key) - if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match") - } - return FromBase64(parts[0]) -} - -// MustFromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromSignedBase64(base64String, key string) Map { - result, err := FromSignedBase64(base64String, key) - if err != nil { - panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) - } - return result -} - -// FromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -func FromURLQuery(query string) (Map, error) { - vals, err := url.ParseQuery(query) - if err != nil { - return nil, err - } - m := Map{} - for k, vals := range vals { - m[k] = vals[0] - } - return m, nil -} - -// MustFromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. 
-// -// Panics if it encounters an error -func MustFromURLQuery(query string) Map { - o, err := FromURLQuery(query) - if err != nil { - panic("objx: MustFromURLQuery failed with error: " + err.Error()) - } - return o -} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go deleted file mode 100644 index c3400a3f7..000000000 --- a/vendor/github.com/stretchr/objx/mutations.go +++ /dev/null @@ -1,77 +0,0 @@ -package objx - -// Exclude returns a new Map with the keys in the specified []string -// excluded. -func (m Map) Exclude(exclude []string) Map { - excluded := make(Map) - for k, v := range m { - if !contains(exclude, k) { - excluded[k] = v - } - } - return excluded -} - -// Copy creates a shallow copy of the Obj. -func (m Map) Copy() Map { - copied := Map{} - for k, v := range m { - copied[k] = v - } - return copied -} - -// Merge blends the specified map with a copy of this map and returns the result. -// -// Keys that appear in both will be selected from the specified map. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) Merge(merge Map) Map { - return m.Copy().MergeHere(merge) -} - -// MergeHere blends the specified map with this map and returns the current map. -// -// Keys that appear in both will be selected from the specified map. The original map -// will be modified. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) MergeHere(merge Map) Map { - for k, v := range merge { - m[k] = v - } - return m -} - -// Transform builds a new Obj giving the transformer a chance -// to change the keys and values as it goes. 
This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { - newMap := Map{} - for k, v := range m { - modifiedKey, modifiedVal := transformer(k, v) - newMap[modifiedKey] = modifiedVal - } - return newMap -} - -// TransformKeys builds a new map using the specified key mapping. -// -// Unspecified keys will be unaltered. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) TransformKeys(mapping map[string]string) Map { - return m.Transform(func(key string, value interface{}) (string, interface{}) { - if newKey, ok := mapping[key]; ok { - return newKey, value - } - return key, value - }) -} - -// Checks if a string slice contains a string -func contains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go deleted file mode 100644 index 692be8e2a..000000000 --- a/vendor/github.com/stretchr/objx/security.go +++ /dev/null @@ -1,12 +0,0 @@ -package objx - -import ( - "crypto/sha1" - "encoding/hex" -) - -// HashWithKey hashes the specified string using the security key -func HashWithKey(data, key string) string { - d := sha1.Sum([]byte(data + ":" + key)) - return hex.EncodeToString(d[:]) -} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go deleted file mode 100644 index d9e0b479a..000000000 --- a/vendor/github.com/stretchr/objx/tests.go +++ /dev/null @@ -1,17 +0,0 @@ -package objx - -// Has gets whether there is something at the specified selector -// or not. -// -// If m is nil, Has will always return false. -func (m Map) Has(selector string) bool { - if m == nil { - return false - } - return !m.Get(selector).IsNil() -} - -// IsNil gets whether the data is nil or not. 
-func (v *Value) IsNil() bool { - return v == nil || v.data == nil -} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go deleted file mode 100644 index 202a91f8c..000000000 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ /dev/null @@ -1,2501 +0,0 @@ -package objx - -/* - Inter (interface{} and []interface{}) -*/ - -// Inter gets the value as a interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Inter(optionalDefault ...interface{}) interface{} { - if s, ok := v.data.(interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInter gets the value as a interface{}. -// -// Panics if the object is not a interface{}. -func (v *Value) MustInter() interface{} { - return v.data.(interface{}) -} - -// InterSlice gets the value as a []interface{}, returns the optionalDefault -// value or nil if the value is not a []interface{}. -func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { - if s, ok := v.data.([]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInterSlice gets the value as a []interface{}. -// -// Panics if the object is not a []interface{}. -func (v *Value) MustInterSlice() []interface{} { - return v.data.([]interface{}) -} - -// IsInter gets whether the object contained is a interface{} or not. -func (v *Value) IsInter() bool { - _, ok := v.data.(interface{}) - return ok -} - -// IsInterSlice gets whether the object contained is a []interface{} or not. -func (v *Value) IsInterSlice() bool { - _, ok := v.data.([]interface{}) - return ok -} - -// EachInter calls the specified callback for each object -// in the []interface{}. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - for index, val := range v.MustInterSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInter uses the specified decider function to select items -// from the []interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - var selected []interface{} - v.EachInter(func(index int, val interface{}) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInter uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]interface{}. -func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - groups := make(map[string][]interface{}) - v.EachInter(func(index int, val interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInter uses the specified function to replace each interface{}s -// by iterating each item. The data in the returned result will be a -// []interface{} containing the replaced items. -func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() - replaced := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInter uses the specified collector function to collect a value -// for each of the interface{}s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() - collected := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - MSI (map[string]interface{} and []map[string]interface{}) -*/ - -// MSI gets the value as a map[string]interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { - if s, ok := v.data.(map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSI gets the value as a map[string]interface{}. -// -// Panics if the object is not a map[string]interface{}. -func (v *Value) MustMSI() map[string]interface{} { - return v.data.(map[string]interface{}) -} - -// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault -// value or nil if the value is not a []map[string]interface{}. -func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { - if s, ok := v.data.([]map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSISlice gets the value as a []map[string]interface{}. -// -// Panics if the object is not a []map[string]interface{}. -func (v *Value) MustMSISlice() []map[string]interface{} { - return v.data.([]map[string]interface{}) -} - -// IsMSI gets whether the object contained is a map[string]interface{} or not. -func (v *Value) IsMSI() bool { - _, ok := v.data.(map[string]interface{}) - return ok -} - -// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. 
-func (v *Value) IsMSISlice() bool { - _, ok := v.data.([]map[string]interface{}) - return ok -} - -// EachMSI calls the specified callback for each object -// in the []map[string]interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - for index, val := range v.MustMSISlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereMSI uses the specified decider function to select items -// from the []map[string]interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - var selected []map[string]interface{} - v.EachMSI(func(index int, val map[string]interface{}) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupMSI uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]map[string]interface{}. -func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - groups := make(map[string][]map[string]interface{}) - v.EachMSI(func(index int, val map[string]interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]map[string]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceMSI uses the specified function to replace each map[string]interface{}s -// by iterating each item. The data in the returned result will be a -// []map[string]interface{} containing the replaced items. 
-func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - arr := v.MustMSISlice() - replaced := make([]map[string]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectMSI uses the specified collector function to collect a value -// for each of the map[string]interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - arr := v.MustMSISlice() - collected := make([]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - ObjxMap ((Map) and [](Map)) -*/ - -// ObjxMap gets the value as a (Map), returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { - if s, ok := v.data.((Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return New(nil) -} - -// MustObjxMap gets the value as a (Map). -// -// Panics if the object is not a (Map). -func (v *Value) MustObjxMap() Map { - return v.data.((Map)) -} - -// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault -// value or nil if the value is not a [](Map). -func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { - if s, ok := v.data.([](Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustObjxMapSlice gets the value as a [](Map). -// -// Panics if the object is not a [](Map). -func (v *Value) MustObjxMapSlice() [](Map) { - return v.data.([](Map)) -} - -// IsObjxMap gets whether the object contained is a (Map) or not. 
-func (v *Value) IsObjxMap() bool { - _, ok := v.data.((Map)) - return ok -} - -// IsObjxMapSlice gets whether the object contained is a [](Map) or not. -func (v *Value) IsObjxMapSlice() bool { - _, ok := v.data.([](Map)) - return ok -} - -// EachObjxMap calls the specified callback for each object -// in the [](Map). -// -// Panics if the object is the wrong type. -func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - for index, val := range v.MustObjxMapSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereObjxMap uses the specified decider function to select items -// from the [](Map). The object contained in the result will contain -// only the selected items. -func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - var selected [](Map) - v.EachObjxMap(func(index int, val Map) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupObjxMap uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][](Map). -func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - groups := make(map[string][](Map)) - v.EachObjxMap(func(index int, val Map) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([](Map), 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceObjxMap uses the specified function to replace each (Map)s -// by iterating each item. The data in the returned result will be a -// [](Map) containing the replaced items. 
-func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - arr := v.MustObjxMapSlice() - replaced := make([](Map), len(arr)) - v.EachObjxMap(func(index int, val Map) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectObjxMap uses the specified collector function to collect a value -// for each of the (Map)s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - arr := v.MustObjxMapSlice() - collected := make([]interface{}, len(arr)) - v.EachObjxMap(func(index int, val Map) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Bool (bool and []bool) -*/ - -// Bool gets the value as a bool, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Bool(optionalDefault ...bool) bool { - if s, ok := v.data.(bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return false -} - -// MustBool gets the value as a bool. -// -// Panics if the object is not a bool. -func (v *Value) MustBool() bool { - return v.data.(bool) -} - -// BoolSlice gets the value as a []bool, returns the optionalDefault -// value or nil if the value is not a []bool. -func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { - if s, ok := v.data.([]bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustBoolSlice gets the value as a []bool. -// -// Panics if the object is not a []bool. -func (v *Value) MustBoolSlice() []bool { - return v.data.([]bool) -} - -// IsBool gets whether the object contained is a bool or not. -func (v *Value) IsBool() bool { - _, ok := v.data.(bool) - return ok -} - -// IsBoolSlice gets whether the object contained is a []bool or not. 
-func (v *Value) IsBoolSlice() bool { - _, ok := v.data.([]bool) - return ok -} - -// EachBool calls the specified callback for each object -// in the []bool. -// -// Panics if the object is the wrong type. -func (v *Value) EachBool(callback func(int, bool) bool) *Value { - for index, val := range v.MustBoolSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereBool uses the specified decider function to select items -// from the []bool. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereBool(decider func(int, bool) bool) *Value { - var selected []bool - v.EachBool(func(index int, val bool) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupBool uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]bool. -func (v *Value) GroupBool(grouper func(int, bool) string) *Value { - groups := make(map[string][]bool) - v.EachBool(func(index int, val bool) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]bool, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceBool uses the specified function to replace each bools -// by iterating each item. The data in the returned result will be a -// []bool containing the replaced items. -func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { - arr := v.MustBoolSlice() - replaced := make([]bool, len(arr)) - v.EachBool(func(index int, val bool) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectBool uses the specified collector function to collect a value -// for each of the bools in the slice. 
The data returned will be a -// []interface{}. -func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { - arr := v.MustBoolSlice() - collected := make([]interface{}, len(arr)) - v.EachBool(func(index int, val bool) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Str (string and []string) -*/ - -// Str gets the value as a string, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Str(optionalDefault ...string) string { - if s, ok := v.data.(string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return "" -} - -// MustStr gets the value as a string. -// -// Panics if the object is not a string. -func (v *Value) MustStr() string { - return v.data.(string) -} - -// StrSlice gets the value as a []string, returns the optionalDefault -// value or nil if the value is not a []string. -func (v *Value) StrSlice(optionalDefault ...[]string) []string { - if s, ok := v.data.([]string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustStrSlice gets the value as a []string. -// -// Panics if the object is not a []string. -func (v *Value) MustStrSlice() []string { - return v.data.([]string) -} - -// IsStr gets whether the object contained is a string or not. -func (v *Value) IsStr() bool { - _, ok := v.data.(string) - return ok -} - -// IsStrSlice gets whether the object contained is a []string or not. -func (v *Value) IsStrSlice() bool { - _, ok := v.data.([]string) - return ok -} - -// EachStr calls the specified callback for each object -// in the []string. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachStr(callback func(int, string) bool) *Value { - for index, val := range v.MustStrSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereStr uses the specified decider function to select items -// from the []string. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereStr(decider func(int, string) bool) *Value { - var selected []string - v.EachStr(func(index int, val string) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupStr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]string. -func (v *Value) GroupStr(grouper func(int, string) string) *Value { - groups := make(map[string][]string) - v.EachStr(func(index int, val string) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]string, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceStr uses the specified function to replace each strings -// by iterating each item. The data in the returned result will be a -// []string containing the replaced items. -func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { - arr := v.MustStrSlice() - replaced := make([]string, len(arr)) - v.EachStr(func(index int, val string) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectStr uses the specified collector function to collect a value -// for each of the strings in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { - arr := v.MustStrSlice() - collected := make([]interface{}, len(arr)) - v.EachStr(func(index int, val string) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int (int and []int) -*/ - -// Int gets the value as a int, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int(optionalDefault ...int) int { - if s, ok := v.data.(int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt gets the value as a int. -// -// Panics if the object is not a int. -func (v *Value) MustInt() int { - return v.data.(int) -} - -// IntSlice gets the value as a []int, returns the optionalDefault -// value or nil if the value is not a []int. -func (v *Value) IntSlice(optionalDefault ...[]int) []int { - if s, ok := v.data.([]int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustIntSlice gets the value as a []int. -// -// Panics if the object is not a []int. -func (v *Value) MustIntSlice() []int { - return v.data.([]int) -} - -// IsInt gets whether the object contained is a int or not. -func (v *Value) IsInt() bool { - _, ok := v.data.(int) - return ok -} - -// IsIntSlice gets whether the object contained is a []int or not. -func (v *Value) IsIntSlice() bool { - _, ok := v.data.([]int) - return ok -} - -// EachInt calls the specified callback for each object -// in the []int. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt(callback func(int, int) bool) *Value { - for index, val := range v.MustIntSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt uses the specified decider function to select items -// from the []int. 
The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt(decider func(int, int) bool) *Value { - var selected []int - v.EachInt(func(index int, val int) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int. -func (v *Value) GroupInt(grouper func(int, int) string) *Value { - groups := make(map[string][]int) - v.EachInt(func(index int, val int) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt uses the specified function to replace each ints -// by iterating each item. The data in the returned result will be a -// []int containing the replaced items. -func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { - arr := v.MustIntSlice() - replaced := make([]int, len(arr)) - v.EachInt(func(index int, val int) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt uses the specified collector function to collect a value -// for each of the ints in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { - arr := v.MustIntSlice() - collected := make([]interface{}, len(arr)) - v.EachInt(func(index int, val int) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int8 (int8 and []int8) -*/ - -// Int8 gets the value as a int8, returns the optionalDefault -// value or a system default object if the value is the wrong type. 
-func (v *Value) Int8(optionalDefault ...int8) int8 { - if s, ok := v.data.(int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt8 gets the value as a int8. -// -// Panics if the object is not a int8. -func (v *Value) MustInt8() int8 { - return v.data.(int8) -} - -// Int8Slice gets the value as a []int8, returns the optionalDefault -// value or nil if the value is not a []int8. -func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { - if s, ok := v.data.([]int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt8Slice gets the value as a []int8. -// -// Panics if the object is not a []int8. -func (v *Value) MustInt8Slice() []int8 { - return v.data.([]int8) -} - -// IsInt8 gets whether the object contained is a int8 or not. -func (v *Value) IsInt8() bool { - _, ok := v.data.(int8) - return ok -} - -// IsInt8Slice gets whether the object contained is a []int8 or not. -func (v *Value) IsInt8Slice() bool { - _, ok := v.data.([]int8) - return ok -} - -// EachInt8 calls the specified callback for each object -// in the []int8. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt8(callback func(int, int8) bool) *Value { - for index, val := range v.MustInt8Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt8 uses the specified decider function to select items -// from the []int8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { - var selected []int8 - v.EachInt8(func(index int, val int8) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt8 uses the specified grouper function to group the items -// keyed by the return of the grouper. 
The object contained in the -// result will contain a map[string][]int8. -func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { - groups := make(map[string][]int8) - v.EachInt8(func(index int, val int8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt8 uses the specified function to replace each int8s -// by iterating each item. The data in the returned result will be a -// []int8 containing the replaced items. -func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { - arr := v.MustInt8Slice() - replaced := make([]int8, len(arr)) - v.EachInt8(func(index int, val int8) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt8 uses the specified collector function to collect a value -// for each of the int8s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { - arr := v.MustInt8Slice() - collected := make([]interface{}, len(arr)) - v.EachInt8(func(index int, val int8) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int16 (int16 and []int16) -*/ - -// Int16 gets the value as a int16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int16(optionalDefault ...int16) int16 { - if s, ok := v.data.(int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt16 gets the value as a int16. -// -// Panics if the object is not a int16. -func (v *Value) MustInt16() int16 { - return v.data.(int16) -} - -// Int16Slice gets the value as a []int16, returns the optionalDefault -// value or nil if the value is not a []int16. 
-func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { - if s, ok := v.data.([]int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt16Slice gets the value as a []int16. -// -// Panics if the object is not a []int16. -func (v *Value) MustInt16Slice() []int16 { - return v.data.([]int16) -} - -// IsInt16 gets whether the object contained is a int16 or not. -func (v *Value) IsInt16() bool { - _, ok := v.data.(int16) - return ok -} - -// IsInt16Slice gets whether the object contained is a []int16 or not. -func (v *Value) IsInt16Slice() bool { - _, ok := v.data.([]int16) - return ok -} - -// EachInt16 calls the specified callback for each object -// in the []int16. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt16(callback func(int, int16) bool) *Value { - for index, val := range v.MustInt16Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt16 uses the specified decider function to select items -// from the []int16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { - var selected []int16 - v.EachInt16(func(index int, val int16) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int16. 
-func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { - groups := make(map[string][]int16) - v.EachInt16(func(index int, val int16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt16 uses the specified function to replace each int16s -// by iterating each item. The data in the returned result will be a -// []int16 containing the replaced items. -func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { - arr := v.MustInt16Slice() - replaced := make([]int16, len(arr)) - v.EachInt16(func(index int, val int16) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt16 uses the specified collector function to collect a value -// for each of the int16s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { - arr := v.MustInt16Slice() - collected := make([]interface{}, len(arr)) - v.EachInt16(func(index int, val int16) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int32 (int32 and []int32) -*/ - -// Int32 gets the value as a int32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int32(optionalDefault ...int32) int32 { - if s, ok := v.data.(int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt32 gets the value as a int32. -// -// Panics if the object is not a int32. -func (v *Value) MustInt32() int32 { - return v.data.(int32) -} - -// Int32Slice gets the value as a []int32, returns the optionalDefault -// value or nil if the value is not a []int32. 
-func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { - if s, ok := v.data.([]int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt32Slice gets the value as a []int32. -// -// Panics if the object is not a []int32. -func (v *Value) MustInt32Slice() []int32 { - return v.data.([]int32) -} - -// IsInt32 gets whether the object contained is a int32 or not. -func (v *Value) IsInt32() bool { - _, ok := v.data.(int32) - return ok -} - -// IsInt32Slice gets whether the object contained is a []int32 or not. -func (v *Value) IsInt32Slice() bool { - _, ok := v.data.([]int32) - return ok -} - -// EachInt32 calls the specified callback for each object -// in the []int32. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt32(callback func(int, int32) bool) *Value { - for index, val := range v.MustInt32Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt32 uses the specified decider function to select items -// from the []int32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { - var selected []int32 - v.EachInt32(func(index int, val int32) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int32. 
-func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { - groups := make(map[string][]int32) - v.EachInt32(func(index int, val int32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt32 uses the specified function to replace each int32s -// by iterating each item. The data in the returned result will be a -// []int32 containing the replaced items. -func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { - arr := v.MustInt32Slice() - replaced := make([]int32, len(arr)) - v.EachInt32(func(index int, val int32) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt32 uses the specified collector function to collect a value -// for each of the int32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { - arr := v.MustInt32Slice() - collected := make([]interface{}, len(arr)) - v.EachInt32(func(index int, val int32) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int64 (int64 and []int64) -*/ - -// Int64 gets the value as a int64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int64(optionalDefault ...int64) int64 { - if s, ok := v.data.(int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt64 gets the value as a int64. -// -// Panics if the object is not a int64. -func (v *Value) MustInt64() int64 { - return v.data.(int64) -} - -// Int64Slice gets the value as a []int64, returns the optionalDefault -// value or nil if the value is not a []int64. 
-func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { - if s, ok := v.data.([]int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt64Slice gets the value as a []int64. -// -// Panics if the object is not a []int64. -func (v *Value) MustInt64Slice() []int64 { - return v.data.([]int64) -} - -// IsInt64 gets whether the object contained is a int64 or not. -func (v *Value) IsInt64() bool { - _, ok := v.data.(int64) - return ok -} - -// IsInt64Slice gets whether the object contained is a []int64 or not. -func (v *Value) IsInt64Slice() bool { - _, ok := v.data.([]int64) - return ok -} - -// EachInt64 calls the specified callback for each object -// in the []int64. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt64(callback func(int, int64) bool) *Value { - for index, val := range v.MustInt64Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt64 uses the specified decider function to select items -// from the []int64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { - var selected []int64 - v.EachInt64(func(index int, val int64) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int64. 
-func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { - groups := make(map[string][]int64) - v.EachInt64(func(index int, val int64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt64 uses the specified function to replace each int64s -// by iterating each item. The data in the returned result will be a -// []int64 containing the replaced items. -func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { - arr := v.MustInt64Slice() - replaced := make([]int64, len(arr)) - v.EachInt64(func(index int, val int64) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt64 uses the specified collector function to collect a value -// for each of the int64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { - arr := v.MustInt64Slice() - collected := make([]interface{}, len(arr)) - v.EachInt64(func(index int, val int64) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint (uint and []uint) -*/ - -// Uint gets the value as a uint, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint(optionalDefault ...uint) uint { - if s, ok := v.data.(uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint gets the value as a uint. -// -// Panics if the object is not a uint. -func (v *Value) MustUint() uint { - return v.data.(uint) -} - -// UintSlice gets the value as a []uint, returns the optionalDefault -// value or nil if the value is not a []uint. 
-func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { - if s, ok := v.data.([]uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintSlice gets the value as a []uint. -// -// Panics if the object is not a []uint. -func (v *Value) MustUintSlice() []uint { - return v.data.([]uint) -} - -// IsUint gets whether the object contained is a uint or not. -func (v *Value) IsUint() bool { - _, ok := v.data.(uint) - return ok -} - -// IsUintSlice gets whether the object contained is a []uint or not. -func (v *Value) IsUintSlice() bool { - _, ok := v.data.([]uint) - return ok -} - -// EachUint calls the specified callback for each object -// in the []uint. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint(callback func(int, uint) bool) *Value { - for index, val := range v.MustUintSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint uses the specified decider function to select items -// from the []uint. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint(decider func(int, uint) bool) *Value { - var selected []uint - v.EachUint(func(index int, val uint) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint. 
-func (v *Value) GroupUint(grouper func(int, uint) string) *Value { - groups := make(map[string][]uint) - v.EachUint(func(index int, val uint) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint uses the specified function to replace each uints -// by iterating each item. The data in the returned result will be a -// []uint containing the replaced items. -func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { - arr := v.MustUintSlice() - replaced := make([]uint, len(arr)) - v.EachUint(func(index int, val uint) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint uses the specified collector function to collect a value -// for each of the uints in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { - arr := v.MustUintSlice() - collected := make([]interface{}, len(arr)) - v.EachUint(func(index int, val uint) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint8 (uint8 and []uint8) -*/ - -// Uint8 gets the value as a uint8, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint8(optionalDefault ...uint8) uint8 { - if s, ok := v.data.(uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint8 gets the value as a uint8. -// -// Panics if the object is not a uint8. -func (v *Value) MustUint8() uint8 { - return v.data.(uint8) -} - -// Uint8Slice gets the value as a []uint8, returns the optionalDefault -// value or nil if the value is not a []uint8. 
-func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { - if s, ok := v.data.([]uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint8Slice gets the value as a []uint8. -// -// Panics if the object is not a []uint8. -func (v *Value) MustUint8Slice() []uint8 { - return v.data.([]uint8) -} - -// IsUint8 gets whether the object contained is a uint8 or not. -func (v *Value) IsUint8() bool { - _, ok := v.data.(uint8) - return ok -} - -// IsUint8Slice gets whether the object contained is a []uint8 or not. -func (v *Value) IsUint8Slice() bool { - _, ok := v.data.([]uint8) - return ok -} - -// EachUint8 calls the specified callback for each object -// in the []uint8. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { - for index, val := range v.MustUint8Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint8 uses the specified decider function to select items -// from the []uint8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { - var selected []uint8 - v.EachUint8(func(index int, val uint8) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint8. 
-func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { - groups := make(map[string][]uint8) - v.EachUint8(func(index int, val uint8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint8 uses the specified function to replace each uint8s -// by iterating each item. The data in the returned result will be a -// []uint8 containing the replaced items. -func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { - arr := v.MustUint8Slice() - replaced := make([]uint8, len(arr)) - v.EachUint8(func(index int, val uint8) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint8 uses the specified collector function to collect a value -// for each of the uint8s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { - arr := v.MustUint8Slice() - collected := make([]interface{}, len(arr)) - v.EachUint8(func(index int, val uint8) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint16 (uint16 and []uint16) -*/ - -// Uint16 gets the value as a uint16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint16(optionalDefault ...uint16) uint16 { - if s, ok := v.data.(uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint16 gets the value as a uint16. -// -// Panics if the object is not a uint16. -func (v *Value) MustUint16() uint16 { - return v.data.(uint16) -} - -// Uint16Slice gets the value as a []uint16, returns the optionalDefault -// value or nil if the value is not a []uint16. 
-func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { - if s, ok := v.data.([]uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint16Slice gets the value as a []uint16. -// -// Panics if the object is not a []uint16. -func (v *Value) MustUint16Slice() []uint16 { - return v.data.([]uint16) -} - -// IsUint16 gets whether the object contained is a uint16 or not. -func (v *Value) IsUint16() bool { - _, ok := v.data.(uint16) - return ok -} - -// IsUint16Slice gets whether the object contained is a []uint16 or not. -func (v *Value) IsUint16Slice() bool { - _, ok := v.data.([]uint16) - return ok -} - -// EachUint16 calls the specified callback for each object -// in the []uint16. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { - for index, val := range v.MustUint16Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint16 uses the specified decider function to select items -// from the []uint16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { - var selected []uint16 - v.EachUint16(func(index int, val uint16) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint16. 
-func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { - groups := make(map[string][]uint16) - v.EachUint16(func(index int, val uint16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint16 uses the specified function to replace each uint16s -// by iterating each item. The data in the returned result will be a -// []uint16 containing the replaced items. -func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { - arr := v.MustUint16Slice() - replaced := make([]uint16, len(arr)) - v.EachUint16(func(index int, val uint16) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint16 uses the specified collector function to collect a value -// for each of the uint16s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { - arr := v.MustUint16Slice() - collected := make([]interface{}, len(arr)) - v.EachUint16(func(index int, val uint16) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint32 (uint32 and []uint32) -*/ - -// Uint32 gets the value as a uint32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint32(optionalDefault ...uint32) uint32 { - if s, ok := v.data.(uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint32 gets the value as a uint32. -// -// Panics if the object is not a uint32. -func (v *Value) MustUint32() uint32 { - return v.data.(uint32) -} - -// Uint32Slice gets the value as a []uint32, returns the optionalDefault -// value or nil if the value is not a []uint32. 
-func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { - if s, ok := v.data.([]uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint32Slice gets the value as a []uint32. -// -// Panics if the object is not a []uint32. -func (v *Value) MustUint32Slice() []uint32 { - return v.data.([]uint32) -} - -// IsUint32 gets whether the object contained is a uint32 or not. -func (v *Value) IsUint32() bool { - _, ok := v.data.(uint32) - return ok -} - -// IsUint32Slice gets whether the object contained is a []uint32 or not. -func (v *Value) IsUint32Slice() bool { - _, ok := v.data.([]uint32) - return ok -} - -// EachUint32 calls the specified callback for each object -// in the []uint32. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { - for index, val := range v.MustUint32Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint32 uses the specified decider function to select items -// from the []uint32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { - var selected []uint32 - v.EachUint32(func(index int, val uint32) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint32. 
-func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { - groups := make(map[string][]uint32) - v.EachUint32(func(index int, val uint32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint32 uses the specified function to replace each uint32s -// by iterating each item. The data in the returned result will be a -// []uint32 containing the replaced items. -func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { - arr := v.MustUint32Slice() - replaced := make([]uint32, len(arr)) - v.EachUint32(func(index int, val uint32) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint32 uses the specified collector function to collect a value -// for each of the uint32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { - arr := v.MustUint32Slice() - collected := make([]interface{}, len(arr)) - v.EachUint32(func(index int, val uint32) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint64 (uint64 and []uint64) -*/ - -// Uint64 gets the value as a uint64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint64(optionalDefault ...uint64) uint64 { - if s, ok := v.data.(uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint64 gets the value as a uint64. -// -// Panics if the object is not a uint64. -func (v *Value) MustUint64() uint64 { - return v.data.(uint64) -} - -// Uint64Slice gets the value as a []uint64, returns the optionalDefault -// value or nil if the value is not a []uint64. 
-func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { - if s, ok := v.data.([]uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint64Slice gets the value as a []uint64. -// -// Panics if the object is not a []uint64. -func (v *Value) MustUint64Slice() []uint64 { - return v.data.([]uint64) -} - -// IsUint64 gets whether the object contained is a uint64 or not. -func (v *Value) IsUint64() bool { - _, ok := v.data.(uint64) - return ok -} - -// IsUint64Slice gets whether the object contained is a []uint64 or not. -func (v *Value) IsUint64Slice() bool { - _, ok := v.data.([]uint64) - return ok -} - -// EachUint64 calls the specified callback for each object -// in the []uint64. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { - for index, val := range v.MustUint64Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint64 uses the specified decider function to select items -// from the []uint64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { - var selected []uint64 - v.EachUint64(func(index int, val uint64) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint64. 
-func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { - groups := make(map[string][]uint64) - v.EachUint64(func(index int, val uint64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint64 uses the specified function to replace each uint64s -// by iterating each item. The data in the returned result will be a -// []uint64 containing the replaced items. -func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { - arr := v.MustUint64Slice() - replaced := make([]uint64, len(arr)) - v.EachUint64(func(index int, val uint64) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint64 uses the specified collector function to collect a value -// for each of the uint64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { - arr := v.MustUint64Slice() - collected := make([]interface{}, len(arr)) - v.EachUint64(func(index int, val uint64) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uintptr (uintptr and []uintptr) -*/ - -// Uintptr gets the value as a uintptr, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { - if s, ok := v.data.(uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUintptr gets the value as a uintptr. -// -// Panics if the object is not a uintptr. -func (v *Value) MustUintptr() uintptr { - return v.data.(uintptr) -} - -// UintptrSlice gets the value as a []uintptr, returns the optionalDefault -// value or nil if the value is not a []uintptr. 
-func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { - if s, ok := v.data.([]uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintptrSlice gets the value as a []uintptr. -// -// Panics if the object is not a []uintptr. -func (v *Value) MustUintptrSlice() []uintptr { - return v.data.([]uintptr) -} - -// IsUintptr gets whether the object contained is a uintptr or not. -func (v *Value) IsUintptr() bool { - _, ok := v.data.(uintptr) - return ok -} - -// IsUintptrSlice gets whether the object contained is a []uintptr or not. -func (v *Value) IsUintptrSlice() bool { - _, ok := v.data.([]uintptr) - return ok -} - -// EachUintptr calls the specified callback for each object -// in the []uintptr. -// -// Panics if the object is the wrong type. -func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { - for index, val := range v.MustUintptrSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUintptr uses the specified decider function to select items -// from the []uintptr. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { - var selected []uintptr - v.EachUintptr(func(index int, val uintptr) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUintptr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uintptr. 
-func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { - groups := make(map[string][]uintptr) - v.EachUintptr(func(index int, val uintptr) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uintptr, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUintptr uses the specified function to replace each uintptrs -// by iterating each item. The data in the returned result will be a -// []uintptr containing the replaced items. -func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { - arr := v.MustUintptrSlice() - replaced := make([]uintptr, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUintptr uses the specified collector function to collect a value -// for each of the uintptrs in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { - arr := v.MustUintptrSlice() - collected := make([]interface{}, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Float32 (float32 and []float32) -*/ - -// Float32 gets the value as a float32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float32(optionalDefault ...float32) float32 { - if s, ok := v.data.(float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat32 gets the value as a float32. -// -// Panics if the object is not a float32. -func (v *Value) MustFloat32() float32 { - return v.data.(float32) -} - -// Float32Slice gets the value as a []float32, returns the optionalDefault -// value or nil if the value is not a []float32. 
-func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { - if s, ok := v.data.([]float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat32Slice gets the value as a []float32. -// -// Panics if the object is not a []float32. -func (v *Value) MustFloat32Slice() []float32 { - return v.data.([]float32) -} - -// IsFloat32 gets whether the object contained is a float32 or not. -func (v *Value) IsFloat32() bool { - _, ok := v.data.(float32) - return ok -} - -// IsFloat32Slice gets whether the object contained is a []float32 or not. -func (v *Value) IsFloat32Slice() bool { - _, ok := v.data.([]float32) - return ok -} - -// EachFloat32 calls the specified callback for each object -// in the []float32. -// -// Panics if the object is the wrong type. -func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { - for index, val := range v.MustFloat32Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereFloat32 uses the specified decider function to select items -// from the []float32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { - var selected []float32 - v.EachFloat32(func(index int, val float32) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupFloat32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float32. 
-func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { - groups := make(map[string][]float32) - v.EachFloat32(func(index int, val float32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceFloat32 uses the specified function to replace each float32s -// by iterating each item. The data in the returned result will be a -// []float32 containing the replaced items. -func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { - arr := v.MustFloat32Slice() - replaced := make([]float32, len(arr)) - v.EachFloat32(func(index int, val float32) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectFloat32 uses the specified collector function to collect a value -// for each of the float32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { - arr := v.MustFloat32Slice() - collected := make([]interface{}, len(arr)) - v.EachFloat32(func(index int, val float32) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Float64 (float64 and []float64) -*/ - -// Float64 gets the value as a float64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float64(optionalDefault ...float64) float64 { - if s, ok := v.data.(float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat64 gets the value as a float64. -// -// Panics if the object is not a float64. -func (v *Value) MustFloat64() float64 { - return v.data.(float64) -} - -// Float64Slice gets the value as a []float64, returns the optionalDefault -// value or nil if the value is not a []float64. 
-func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { - if s, ok := v.data.([]float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat64Slice gets the value as a []float64. -// -// Panics if the object is not a []float64. -func (v *Value) MustFloat64Slice() []float64 { - return v.data.([]float64) -} - -// IsFloat64 gets whether the object contained is a float64 or not. -func (v *Value) IsFloat64() bool { - _, ok := v.data.(float64) - return ok -} - -// IsFloat64Slice gets whether the object contained is a []float64 or not. -func (v *Value) IsFloat64Slice() bool { - _, ok := v.data.([]float64) - return ok -} - -// EachFloat64 calls the specified callback for each object -// in the []float64. -// -// Panics if the object is the wrong type. -func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { - for index, val := range v.MustFloat64Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereFloat64 uses the specified decider function to select items -// from the []float64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { - var selected []float64 - v.EachFloat64(func(index int, val float64) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupFloat64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float64. 
-func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { - groups := make(map[string][]float64) - v.EachFloat64(func(index int, val float64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceFloat64 uses the specified function to replace each float64s -// by iterating each item. The data in the returned result will be a -// []float64 containing the replaced items. -func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { - arr := v.MustFloat64Slice() - replaced := make([]float64, len(arr)) - v.EachFloat64(func(index int, val float64) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectFloat64 uses the specified collector function to collect a value -// for each of the float64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { - arr := v.MustFloat64Slice() - collected := make([]interface{}, len(arr)) - v.EachFloat64(func(index int, val float64) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Complex64 (complex64 and []complex64) -*/ - -// Complex64 gets the value as a complex64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex64(optionalDefault ...complex64) complex64 { - if s, ok := v.data.(complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex64 gets the value as a complex64. -// -// Panics if the object is not a complex64. 
-func (v *Value) MustComplex64() complex64 { - return v.data.(complex64) -} - -// Complex64Slice gets the value as a []complex64, returns the optionalDefault -// value or nil if the value is not a []complex64. -func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { - if s, ok := v.data.([]complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex64Slice gets the value as a []complex64. -// -// Panics if the object is not a []complex64. -func (v *Value) MustComplex64Slice() []complex64 { - return v.data.([]complex64) -} - -// IsComplex64 gets whether the object contained is a complex64 or not. -func (v *Value) IsComplex64() bool { - _, ok := v.data.(complex64) - return ok -} - -// IsComplex64Slice gets whether the object contained is a []complex64 or not. -func (v *Value) IsComplex64Slice() bool { - _, ok := v.data.([]complex64) - return ok -} - -// EachComplex64 calls the specified callback for each object -// in the []complex64. -// -// Panics if the object is the wrong type. -func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { - for index, val := range v.MustComplex64Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereComplex64 uses the specified decider function to select items -// from the []complex64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { - var selected []complex64 - v.EachComplex64(func(index int, val complex64) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupComplex64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex64. 
-func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { - groups := make(map[string][]complex64) - v.EachComplex64(func(index int, val complex64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceComplex64 uses the specified function to replace each complex64s -// by iterating each item. The data in the returned result will be a -// []complex64 containing the replaced items. -func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { - arr := v.MustComplex64Slice() - replaced := make([]complex64, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectComplex64 uses the specified collector function to collect a value -// for each of the complex64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { - arr := v.MustComplex64Slice() - collected := make([]interface{}, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Complex128 (complex128 and []complex128) -*/ - -// Complex128 gets the value as a complex128, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex128(optionalDefault ...complex128) complex128 { - if s, ok := v.data.(complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex128 gets the value as a complex128. -// -// Panics if the object is not a complex128. 
-func (v *Value) MustComplex128() complex128 { - return v.data.(complex128) -} - -// Complex128Slice gets the value as a []complex128, returns the optionalDefault -// value or nil if the value is not a []complex128. -func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { - if s, ok := v.data.([]complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex128Slice gets the value as a []complex128. -// -// Panics if the object is not a []complex128. -func (v *Value) MustComplex128Slice() []complex128 { - return v.data.([]complex128) -} - -// IsComplex128 gets whether the object contained is a complex128 or not. -func (v *Value) IsComplex128() bool { - _, ok := v.data.(complex128) - return ok -} - -// IsComplex128Slice gets whether the object contained is a []complex128 or not. -func (v *Value) IsComplex128Slice() bool { - _, ok := v.data.([]complex128) - return ok -} - -// EachComplex128 calls the specified callback for each object -// in the []complex128. -// -// Panics if the object is the wrong type. -func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { - for index, val := range v.MustComplex128Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereComplex128 uses the specified decider function to select items -// from the []complex128. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { - var selected []complex128 - v.EachComplex128(func(index int, val complex128) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupComplex128 uses the specified grouper function to group the items -// keyed by the return of the grouper. 
The object contained in the -// result will contain a map[string][]complex128. -func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { - groups := make(map[string][]complex128) - v.EachComplex128(func(index int, val complex128) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex128, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceComplex128 uses the specified function to replace each complex128s -// by iterating each item. The data in the returned result will be a -// []complex128 containing the replaced items. -func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { - arr := v.MustComplex128Slice() - replaced := make([]complex128, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectComplex128 uses the specified collector function to collect a value -// for each of the complex128s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { - arr := v.MustComplex128Slice() - collected := make([]interface{}, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go deleted file mode 100644 index e4b4a1433..000000000 --- a/vendor/github.com/stretchr/objx/value.go +++ /dev/null @@ -1,53 +0,0 @@ -package objx - -import ( - "fmt" - "strconv" -) - -// Value provides methods for extracting interface{} data in various -// types. 
-type Value struct { - // data contains the raw data being managed by this Value - data interface{} -} - -// Data returns the raw data contained by this Value -func (v *Value) Data() interface{} { - return v.data -} - -// String returns the value always as a string -func (v *Value) String() string { - switch { - case v.IsStr(): - return v.Str() - case v.IsBool(): - return strconv.FormatBool(v.Bool()) - case v.IsFloat32(): - return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) - case v.IsFloat64(): - return strconv.FormatFloat(v.Float64(), 'f', -1, 64) - case v.IsInt(): - return strconv.FormatInt(int64(v.Int()), 10) - case v.IsInt8(): - return strconv.FormatInt(int64(v.Int8()), 10) - case v.IsInt16(): - return strconv.FormatInt(int64(v.Int16()), 10) - case v.IsInt32(): - return strconv.FormatInt(int64(v.Int32()), 10) - case v.IsInt64(): - return strconv.FormatInt(v.Int64(), 10) - case v.IsUint(): - return strconv.FormatUint(uint64(v.Uint()), 10) - case v.IsUint8(): - return strconv.FormatUint(uint64(v.Uint8()), 10) - case v.IsUint16(): - return strconv.FormatUint(uint64(v.Uint16()), 10) - case v.IsUint32(): - return strconv.FormatUint(uint64(v.Uint32()), 10) - case v.IsUint64(): - return strconv.FormatUint(v.Uint64(), 10) - } - return fmt.Sprintf("%#v", v.Data()) -} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE deleted file mode 100644 index 473b670a7..000000000 --- a/vendor/github.com/stretchr/testify/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful. 
- -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go deleted file mode 100644 index aa1c2b95c..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ /dev/null @@ -1,484 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Conditionf uses a Comparison to assert a complex condition. -func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Condition(t, comp, append([]interface{}{msg}, args...)...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. 
-// -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") -func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Contains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return DirExists(t, path, append([]interface{}{msg}, args...)...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Emptyf(t, obj, "error message %s", "formatted") -func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Empty(t, object, append([]interface{}{msg}, args...)...) -} - -// Equalf asserts that two objects are equal. 
-// -// assert.Equalf(t, 123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") -func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) -} - -// EqualValuesf asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) -func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Error(t, err, append([]interface{}{msg}, args...)...) -} - -// Exactlyf asserts that two objects are equal in value and type. 
-// -// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) -func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Failf reports a failure through -func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// FailNowf fails test -func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// Falsef asserts that the specified value is false. -// -// assert.Falsef(t, myBool, "error message %s", "formatted") -func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return False(t, value, append([]interface{}{msg}, args...)...) -} - -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FileExists(t, path, append([]interface{}{msg}, args...)...) -} - -// HTTPBodyContainsf asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) -} - -// HTTPBodyNotContainsf asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) -} - -// HTTPErrorf asserts that a specified handler returns an error status code. -// -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// HTTPRedirectf asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). 
-func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// HTTPSuccessf asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) -func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) -func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. 
-func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// IsTypef asserts that the specified objects are of the same type. -func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) -} - -// JSONEqf asserts that two JSON strings are equivalent. 
-// -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. -// -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") -func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Len(t, object, length, append([]interface{}{msg}, args...)...) -} - -// Nilf asserts that the specified object is nil. -// -// assert.Nilf(t, err, "error message %s", "formatted") -func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Nil(t, object, append([]interface{}{msg}, args...)...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoError(t, err, append([]interface{}{msg}, args...)...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. 
-// -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") -func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEmpty(t, object, append([]interface{}{msg}, args...)...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotNilf asserts that the specified object is not nil. -// -// assert.NotNilf(t, err, "error message %s", "formatted") -func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotNil(t, object, append([]interface{}{msg}, args...)...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. 
-// -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") -func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotPanics(t, f, append([]interface{}{msg}, args...)...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") -func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// NotZerof asserts that i is not the zero value for its type. -func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotZero(t, i, append([]interface{}{msg}, args...)...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") -func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Panics(t, f, append([]interface{}{msg}, args...)...) 
-} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") -func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Subset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// Truef asserts that the specified value is true. -// -// assert.Truef(t, myBool, "error message %s", "formatted") -func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return True(t, value, append([]interface{}{msg}, args...)...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. 
-// -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// Zerof asserts that i is the zero value for its type. -func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Zero(t, i, append([]interface{}{msg}, args...)...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl deleted file mode 100644 index d2bb0b817..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentFormat}} -func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { - if h, ok := t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go deleted file mode 100644 index de39f794e..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ /dev/null @@ -1,956 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Condition(a.t, comp, msgAndArgs...) -} - -// Conditionf uses a Comparison to assert a complex condition. 
-func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Conditionf(a.t, comp, msg, args...) -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Contains("Hello World", "World") -// a.Contains(["Hello", "World"], "World") -// a.Contains({"Hello": "World"}, "Hello") -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Contains(a.t, s, contains, msgAndArgs...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Containsf("Hello World", "World", "error message %s", "formatted") -// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") -// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") -func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Containsf(a.t, s, contains, msg, args...) -} - -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return DirExists(a.t, path, msgAndArgs...) -} - -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return DirExistsf(a.t, path, msg, args...) 
-} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) -func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(a.t, listA, listB, msgAndArgs...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatchf(a.t, listA, listB, msg, args...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Empty(a.t, object, msgAndArgs...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Emptyf(obj, "error message %s", "formatted") -func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Emptyf(a.t, object, msg, args...) -} - -// Equal asserts that two objects are equal. 
-// -// a.Equal(123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString) -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualError(a.t, theError, errString, msgAndArgs...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") -func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualErrorf(a.t, theError, errString, msg, args...) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123)) -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// EqualValuesf asserts that two objects are equal or convertable to the same types -// and equal. 
-// -// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) -func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValuesf(a.t, expected, actual, msg, args...) -} - -// Equalf asserts that two objects are equal. -// -// a.Equalf(123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equalf(a.t, expected, actual, msg, args...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Error(a.t, err, msgAndArgs...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Errorf(a.t, err, msg, args...) -} - -// Exactly asserts that two objects are equal in value and type. -// -// a.Exactly(int32(123), int64(123)) -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// Exactlyf asserts that two objects are equal in value and type. 
-// -// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) -func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactlyf(a.t, expected, actual, msg, args...) -} - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Fail(a.t, failureMessage, msgAndArgs...) -} - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNow(a.t, failureMessage, msgAndArgs...) -} - -// FailNowf fails test -func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNowf(a.t, failureMessage, msg, args...) -} - -// Failf reports a failure through -func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Failf(a.t, failureMessage, msg, args...) -} - -// False asserts that the specified value is false. -// -// a.False(myBool) -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return False(a.t, value, msgAndArgs...) -} - -// Falsef asserts that the specified value is false. -// -// a.Falsef(myBool, "error message %s", "formatted") -func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Falsef(a.t, value, msg, args...) -} - -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. 
-func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FileExists(a.t, path, msgAndArgs...) -} - -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FileExistsf(a.t, path, msg, args...) -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) -} - -// HTTPBodyContainsf asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) -} - -// HTTPBodyNotContainsf asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPError(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPErrorf asserts that a specified handler returns an error status code. -// -// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPErrorf(a.t, handler, method, url, values, msg, args...) 
-} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPRedirectf asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPSuccessf asserts that a specified handler returns a success status code. -// -// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) -} - -// Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject)) -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) -func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implementsf(a.t, interfaceObject, object, msg, args...) -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// a.InDelta(math.Pi, (22 / 7.0), 0.01) -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. 
-func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) -func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaf(a.t, expected, actual, delta, msg, args...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) 
-} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) -} - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// IsTypef asserts that the specified objects are of the same type. -func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsTypef(a.t, expectedType, object, msg, args...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEq(a.t, expected, actual, msgAndArgs...) -} - -// JSONEqf asserts that two JSON strings are equivalent. 
-// -// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEqf(a.t, expected, actual, msg, args...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3) -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Len(a.t, object, length, msgAndArgs...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. -// -// a.Lenf(mySlice, 3, "error message %s", "formatted") -func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Lenf(a.t, object, length, msg, args...) -} - -// Nil asserts that the specified object is nil. -// -// a.Nil(err) -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nil(a.t, object, msgAndArgs...) -} - -// Nilf asserts that the specified object is nil. -// -// a.Nilf(err, "error message %s", "formatted") -func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nilf(a.t, object, msg, args...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoError(a.t, err, msgAndArgs...) 
-} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoErrorf(a.t, err, msg, args...) -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") -func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContainsf(a.t, s, contains, msg, args...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmpty(a.t, object, msgAndArgs...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmptyf(a.t, object, msg, args...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualf(a.t, expected, actual, msg, args...) -} - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err) -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNil(a.t, object, msgAndArgs...) -} - -// NotNilf asserts that the specified object is not nil. -// -// a.NotNilf(err, "error message %s", "formatted") -func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNilf(a.t, object, msg, args...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. 
-// -// a.NotPanics(func(){ RemainCalm() }) -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanics(a.t, f, msgAndArgs...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") -func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanicsf(a.t, f, msg, args...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") -func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexpf(a.t, rx, str, msg, args...) -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubset(a.t, list, subset, msgAndArgs...) -} - -// NotSubsetf asserts that the specified list(array, slice...) 
contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubsetf(a.t, list, subset, msg, args...) -} - -// NotZero asserts that i is not the zero value for its type. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZero(a.t, i, msgAndArgs...) -} - -// NotZerof asserts that i is not the zero value for its type. -func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZerof(a.t, i, msg, args...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ GoCrazy() }) -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panics(a.t, f, msgAndArgs...) -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(a.t, expected, f, msgAndArgs...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. 
-// -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValuef(a.t, expected, f, msg, args...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panicsf(a.t, f, msg, args...) -} - -// Regexp asserts that a specified regexp matches a string. -// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") -func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexpf(a.t, rx, str, msg, args...) -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subset(a.t, list, subset, msgAndArgs...) -} - -// Subsetf asserts that the specified list(array, slice...) 
contains all -// elements given in the specified subset(array, slice...). -// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subsetf(a.t, list, subset, msg, args...) -} - -// True asserts that the specified value is true. -// -// a.True(myBool) -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return True(a.t, value, msgAndArgs...) -} - -// Truef asserts that the specified value is true. -// -// a.Truef(myBool, "error message %s", "formatted") -func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Truef(a.t, value, msg, args...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. -// -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDurationf(a.t, expected, actual, delta, msg, args...) -} - -// Zero asserts that i is the zero value for its type. 
-func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zero(a.t, i, msgAndArgs...) -} - -// Zerof asserts that i is the zero value for its type. -func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zerof(a.t, i, msg, args...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 188bb9e17..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - if h, ok := a.t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index 5bdec56cd..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1394 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "os" - "reflect" - "regexp" - "runtime" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" -) - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful -// for table driven tests. -type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool - -// ValueAssertionFunc is a common function prototype when validating a single value. 
Can be useful -// for table driven tests. -type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool - -// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful -// for table driven tests. -type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool - -// ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful -// for table driven tests. -type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { - return expected == actual - } - - exp, ok := expected.([]byte) - if !ok { - return reflect.DeepEqual(expected, actual) - } - - act, ok := actual.([]byte) - if !ok { - return false - } - if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. 
-func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occurred in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - pc := uintptr(0) - file := "" - line := 0 - ok := false - name := "" - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. 
- if name == "testing.tRunner" { - break - } - - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - if len(parts) > 1 { - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - } - - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Aligns the provided message so that all lines after the first line start at the same location as the first line. -// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the -// basis on which the alignment occurs). 
-func indentMessageLines(message string, longestLabelLen int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - // no need to align first line because it starts at the correct location (after the label) - if i != 0 { - // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, - {"Error", failureMessage}, - } - - // Add test name if the Go version supports it - if n, ok := t.(interface { - Name() string - }); ok { - content = append(content, labeledContent{"Test", n.Name()}) - } - - message := messageFromMsgAndArgs(msgAndArgs...) - if len(message) > 0 { - content = append(content, labeledContent{"Messages", message}) - } - - t.Errorf("\n%s", ""+labeledOutput(content...)) - - return false -} - -type labeledContent struct { - label string - content string -} - -// labeledOutput returns a string consisting of the provided labeledContent. 
Each labeled output is appended in the following manner: -// -// \t{{label}}:{{align_spaces}}\t{{content}}\n -// -// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. -// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this -// alignment is achieved, "\t{{content}}\n" is added for the output. -// -// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. -func labeledOutput(content ...labeledContent) string { - longestLabel := 0 - for _, v := range content { - if len(v.label) > longestLabel { - longestLabel = len(v.label) - } - } - var output string - for _, v := range content { - output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" - } - return output -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if object == nil { - return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) - } - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true -} - -// IsType asserts that the specified objects are of the same type. 
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// formatUnequalValues takes two values of arbitrary types and returns string -// representations appropriate to be presented to the user. -// -// If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar -// to a type conversion in the Go grammar. 
-func formatUnequalValues(expected, actual interface{}) (e string, a string) { - if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%#v)", expected, expected), - fmt.Sprintf("%T(%#v)", actual, actual) - } - - return fmt.Sprintf("%#v", expected), - fmt.Sprintf("%#v", actual) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123)) -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal in value and type. -// -// assert.Exactly(t, int32(123), int64(123)) -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err) -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !isNil(object) { - return true - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// isNil checks if a specified object is nil or not, without Failing. 
-func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err) -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - // get nil case out of the way - if object == nil { - return true - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. 
-// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool) -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if h, ok := t.(interface { - Helper() - }); ok { - h.Helper() - } - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool) -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. 
-// -// assert.NotEqual(t, obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if reflect.TypeOf(list).Kind() == reflect.String { - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if reflect.TypeOf(list).Kind() == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. 
-// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). 
-// -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return true // we consider nil to be equal to the nil set - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) - } - } - - return true -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) 
- } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return true - } - } - - return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) -func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isEmpty(listA) && isEmpty(listB) { - return true - } - - aKind := reflect.TypeOf(listA).Kind() - bKind := reflect.TypeOf(listB).Kind() - - if aKind != reflect.Array && aKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) - } - - if bKind != reflect.Array && bKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) 
- } - - aValue := reflect.ValueOf(listA) - bValue := reflect.ValueOf(listB) - - aLen := aValue.Len() - bLen := bValue.Len() - - if aLen != bLen { - return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) - } - - // Mark indexes in bValue that we already used - visited := make([]bool, bLen) - for i := 0; i < aLen; i++ { - element := aValue.Index(i).Interface() - found := false - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - if ObjectsAreEqual(bValue.Index(j).Interface(), element) { - visited[j] = true - found = true - break - } - } - if !found { - return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) - } - } - - return true -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. 
-// -// assert.Panics(t, func(){ GoCrazy() }) -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - - return true -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ RemainCalm() }) -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. 
-// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - case time.Duration: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) 
- } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) - if !result { - return result - } - } - - return true -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Map || - reflect.TypeOf(expected).Kind() != reflect.Map { - return Fail(t, "Arguments must be maps", msgAndArgs...) - } - - expectedMap := reflect.ValueOf(expected) - actualMap := reflect.ValueOf(actual) - - if expectedMap.Len() != actualMap.Len() { - return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) - } - - for _, k := range expectedMap.MapKeys() { - ev := expectedMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !ev.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) - } - - if !av.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) 
- } - - if !InDelta( - t, - ev.Interface(), - av.Interface(), - delta, - msgAndArgs..., - ) { - return false - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) - } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) 
- } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err != nil { - return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if err == nil { - return Fail(t, "An error is expected but got nil.", msgAndArgs...) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !Error(t, theError, msgAndArgs...) { - return false - } - expected := errString - actual := theError.Error() - // don't need to use deep equals here, we know they are both strings - if expected != actual { - return Fail(t, fmt.Sprintf("Error message not equal:\n"+ - "expected: %q\n"+ - "actual : %q", expected, actual), msgAndArgs...) - } - return true -} - -// matchRegexp return true if a specified regexp matches a string. 
-func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) 
- } - return true -} - -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) - } - return true -} - -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if !info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) - } - return true -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) 
- } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - if ek != reflect.String { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { - e = expected.(string) - a = actual.(string) - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} - -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. 
-func validateEqualArgs(expected, actual interface{}) error { - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - -func isFunction(arg interface{}) bool { - if arg == nil { - return false - } - return reflect.TypeOf(arg).Kind() == reflect.Func -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, -} - -type tHelper interface { - Helper() -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4d6..000000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. -// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. 
-// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d1d..000000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index 9ad56851d..000000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index df46fa777..000000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,143 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. 
It returns -1 and -// an error if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) - if err != nil { - return -1, err - } - req.URL.RawQuery = values.Encode() - handler(w, req) - return w.Code, nil -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent - if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isSuccessCode -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect - if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isRedirectCode -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isErrorCode := code >= http.StatusBadRequest - if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isErrorCode -} - -// HTTPBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. 
-// -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return !contains -} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go deleted file mode 100644 index 7324128ef..000000000 --- a/vendor/github.com/stretchr/testify/mock/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package mock provides a system by which it is possible to mock your objects -// and verify calls are happening as expected. -// -// Example Usage -// -// The mock package provides an object, Mock, that tracks activity on another object. 
It is usually -// embedded into a test object as shown below: -// -// type MyTestObject struct { -// // add a Mock object instance -// mock.Mock -// -// // other fields go here as normal -// } -// -// When implementing the methods of an interface, you wire your functions up -// to call the Mock.Called(args...) method, and return the appropriate values. -// -// For example, to mock a method that saves the name and age of a person and returns -// the year of their birth or an error, you might write this: -// -// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { -// args := o.Called(firstname, lastname, age) -// return args.Int(0), args.Error(1) -// } -// -// The Int, Error and Bool methods are examples of strongly typed getters that take the argument -// index position. Given this argument list: -// -// (12, true, "Something") -// -// You could read them out strongly typed like this: -// -// args.Int(0) -// args.Bool(1) -// args.String(2) -// -// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: -// -// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) -// -// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those -// cases you should check for nil first. 
-package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go deleted file mode 100644 index cc4f642b5..000000000 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ /dev/null @@ -1,885 +0,0 @@ -package mock - -import ( - "errors" - "fmt" - "reflect" - "regexp" - "runtime" - "strings" - "sync" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" - "github.com/stretchr/objx" - "github.com/stretchr/testify/assert" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Logf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - FailNow() -} - -/* - Call -*/ - -// Call represents a method call and is used for setting expectations, -// as well as recording activity. -type Call struct { - Parent *Mock - - // The name of the method that was or will be called. - Method string - - // Holds the arguments of the method. - Arguments Arguments - - // Holds the arguments that should be returned when - // this method is called. - ReturnArguments Arguments - - // Holds the caller info for the On() call - callerInfo []string - - // The number of times to return the return arguments when setting - // expectations. 0 means to always return the value. - Repeatability int - - // Amount of times this call has been called - totalCalls int - - // Call to this method can be optional - optional bool - - // Holds a channel that will be used to block the Return until it either - // receives a message or is closed. nil means it returns immediately. - WaitFor <-chan time.Time - - waitTime time.Duration - - // Holds a handler used to manipulate arguments content that are passed by - // reference. It's useful when mocking methods such as unmarshalers or - // decoders. 
- RunFn func(Arguments) -} - -func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { - return &Call{ - Parent: parent, - Method: methodName, - Arguments: methodArguments, - ReturnArguments: make([]interface{}, 0), - callerInfo: callerInfo, - Repeatability: 0, - WaitFor: nil, - RunFn: nil, - } -} - -func (c *Call) lock() { - c.Parent.mutex.Lock() -} - -func (c *Call) unlock() { - c.Parent.mutex.Unlock() -} - -// Return specifies the return arguments for the expectation. -// -// Mock.On("DoSomething").Return(errors.New("failed")) -func (c *Call) Return(returnArguments ...interface{}) *Call { - c.lock() - defer c.unlock() - - c.ReturnArguments = returnArguments - - return c -} - -// Once indicates that that the mock should only return the value once. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() -func (c *Call) Once() *Call { - return c.Times(1) -} - -// Twice indicates that that the mock should only return the value twice. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() -func (c *Call) Twice() *Call { - return c.Times(2) -} - -// Times indicates that that the mock should only return the indicated number -// of times. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) -func (c *Call) Times(i int) *Call { - c.lock() - defer c.unlock() - c.Repeatability = i - return c -} - -// WaitUntil sets the channel that will block the mock's return until its closed -// or a message is received. 
-// -// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) -func (c *Call) WaitUntil(w <-chan time.Time) *Call { - c.lock() - defer c.unlock() - c.WaitFor = w - return c -} - -// After sets how long to block until the call returns -// -// Mock.On("MyMethod", arg1, arg2).After(time.Second) -func (c *Call) After(d time.Duration) *Call { - c.lock() - defer c.unlock() - c.waitTime = d - return c -} - -// Run sets a handler to be called before returning. It can be used when -// mocking a method such as unmarshalers that takes a pointer to a struct and -// sets properties in such struct -// -// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { -// arg := args.Get(0).(*map[string]interface{}) -// arg["foo"] = "bar" -// }) -func (c *Call) Run(fn func(args Arguments)) *Call { - c.lock() - defer c.unlock() - c.RunFn = fn - return c -} - -// Maybe allows the method call to be optional. Not calling an optional method -// will not cause an error while asserting expectations -func (c *Call) Maybe() *Call { - c.lock() - defer c.unlock() - c.optional = true - return c -} - -// On chains a new expectation description onto the mocked interface. This -// allows syntax like. -// -// Mock. -// On("MyMethod", 1).Return(nil). -// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) -func (c *Call) On(methodName string, arguments ...interface{}) *Call { - return c.Parent.On(methodName, arguments...) -} - -// Mock is the workhorse used to track activity on another object. -// For an example of its usage, refer to the "Example Usage" section at the top -// of this document. -type Mock struct { - // Represents the calls that are expected of - // an object. - ExpectedCalls []*Call - - // Holds the calls that were made to this mocked object. - Calls []Call - - // test is An optional variable that holds the test struct, to be used when an - // invalid mock call was made. 
- test TestingT - - // TestData holds any data that might be useful for testing. Testify ignores - // this data completely allowing you to do whatever you like with it. - testData objx.Map - - mutex sync.Mutex -} - -// TestData holds any data that might be useful for testing. Testify ignores -// this data completely allowing you to do whatever you like with it. -func (m *Mock) TestData() objx.Map { - - if m.testData == nil { - m.testData = make(objx.Map) - } - - return m.testData -} - -/* - Setting expectations -*/ - -// Test sets the test struct variable of the mock object -func (m *Mock) Test(t TestingT) { - m.mutex.Lock() - defer m.mutex.Unlock() - m.test = t -} - -// fail fails the current test with the given formatted format and args. -// In case that a test was defined, it uses the test APIs for failing a test, -// otherwise it uses panic. -func (m *Mock) fail(format string, args ...interface{}) { - m.mutex.Lock() - defer m.mutex.Unlock() - - if m.test == nil { - panic(fmt.Sprintf(format, args...)) - } - m.test.Errorf(format, args...) - m.test.FailNow() -} - -// On starts a description of an expectation of the specified method -// being called. -// -// Mock.On("MyMethod", arg1, arg2) -func (m *Mock) On(methodName string, arguments ...interface{}) *Call { - for _, arg := range arguments { - if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { - panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) - } - } - - m.mutex.Lock() - defer m.mutex.Unlock() - c := newCall(m, methodName, assert.CallerInfo(), arguments...) 
- m.ExpectedCalls = append(m.ExpectedCalls, c) - return c -} - -// /* -// Recording and responding to activity -// */ - -func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - for i, call := range m.ExpectedCalls { - if call.Method == method && call.Repeatability > -1 { - - _, diffCount := call.Arguments.Diff(arguments) - if diffCount == 0 { - return i, call - } - - } - } - return -1, nil -} - -func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { - var diffCount int - var closestCall *Call - var err string - - for _, call := range m.expectedCalls() { - if call.Method == method { - - errInfo, tempDiffCount := call.Arguments.Diff(arguments) - if tempDiffCount < diffCount || diffCount == 0 { - diffCount = tempDiffCount - closestCall = call - err = errInfo - } - - } - } - - return closestCall, err -} - -func callString(method string, arguments Arguments, includeArgumentValues bool) string { - - var argValsString string - if includeArgumentValues { - var argVals []string - for argIndex, arg := range arguments { - argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) - } - argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) - } - - return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) -} - -// Called tells the mock object that a method has been called, and gets an array -// of arguments to return. Panics if the call is unexpected (i.e. not preceded by -// appropriate .On .Return() calls) -// If Call.WaitFor is set, blocks until the channel is closed or receives a message. -func (m *Mock) Called(arguments ...interface{}) Arguments { - // get the calling function's name - pc, _, _, ok := runtime.Caller(1) - if !ok { - panic("Couldn't get the caller information") - } - functionPath := runtime.FuncForPC(pc).Name() - //Next four lines are required to use GCCGO function naming conventions. 
- //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock - //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree - //With GCCGO we need to remove interface information starting from pN

. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] - } - parts := strings.Split(functionPath, ".") - functionName := parts[len(parts)-1] - return m.MethodCalled(functionName, arguments...) -} - -// MethodCalled tells the mock object that the given method has been called, and gets -// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded -// by appropriate .On .Return() calls) -// If Call.WaitFor is set, blocks until the channel is closed or receives a message. -func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { - m.mutex.Lock() - //TODO: could combine expected and closes in single loop - found, call := m.findExpectedCall(methodName, arguments...) - - if found < 0 { - // we have to fail here - because we don't know what to do - // as the return arguments. This is because: - // - // a) this is a totally unexpected call to this method, - // b) the arguments are not what was expected, or - // c) the developer has forgotten to add an accompanying On...Return pair. - - closestCall, mismatch := m.findClosestCall(methodName, arguments...) - m.mutex.Unlock() - - if closestCall != nil { - m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", - callString(methodName, arguments, true), - callString(methodName, closestCall.Arguments, true), - diffArguments(closestCall.Arguments, arguments), - strings.TrimSpace(mismatch), - ) - } else { - m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) - } - } - - if call.Repeatability == 1 { - call.Repeatability = -1 - } else if call.Repeatability > 1 { - call.Repeatability-- - } - call.totalCalls++ - - // add the call - m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) - m.mutex.Unlock() - - // block if specified - if call.WaitFor != nil { - <-call.WaitFor - } else { - time.Sleep(call.waitTime) - } - - m.mutex.Lock() - runFn := call.RunFn - m.mutex.Unlock() - - if runFn != nil { - runFn(arguments) - } - - m.mutex.Lock() - returnArgs := call.ReturnArguments - m.mutex.Unlock() - - return returnArgs -} - -/* - Assertions -*/ - -type assertExpectationser interface { - AssertExpectations(TestingT) bool -} - -// AssertExpectationsForObjects asserts that everything specified with On and Return -// of the specified objects was in fact called as expected. -// -// Calls may have occurred in any order. -func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - for _, obj := range testObjects { - if m, ok := obj.(Mock); ok { - t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") - obj = &m - } - m := obj.(assertExpectationser) - if !m.AssertExpectations(t) { - t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) - return false - } - } - return true -} - -// AssertExpectations asserts that everything specified with On and Return was -// in fact called as expected. Calls may have occurred in any order. 
-func (m *Mock) AssertExpectations(t TestingT) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - var somethingMissing bool - var failedExpectations int - - // iterate through each expectation - expectedCalls := m.expectedCalls() - for _, expectedCall := range expectedCalls { - if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { - somethingMissing = true - failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - if expectedCall.Repeatability > 0 { - somethingMissing = true - failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } - } - } - - if somethingMissing { - t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) - } - - return !somethingMissing -} - -// AssertNumberOfCalls asserts that the method was called expectedCalls times. -func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - var actualCalls int - for _, call := range m.calls() { - if call.Method == methodName { - actualCalls++ - } - } - return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) -} - -// AssertCalled asserts that the method was called. -// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. 
-func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - if !m.methodWasCalled(methodName, arguments) { - var calledWithArgs []string - for _, call := range m.calls() { - calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments)) - } - if len(calledWithArgs) == 0 { - return assert.Fail(t, "Should have called with given arguments", - fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments)) - } - return assert.Fail(t, "Should have called with given arguments", - fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n"))) - } - return true -} - -// AssertNotCalled asserts that the method was not called. -// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. -func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - if m.methodWasCalled(methodName, arguments) { - return assert.Fail(t, "Should not have called with given arguments", - fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments)) - } - return true -} - -func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { - for _, call := range m.calls() { - if call.Method == methodName { - - _, differences := Arguments(expected).Diff(call.Arguments) - - if differences == 0 { - // found the expected call - return true - } - - } - } - // we didn't find the expected call - return false -} - -func (m *Mock) expectedCalls() []*Call { - return append([]*Call{}, m.ExpectedCalls...) -} - -func (m *Mock) calls() []Call { - return append([]Call{}, m.Calls...) 
-} - -/* - Arguments -*/ - -// Arguments holds an array of method arguments or return values. -type Arguments []interface{} - -const ( - // Anything is used in Diff and Assert when the argument being tested - // shouldn't be taken into consideration. - Anything = "mock.Anything" -) - -// AnythingOfTypeArgument is a string that contains the type of an argument -// for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string - -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. -// -// For example: -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) -func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) -} - -// argumentMatcher performs custom argument matching, returning whether or -// not the argument is matched by the expectation fixture function. -type argumentMatcher struct { - // fn is a function which accepts one argument, and returns a bool. 
- fn reflect.Value -} - -func (f argumentMatcher) Matches(argument interface{}) bool { - expectType := f.fn.Type().In(0) - expectTypeNilSupported := false - switch expectType.Kind() { - case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: - expectTypeNilSupported = true - } - - argType := reflect.TypeOf(argument) - var arg reflect.Value - if argType == nil { - arg = reflect.New(expectType).Elem() - } else { - arg = reflect.ValueOf(argument) - } - - if argType == nil && !expectTypeNilSupported { - panic(errors.New("attempting to call matcher with nil for non-nil expected type")) - } - if argType == nil || argType.AssignableTo(expectType) { - result := f.fn.Call([]reflect.Value{arg}) - return result[0].Bool() - } - return false -} - -func (f argumentMatcher) String() string { - return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) -} - -// MatchedBy can be used to match a mock call based on only certain properties -// from a complex struct or some calculation. It takes a function that will be -// evaluated with the called argument and will return true when there's a match -// and false otherwise. -// -// Example: -// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) -// -// |fn|, must be a function accepting a single argument (of the expected type) -// which returns a bool. If |fn| doesn't match the required signature, -// MatchedBy() panics. 
-func MatchedBy(fn interface{}) argumentMatcher { - fnType := reflect.TypeOf(fn) - - if fnType.Kind() != reflect.Func { - panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) - } - if fnType.NumIn() != 1 { - panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) - } - if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { - panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) - } - - return argumentMatcher{fn: reflect.ValueOf(fn)} -} - -// Get Returns the argument at the specified index. -func (args Arguments) Get(index int) interface{} { - if index+1 > len(args) { - panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) - } - return args[index] -} - -// Is gets whether the objects match the arguments specified. -func (args Arguments) Is(objects ...interface{}) bool { - for i, obj := range args { - if obj != objects[i] { - return false - } - } - return true -} - -// Diff gets a string describing the differences between the arguments -// and the specified objects. -// -// Returns the diff string and number of differences found. 
-func (args Arguments) Diff(objects []interface{}) (string, int) { - //TODO: could return string as error and nil for No difference - - var output = "\n" - var differences int - - var maxArgCount = len(args) - if len(objects) > maxArgCount { - maxArgCount = len(objects) - } - - for i := 0; i < maxArgCount; i++ { - var actual, expected interface{} - var actualFmt, expectedFmt string - - if len(objects) <= i { - actual = "(Missing)" - actualFmt = "(Missing)" - } else { - actual = objects[i] - actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) - } - - if len(args) <= i { - expected = "(Missing)" - expectedFmt = "(Missing)" - } else { - expected = args[i] - expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) - } - - if matcher, ok := expected.(argumentMatcher); ok { - if matcher.Matches(actual) { - output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) - } else { - differences++ - output = fmt.Sprintf("%s\t%d: PASS: %s not matched by %s\n", output, i, actualFmt, matcher) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) - } - - } else { - - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) - } - } - - } - - if differences == 0 { - return "No differences.", differences - } - - return output, differences - -} - -// Assert 
compares the arguments with the specified objects and fails if -// they do not exactly match. -func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - // get the differences - diff, diffCount := args.Diff(objects) - - if diffCount == 0 { - return true - } - - // there are differences... report them... - t.Logf(diff) - t.Errorf("%sArguments do not match.", assert.CallerInfo()) - - return false - -} - -// String gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -// -// If no index is provided, String() returns a complete string representation -// of the arguments. -func (args Arguments) String(indexOrNil ...int) string { - - if len(indexOrNil) == 0 { - // normal String() method - return a string representation of the args - var argsStr []string - for _, arg := range args { - argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) - } - return strings.Join(argsStr, ",") - } else if len(indexOrNil) == 1 { - // Index has been specified - get the argument at that index - var index = indexOrNil[0] - var s string - var ok bool - if s, ok = args.Get(index).(string); !ok { - panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) - } - return s - } - - panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) - -} - -// Int gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Int(index int) int { - var s int - var ok bool - if s, ok = args.Get(index).(int); !ok { - panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Error gets the argument at the specified index. 
Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Error(index int) error { - obj := args.Get(index) - var s error - var ok bool - if obj == nil { - return nil - } - if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Bool gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Bool(index int) bool { - var s bool - var ok bool - if s, ok = args.Get(index).(bool); !ok { - panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -func diffArguments(expected Arguments, actual Arguments) string { - if len(expected) != len(actual) { - return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) - } - - for x := range expected { - if diffString := diff(expected[x], actual[x]); diffString != "" { - return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) - } - } - - return "" -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. 
-func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { - return "" - } - - e := spewConfig.Sdump(expected) - a := spewConfig.Sdump(actual) - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return diff -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, -} - -type tHelper interface { - Helper() -} diff --git a/worker/types/types_test.go b/worker/types/types_test.go index d095ecf46..89996e21b 100644 --- a/worker/types/types_test.go +++ b/worker/types/types_test.go @@ -22,6 +22,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" ) @@ -186,6 +187,7 @@ func TestResponse_Sign(t *testing.T) { "test_float", "test_binary_string", "test_string", + "test_empty_time", }, DeclTypes: []string{ "INTEGER", @@ -195,6 +197,7 @@ func TestResponse_Sign(t *testing.T) { "FLOAT", "BLOB", "TEXT", + "DATETIME", }, Rows: []ResponseRow{ { @@ -206,6 +209,7 @@ func TestResponse_Sign(t *testing.T) { float64(1.0001), "11111\0001111111", "11111111111111", + time.Time{}, }, }, }, @@ -239,6 +243,15 @@ func TestResponse_Sign(t *testing.T) { err = res.Verify() So(err, ShouldBeNil) + Convey("encode/decode verify", func() { + buf, err := utils.EncodeMsgPack(res) + So(err, ShouldBeNil) + var r *Response + err = utils.DecodeMsgPack(buf.Bytes(), &r) + So(err, ShouldBeNil) + err = r.Verify() + So(err, ShouldBeNil) + }) Convey("request change", func() { res.Header.Request.BatchCount = 200 From 1debdf43fe3745cf62d5b482c9dbcb82a315e3b4 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 7 Nov 2018 03:45:54 +0800 Subject: [PATCH 26/32] Adapter use test/service/node_adapter conf in docker-compose --- docker-compose.yml | 8 +- .../admin.test.covenantsql.io-key.pem | 51 ++++++++ .../admin.test.covenantsql.io.pem | 33 ++++++ test/service/node_adapter/config.yaml | 110 ++++++++++++++++++ test/service/node_adapter/private.key | Bin 0 -> 96 bytes .../read.test.covenantsql.io-key.pem | 51 ++++++++ .../node_adapter/read.test.covenantsql.io.pem | 33 ++++++ test/service/node_adapter/rootCA-key.pem | 51 ++++++++ test/service/node_adapter/rootCA.pem | 39 +++++++ .../server.test.covenantsql.io-key.pem | 28 +++++ .../server.test.covenantsql.io.pem | 25 ++++ .../write.test.covenantsql.io-key.pem | 51 ++++++++ .../write.test.covenantsql.io.pem | 33 ++++++ 13 files changed, 509 insertions(+), 4 deletions(-) create mode 100644 test/service/node_adapter/admin.test.covenantsql.io-key.pem create mode 100644 test/service/node_adapter/admin.test.covenantsql.io.pem create mode 100644 
test/service/node_adapter/config.yaml create mode 100644 test/service/node_adapter/private.key create mode 100644 test/service/node_adapter/read.test.covenantsql.io-key.pem create mode 100644 test/service/node_adapter/read.test.covenantsql.io.pem create mode 100644 test/service/node_adapter/rootCA-key.pem create mode 100644 test/service/node_adapter/rootCA.pem create mode 100644 test/service/node_adapter/server.test.covenantsql.io-key.pem create mode 100644 test/service/node_adapter/server.test.covenantsql.io.pem create mode 100644 test/service/node_adapter/write.test.covenantsql.io-key.pem create mode 100644 test/service/node_adapter/write.test.covenantsql.io.pem diff --git a/docker-compose.yml b/docker-compose.yml index 90ab2e8c2..e4b1fdf5d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -123,11 +123,11 @@ services: - "11105:4661" environment: COVENANT_ROLE: adapter - COVENANT_CONF: ./node_c/config.yaml + COVENANT_CONF: ./node_adapter/config.yaml volumes: - - ./test/service/node_c/config.yaml:/app/config.yaml - - ./test/service/node_c/private.key:/app/private.key - - ./test/service/node_c/:/app/node_c/ + - ./test/service/node_adapter/config.yaml:/app/config.yaml + - ./test/service/node_adapter/private.key:/app/private.key + - ./test/service/node_adapter/:/app/node_adapter/ networks: default: ipv4_address: 172.254.1.8 diff --git a/test/service/node_adapter/admin.test.covenantsql.io-key.pem b/test/service/node_adapter/admin.test.covenantsql.io-key.pem new file mode 100644 index 000000000..46206b1a4 --- /dev/null +++ b/test/service/node_adapter/admin.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAo/ktdmTGZAxZedA2vCVwXTb8iWNHg2D+EfI79oZ23qCBf09T +bp3RyRnAAogWh833Po+wsvAeKt5qtKQozKABjzyF0KJbwZhpDUcpf94SRX0FHha4 +G60hCmB9I9XzL2qx5A18G+d/Y4l962KA2DTgEoxoN33HUhHafCmpdJH8npkGsNol +2q+LElNXnS07caRXFbjckFZzm1s0YBlyT60CaNhI/R4kJvnO0UHfJ05vLunBlnJ7 +WGFSxJkcdrPGTGP6TdkU0AZTVQMYnkTnkTp6cdwhVBj7FrLvzCJgPfzcEQvxkxpB 
+EQwureJUstLOarnMGCretg/dNszyoVzdMjAZtNxtM2u49TLIZstEgY1KEu4h3Gc8 +omIXC1F3EEf3X+3rrYd37gems1ki5q/ow6wblwwUtvbKCyggubHMFzSaRpmNueV7 +e6hKnKZU6GWZ3/Q21gj5Ma5d3eauHdurquCS/tzYf+GNbYZHmczIADrTRlemqfhB +5zMCbSSb8cZ5/APjoZtq30/WfrvmhHxdUnxgML0n6q3sq5oSnFZ55vyMEQOBX3z/ +Vx9jb1S+pxZywJlRHaJ8GTz+hhgr+ojXvMBtEsn99r2Ndu4R/FEgEJV/26GZP0JD +c/SJ3GkZQhZ7IjUZfpQJ6/VKW0yiu86doPNsoG2gWJ+fF5VeagICYE5iBFUCAwEA +AQKCAgB145pp+n4gRDi4OZiAoLIucnASHsy1ijBgmrW9wmMIIIG6FEA50UGYweio +aUs5jD1sP0ac/8HQtGQnR7cFlyxH3Q2gOHqbr4Ynw7f0dKbSStY5EcCANXMB0Oln +sFTNDHqlKYTHUyLlX16mswVLbIiFDWmIK+f3+1oH1rQ8WRE0vXRwBgcdOQRVwpHF +MVYBmFP8DBKXu3AWi/YV+XWUDyEiXA3t3ZPEaenlzOQxkFSjd/B0yA4iNqaZLjOm +rA2vslmtSpuKDGIxRq6Wa5fJdC/AWLGlkuhDI5cAPt7O8lMN9nZSepe5N/b/kS+v ++ZvqY3Z3EatZXgJ/ec4fcXKeuLJhjOLRg38pfr49nq+ewVKivbH7bvfeYSyJrIx6 +ZCiXHl1IvQmS1272gx7rTAvBUJa29sKLwo1hw5vcCi06R/6GxWnTqUXhcg6W4yA/ +ejpkUJduDh4drU9w9FZ7OPfP+AzqPP5yhdEcCvxI/9wOVHgdILA8aM8OLE1QdoEW +sRc/my0dG3rmtx2tNOKE0oghZknlRdOj6j1Uq6O2XHHjQJuTHO+bvqLe87kOwr4F +KOq9APYUs7hRhtUROgx3fygc6nfyG0qi7Khz/2cidosAmObb547d+5a827zQ4dYS +xQ2lZeEe6cabuAZg1Kz1roC4t4Vl6Xi++rkqPEms574ITSHdaQKCAQEA2H+xvTpE +PoIOEBvZ2ECVjwk05qCmyqZhJzIKRgpXRTwRYjAQsSuRvPuafxtRv/sUC1eXFbuV +zCDCP1NiO1YD1lxG1FFfjRdoeKHoZlKd/DNpjv9s1knPhMm7Nms408c9V1202ttS +zmQr5DM6o2K+f3V701cnuBPyKbFDA4IQ7sYKFr7mEs+O44cKVr5+NPdsCu/4dyj6 +9ailBoWy7nkjPUrsaFcTyszwZ9b1LMJ4NcT1InvJS1LuP1SWIczqUiv7iaj5i0Hf +lubNQ1tjHe0XYIj+tW0shg8e6oAu73yhf43iVKFDa3kZCMD5Ht1jnAUxg8Nowvvu +XGAbJIDAOIJoKwKCAQEAweQa4mZS0dR3Jtvz4sxrqxz+pS2mSQB8pqfekfQwE6mx +UobFPd6qztDBf5PmfprkDZBPFl8S9ZYyEiJyjVDqeUyANZdHq3NPYOUtbT8SiCrl +ymsP/OX1sf2vPsxdwJ48PET5iFrWbEHFXCkeNuwgZIM3EhaqE7cMC/Uj9DyZwatJ +j1er5w+3E5A5oLhPpy2XuM83wlXyKTWXH2bbDpdN1HRcujESiY+rSzLpixvLcwl4 +ejFr3T/MfQXC5fEDmQI0R4hG6BpzNfGznSyY1+J0uJ8gDqzJ911MyQyD9eMNTOZU +PhMqLmBt1VyMUz5ekcFxM5v5vgPmF+fn9A9M5baFfwKCAQEAng+ETVnHzzcWW05q +Gkb0qewX0jUB8LvN/Fa9R2tvUZ3MNzpORXtAuI+cuSXR5m7BsJIvPO+qKtDT4HXZ +JubigFL4ZzRNpW2smT3jtSimLSW/8GWtKTnUJuc9Jjrbz0oMD8fbLVmouARMQxvf 
+uL9zwwyb7a0Y03zEdQn0mhAQmrK9VOPkh2E/uf+yXahP7g2htM6EQUMLDeUlLoDY +JOEOCEa2GGtSiOJctgMrFpWYO/Fi4t0rFjIivNvdjCnV/U4dI+DY54GdYsd4nq+O +yp95TMJX608cjXdmo+AX5ELCiaSl1BG0bjeIPmrctlr9yT/FaaR1zL0vxgNobZsO +O3OB5wKCAQBDQu1sotCSSCF5ko4dnIqxVqKkDJ0F8CxN6ChW+53+BD0mguhD2U3p +5xNpPZaVTwhUCD7XZO3/0jXWgqq4iVx97eMANFXBjYP4+ifzIRE9uZvzx4ZJVkEQ +mQ/FOkI/wuTkh40FF3YRIhPkL8NyjCGEnNxq4v/nTPXZ5BWv8aHpRJGFL4XL53C8 +UakcLzQ6q59ZllEikowqbZPaaeUOP8DZNfDBCqsCm5txv9yyzFactqlbwm9H1o0K +xgfhmuWDm/ck5YqrlBlpmkqT+Neg9MdHELSfQqPhszUi/bt9fmGrzq9kxWM5qWwQ +u0VWz2khKTkrDS3rFBErM+EMko47lkDjAoIBAEdoQOdMnKn5hzbhxUhDit6I/NoX +K9xEc7VH0oBd9KLsINFzQyGYz857jSyCZ7L6o7JHTVLs/469lcjcuDJ/9JkNU7G0 +p3/h33sHN/w/cGh5OyWpaAt+m1PoP6fEoHomFAilAINCkXlT06+sLQo7dl7khJ7z +5qsogIVzeW1etFICikJHIHSsND21vCkVmRbrOA3MZxNpDwsTcK/LxmF3xq34PTS3 +1BKFZA872IuMf/xLGQ0RdEbLzxtSUppkMl2SWE1Vph1dV3xR+YUeYMziYq692cRE +6McNJpjK8RhdC9t3AlLrViyAphcU1v8T8YprQHMS/1xCbGZ/8nrCAnD81gU= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_adapter/admin.test.covenantsql.io.pem b/test/service/node_adapter/admin.test.covenantsql.io.pem new file mode 100644 index 000000000..1e3d7d608 --- /dev/null +++ b/test/service/node_adapter/admin.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFyDCCA7ACCQCofDYaBrdh6zANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIwNDFaFw0yODA3MjkwNDIwNDFaMIGoMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEiMCAGA1UEAxMZ +YWRtaW4udGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFz +dGVyQGNvdmVuYW50c3FsLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEAo/ktdmTGZAxZedA2vCVwXTb8iWNHg2D+EfI79oZ23qCBf09Tbp3RyRnAAogW +h833Po+wsvAeKt5qtKQozKABjzyF0KJbwZhpDUcpf94SRX0FHha4G60hCmB9I9Xz 
+L2qx5A18G+d/Y4l962KA2DTgEoxoN33HUhHafCmpdJH8npkGsNol2q+LElNXnS07 +caRXFbjckFZzm1s0YBlyT60CaNhI/R4kJvnO0UHfJ05vLunBlnJ7WGFSxJkcdrPG +TGP6TdkU0AZTVQMYnkTnkTp6cdwhVBj7FrLvzCJgPfzcEQvxkxpBEQwureJUstLO +arnMGCretg/dNszyoVzdMjAZtNxtM2u49TLIZstEgY1KEu4h3Gc8omIXC1F3EEf3 +X+3rrYd37gems1ki5q/ow6wblwwUtvbKCyggubHMFzSaRpmNueV7e6hKnKZU6GWZ +3/Q21gj5Ma5d3eauHdurquCS/tzYf+GNbYZHmczIADrTRlemqfhB5zMCbSSb8cZ5 +/APjoZtq30/WfrvmhHxdUnxgML0n6q3sq5oSnFZ55vyMEQOBX3z/Vx9jb1S+pxZy +wJlRHaJ8GTz+hhgr+ojXvMBtEsn99r2Ndu4R/FEgEJV/26GZP0JDc/SJ3GkZQhZ7 +IjUZfpQJ6/VKW0yiu86doPNsoG2gWJ+fF5VeagICYE5iBFUCAwEAATANBgkqhkiG +9w0BAQsFAAOCAgEARu5lULDZastDfdkkWhdBlPphbSksyNqT0tr/RZr0EtWjtNjq +IEYLuqOyVom3r3FiNjBO9u74BJxSqzbH2GH7qjZPnGfMgFQaxnS96T9HnXjZlPn5 +spcYA1m0W5TpF17N/rzxH+/c5VyIhwsVBdRF/uVow/6r+GkM+knC1K4Md27Wz0KU +jqOQ5eUm5KV4kyOQUg7MmTafqQcwt1Xh10kJ/52hAG53IznMgCo5ZSqYZroLlF8j +WXTlQtGr6SnsK8poSJW/JuidgBfwliL7OGFMnvWrCVk6FhAL3rlY/PmhDZ+OnG8x ++b5JuuxZcHnA0JVvK01eWAmcMixHlgtnZ+6Cgsx4CtUUo+PKuOZBBo4lWqw+/y5V +A0cvPy+8DadAndT/xd/NHUXgxrNjbaTaFuDeAJwN/i2wWh2wibEPhv25rCVQTvOP +HG9b2izWR4eYTqBSbTZjrfagnt3Ikx9os1C+/wuwGRMC/1GEwQ58bSuWHaKXdXSy +1syTvm+tt2Jg7shaKsfw+ZMY6iChUJ49yBB5W1F6VBHUgKqsGxnKlrEC4z6YoOkl +E9WNb6R/8ROF+OCYPgbisYaxIUFp6KJXK3Eh3J7s7XqW6Fn6nw5e0eMn1SZZIZNt +XeLTiv7tjmSREMVzABvaIaFQk0s5GmWkZvqQVkRLJRiHuCCgbIWMrZUZf24= +-----END CERTIFICATE----- diff --git a/test/service/node_adapter/config.yaml b/test/service/node_adapter/config.yaml new file mode 100644 index 000000000..aeefebf7f --- /dev/null +++ b/test/service/node_adapter/config.yaml @@ -0,0 +1,110 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "172.254.1.4:4661" +ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + 
mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + 
PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner +Adapter: + ListenAddr: 0.0.0.0:4661 + CertificatePath: ./server.test.covenantsql.io.pem + PrivateKeyPath: ./server.test.covenantsql.io-key.pem + VerifyCertificate: true + ClientCAPath: ./rootCA.pem + AdminCerts: + - ./admin.test.covenantsql.io.pem + WriteCerts: + - ./write.test.covenantsql.io.pem + StorageDriver: covenantsql diff --git a/test/service/node_adapter/private.key b/test/service/node_adapter/private.key new file mode 100644 index 0000000000000000000000000000000000000000..f563980c1fcd669303b1bee9c2172bf5a3519b8c GIT binary patch literal 96 zcmV-m0H6PF*slzHCqzPE3aw^kxJ?Q%G%ogw14*THn=7~eV;?h-t?#^t5W+6R^1DgL z$@60LgW8>L#Ft4anW%5%J6f5~?krWm@CHc~TLX=J0P-Na@n`wgY{PEN*;2omcYC0; Ca4_ls literal 0 HcmV?d00001 diff --git a/test/service/node_adapter/read.test.covenantsql.io-key.pem b/test/service/node_adapter/read.test.covenantsql.io-key.pem new file mode 100644 index 000000000..4cbc33ec5 --- /dev/null +++ b/test/service/node_adapter/read.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA7cWb0RE4+hFRBhwJpAgQUSUOG+8H9evC85z5IgyrgwIPP7sl +6xz7VpiJ3O+1SP3Y+aHf1vNryp+AqW6Y2G9fPjPSusp4dFE19xC5hG7kYL2TLmtv +8B5ceLtQnI1XZd46TAiFqOg3rkB2X8oHOPfHY4zhPQ+4PC4EnIlFwiczoAbdomV2 +pQrKKM4F4ENwvthVr9uPg74pOEbJcy8NoW0l0WZlY82DVfC9ydvOLJH4nsncLTEY +BDUki9ETQIrWgqC03U21MZmDcyfZbi5M53aKky1iQOzNQZ3Rr/fFdNMcbLYKVlMH 
+hXQC05oXH8GvPYigqOzNyMngWdousKGiE/k3vncJPwVFhdovQXLfa/aaZGznBrP+ +dJe7lpBmAIpFd7LmdqVWPnuDwywhmVEneaI1aeBEXEl60/V6bFDUmSd62yBtFPcx +axUk8tABWDL7vN3kY3W4bUbNyvscQo8Q6waPjD5hTsbXAptQnXNsLzssDTnBX+Kk +ZNI1tTZ0suikRnbtvTDYd4hN6GFtmD6kF5J8F/e9iNZiBy2JnOIbvSckqkVhelyQ +o8zbr0k/rgNRXnV4UfJShYaWEaG74i+JcAVmp/P4Y9LyBwbiSPgH5oGg+eNdo6P/ +JOil0ArFvjBbCwDiHxKtd03jdSZ/B3pnwPGd9lHI0M8Tulp37LZsZm6bfI0CAwEA +AQKCAgB3I1rNyPlpo5+blhTmkfvLDOwi5wRwHq/SbUcP3pVZ0YBeiKGZSy5M16XM +hHermTZM7uU/yTyrjHxlaTtAx51Lh6ABZE4yyjZmE4VBbGcWaicDTWYLRMtE22aq +6s9uBYnkayi9141+zGID4TD5RH4tzXtWozfHP6+j18ySWh4uAwKuynRGgj+FbqXX +FzO5DKDyuusQMgppXl62Tk9gIVafs9T9yw4R08zlBjQqdQHEXpTqN/02roIfZKVm +46pUTb4SXUt7DNamrsLtyFlUaTtKP6VJrt2yESfuKhJQVS+a8SQA2R2dquF3sXAA +w4XRKVKHEhCSmUTHAOIAMx0JMQjSeYffbUR3dF8t8jK/RYHZ32oKuYgt6LLLMMUt +nfehiweMYKkjhLzW0WCyhuqlhk4T3x1Wgh6S+HiOHgvD8dW8wnmNL2k11h3STroM +g6Fc9+9KMBp97FrsCYFrIDeTY6uWCJxE5Dkb1Y7VXUdGMuIHztNnSKIHcNDOL2Mz +N6qr2smE1I5Wzm7CGv46AXTt0TOKnXgEyjNxp8LRkl+oYm/GlbqG1RrXHqWOcQEv +1Y6FSo1yP3SlOcPq3YLcZzTLH2hrrR1R4ie7hKRL6j19TnBR8R1CsP7LHOHJ4ahM +14SkS4srowtYsXJijoGh56K2H2sxnElVxQJ85qALdTBeR64eKQKCAQEA901lznD2 +5ZHFtYWLKhlqXaqgM/4Whu6cR0f3C9SLcQzAYjuaZjGmOKauackLRWSVpDnSR8zB +ol1QrRyY2upbVKRXxR9nkqamVtYZxSBS+8YjRBOvcESNY5HhjIIqwBQBTJY9v1DQ +kA5WSfivThQZGJDH1y2PrYFi5ZxmwRBnMMw+NXQy0ccRIagVMt4qQLEnA7diab9F +2ZYAgpk4o4d/tA4rF/22AcWX7pdk89zA99qoz0p2ko5/JrV+FZMGZ8PPbm5I4HsV +ahHXBXIUWExpOhonVjLNWXDvTHhkRS2zT2uhav2ohLf0+CouSA/aGoDBpOTzSndw +pL7yIQTu9B2W4wKCAQEA9iJnNuS8qfENZu+/5fzLlh7OaAerMN4JLdg251ESp4fs +LTOWFTlzU880/57SREcLO5RfYhMw2FUzyU+tXrc1cAZMNe8cGmPqDeUyQSgavs0M +WzSnFUk7z6jHH3GNCAPBC9A7M3oogyNiNm8fXZX51Fwv/EyAJX9lQhmXPMh/c53f +ulWCD28XKVEgsjEMMKZZVOvkRGBN9KLJ6DlLCtrixZSCfUyP04AjLxDeMGnhqi/P +nDADvVcxrRuMs5/8OQ7DVg4UxuOK4D/v3KcBq34hK13uOvupdxBHO1yYlUVa71G7 +ZX4KhumUOZZQkoYSEzW1N6IZXzA8+nV/Ulh8u4WJzwKCAQEAn+9eN/S2uCFeS9bh ++YgWUh1XHkjlKL6IM1FHZE9BHwuwH9eMMytI5LpnceKjd21lmaALboPtdqQC2PH1 +qR6HkmX2nXWB9kXwrZgpcmNFR68Mf6p7e4/aINrnk4dbPn2xmWZQ6LnLKF8dTxmV 
+xlkZIdoAZBkDIqLa4sQTcCi7k8ODN+6+Lw0e9zVNAGjNyqjHIpAnBVy+P8nS6qNN +DfVDkZ7YH9vlKaAwcg1XLJ9H7QNsySLPLFkbwlz9/dXn/pOUQ0bvur3fS4neFZeB +sNk59GmVpxmT1JRFLp9tuY+kt2hULG1/3tVZiGU/KTuXQiyjD5FCBpbYMrOKw+/8 +2cOJIQKCAQBO6ub3Jc4MGxr190crIavRHV2G43aTO43r4hhwgIEfsCgcsh6b/Yip +xZUzpKO8ep7yYndWxdpycpchI+ftp4Z9vbcvz9PN7l08SVGcrJQuuyYMFEzCOXHw ++iemQE081Z9O/1wL/E4DBhRWabi/0/d/jHNiTNEFtNwtnnDsb0jWNDdo0kPaWP8v +IzD9kVZcPuoDnYLaHZrBJnTgfYY/G8F8IkrYi/TNlpcxXxIuqbROUfgaFxcL3Woz +G9M4QMKpNL+S1v74ajq7/iQVNoMFjnJqKjrZNJm4cEK2mNDfg5ZNh4IzX39WlIwP +DtAUuuIOwLiy9sl1yMy0bXn+WBreMUnnAoIBAFkchyvXwhi/l+rRiHAuFMTc3BsO +br1fVA5Avlv0MSBNRa2sISoslCDcqgLgdRckJplz2Q7YX0tL5aYZvZtMpiNmvyES +RL2hNqulrKJ/8Yuf04hUW14MhXizq7+NgMCTtOeLo3W40+EGswV9wvq/wTgdE5Yo +WgstDYvQ1YlqVXP1kWZDcFY1kO0zLIOWwFWbtmmtM2TDi09kZFNLGOoGXsJvKCWE +6vJ8xORPmmrVQ83hHIPqGlFkxts7R209RLWgGWSSOatdhDEd3uiuVS/XlNA3Q16l +70ME8P5a/MqEwmCF1sODndfqnc2A9n/XBM65IdFproaANOwsIcL2jW6T/3U= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_adapter/read.test.covenantsql.io.pem b/test/service/node_adapter/read.test.covenantsql.io.pem new file mode 100644 index 000000000..1fa09dd22 --- /dev/null +++ b/test/service/node_adapter/read.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFxzCCA68CCQCofDYaBrdh7DANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIwNTdaFw0yODA3MjkwNDIwNTdaMIGnMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEhMB8GA1UEAxMY +cmVhZC50ZXN0LmNvdmVuYW50c3FsLmlvMScwJQYJKoZIhvcNAQkBFhh3ZWJtYXN0 +ZXJAY292ZW5hbnRzcWwuaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDtxZvRETj6EVEGHAmkCBBRJQ4b7wf168LznPkiDKuDAg8/uyXrHPtWmInc77VI +/dj5od/W82vKn4CpbpjYb18+M9K6ynh0UTX3ELmEbuRgvZMua2/wHlx4u1CcjVdl 
+3jpMCIWo6DeuQHZfygc498djjOE9D7g8LgSciUXCJzOgBt2iZXalCsoozgXgQ3C+ +2FWv24+Dvik4RslzLw2hbSXRZmVjzYNV8L3J284skfieydwtMRgENSSL0RNAitaC +oLTdTbUxmYNzJ9luLkzndoqTLWJA7M1BndGv98V00xxstgpWUweFdALTmhcfwa89 +iKCo7M3IyeBZ2i6woaIT+Te+dwk/BUWF2i9Bct9r9ppkbOcGs/50l7uWkGYAikV3 +suZ2pVY+e4PDLCGZUSd5ojVp4ERcSXrT9XpsUNSZJ3rbIG0U9zFrFSTy0AFYMvu8 +3eRjdbhtRs3K+xxCjxDrBo+MPmFOxtcCm1Cdc2wvOywNOcFf4qRk0jW1NnSy6KRG +du29MNh3iE3oYW2YPqQXknwX972I1mIHLYmc4hu9JySqRWF6XJCjzNuvST+uA1Fe +dXhR8lKFhpYRobviL4lwBWan8/hj0vIHBuJI+AfmgaD5412jo/8k6KXQCsW+MFsL +AOIfEq13TeN1Jn8HemfA8Z32UcjQzxO6Wnfstmxmbpt8jQIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQCq3FVZnp9HGItWlAXpViXrJx51D5W+bh83yKKlo23fo4u/6BM0 +H0gXTtl0XpG/nsp1oqINpc9+NXzEbs7Twx4utN29WyboacbLu5KPD6q17bWTdIH3 +VijHcyOchlru0nPhweNVtSR7+hmVMZrqHy+Ib2uzuDieD7ulvHTaX/JDkRvZYhYS +8qCptWk9VObeNnA3cyoZo5WyvRLXBQ5Q6LW5EMmXXQIKWyejX3vzwraZXFyhkLzz +GwY3h/ez4dm5Vgbf+lodAtslO5SEKcA6tSQLcdCO4J5+aZrbyIuzEGUra+Y2ZiRl +xtYzSkgaMRpMYZU7y96v7qoj2UOJw7KYj+3bN8rb3iTiXKXBG2XoH6Kn7IQb8pYD +k0+KGZmtZQ38St5UNmT0V2G1eoZA0F0FpuyVPe+ZOF3TxCq4BkvQC9puTrpHZiFm +mWw9xQsjOX34B88GckJsldUq86f+SNLhBFUBQOVRxWWjOV9R7PHHr+d28foTdPfU +gjf6Ff8XGoDw40peFLodsJfuI7xvZHa/4IoDnhEYHyDml++jskDypfNmSBn4m8fx +EtcwxUmsjHdW/mXqdFtgMsT+NGiGZ766KNS+JTWkv9ZJQMUS/714v3q/ymgzIIQ1 +BNhosSnSqa/eyAzggu6+US/FaG69xDBZGwoI+xw3kzQ+WoTQzjwoz57Enw== +-----END CERTIFICATE----- diff --git a/test/service/node_adapter/rootCA-key.pem b/test/service/node_adapter/rootCA-key.pem new file mode 100644 index 000000000..d4e545428 --- /dev/null +++ b/test/service/node_adapter/rootCA-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA0ry0EA1+Yeidf13L2GdhO1WiSEUKUDslVcYfzqbHh+NJyw4F +7J2cnolA2UcJ9zg7NoB5W3mYjoRBxr/SY3JVeDa7E3NnniNvO+n7ZD1sfCgjsJdB +89kMY4adzMbyJIvrXcllMawP2237jZ8LsZRl+NLJzWe296pfivWD1dI8RxnVk6Rr +Ub+VrcxFLp7W/NoCfW37xU3SYo4jFhVfCS5P9IMqI3w6aBK7mLcRj2HH8+M4xCxz +YLmRsHxkazZcn6KL/Tef5QyxM9a9yexwg8u16z4yxn74m6egQ/CNVtWcqg6zNpr4 +EI8WfgagJr1dus1OrpZrhdeNQDIQwMLa2RHhj72PF2qQ7bLkuWAY0UyuFvcwsGFi 
+EfxYtfNxnl2YPM5aVqrt31lQLi0AOLEvC6rYXd5sykDg9XAthuNe3cJAfqcGUpFr +3GrAwbPcuGUHPrgo6UjyNmwaBYLlGFmz3te8Pj2P1fLoLFXROtCg5hpYHsAqNZoV +zrR4/3uGvPn0eABzMR6BaNYl1m0mPkSY7bCDH2oulEmU/E1Ck0QuJ9+jd5vVrHPc +jV4B3jQsmK5UP7TXkcsJ5n8OozPahoItv5cwNYt4XGWpCqpEoqRmOSWZu6utX/nD +oByaw8mcEqPBHEotG7in8qpQLIboPO003e3lwq7mECwSz7UXFrIkfx8l7PMCAwEA +AQKCAgEAxfVjhCzYyqpTwMBga3F5Yd5oalEIhiN5R+CsIliC2MCGoksS/5ly9W3j +T/eugd/9p4358hcAWugcTdIFlUtixGFNTNE8xc1tgS+j6h7VGLAwDoOX/bOnMprT +Avjjn7ccKuazu3xxDOR8yCVeO7s2Kw3/aYeC1ZXi2EsXQ7WQ0A2RlnZ+JbW9qhxX +5JprQ+ybKC43srkO52uzw9vhgWNS0lKgM+NPjlICjUtzIGhvB0gsHAPRgkvvcoT3 +Y8sWKRLtQ7mL5wMMNrEDaXpEm1myE0BDPDkr2jQVlZyTeL2CxDC44pOicROowkwB +B0MdmAuiXNiKOpkoY+Rj3l9sazqj0cfzc1aFmUchAyb0Q+a2V3ubEUgRVtynRO75 +p41SrdB5Jo4rm83GmRoV2tbIK53rseRrXQ9VT72pu7D2XN6KhEgyUbc+4p9jbTY5 +GFGkWPbfp6ryoyiFWnwQyqlKQZnz+k74aweQQ0uroc5JUKgNxaS7kLIB2+4DrIRF +P0RwuUTR5wI9WjpdB4J17NzpBNgJ2s5eaQ40CCFHSictUX1a9kFk24nel7XI9br0 +F6tFwC9F3TdSxx5HyHna66WfOfG+vs6Kt6RC4Dzft08/jrQeQ8fnZcufjaeFG6Uy +xPZQQJ24krJ/SrsZiZmrR5bFCRFTE/n2N9npZpBHhajYhjbhs8kCggEBAP04RPKR +vw9knLkuqK78QVUBR4UzydqMDQpZFF9wM1x2lhg312K/og1Y8785SEHqsTgtXNvT +cleE9NhjUqsLfENfJov0ofCCXbjEUuRxCEZd+1R5XfX/SLOGmGmWqqiBMReHE/Cu +c0e8nBY/isGDtl5E9FPxdTUQDrPz61UAt94SThs0Jhq0oKT90QRm8/vxKkgOcYWf +s4D3BgGcvdDXA0zwH8RC36fAPvYLfi8i9OQ1upi9gNBs0EgYOtM3VLHZ3HQrZWTT +gUCwR+la0no19eZOgpbJQS2XzGLTVC3FFNQK6emOQ5g3h6bml8ukFQOHIWeHVOqJ +K0G7B/lT+S4WCEUCggEBANUNBunyt1/Y3+2JyRhF9C5Rq7Av5k+sedhsMLFHuQ8x +Cf/wAs8yKW09a0YrqX6laVmu7VcBHaMVY37lac/U6Slr11JnsHLTNPBgwwl20Z7U +QSG7/WdE/p0ylatKKg7dJ6iA2ctjYbjG0ML1XWuj6QbkvNDh/KR3cD/niNqXNCQ2 +KihJ62mQO1odKRRgBqImYtRVo7E6hgYvkYqK9TBgGQ5ZtX4tiMjHah/YR7AtEuOr +O3Yt4aaAww7w6JeRecIEg4JSW4KuK/ztJ7D5PNRg7sz2hECjELcFP6fTxF+qcEj5 +IzRgdTjs/bNUZz4H7ikH9ejBJdEvwPHlyDQHlFPsP9cCggEAKWGGsvVqecOBcSnU +2zPSIWgiHfyGojZ88xH3qFkXq6adhLurcTHL885zlu5vhoYqC/ot0KbPasoJkUs5 ++UXZOtFT5U9HH5zOYCGFQlvOdGFrbzSeTFM5uEzon2jF3t+t/CBQ++YmZLTH9ULR +FCrIJMO0AfvVoaRMItBbxvplEd9/8CYni/m0vwHTpJqGiMeyly/1EVc16H919dF9 
+m6Fnoq0jI9mh3zIll+Ps7RsTVjAJnGhroqQFraJ4CohiSOZHhpyI519BIicsuU/k +UaB73PU7lhSxmBfUiNnsScaJTtWxwD9FgJyiiH3qlJbt8DOnG9ob4HAmJ4m/FdnJ +QOTM2QKCAQB1uesWH27A4eBrK/YZGZ6icbLDetRzNkVmF/KYI5/ZCyoRaRjEUV2e +5Y9/iOTx/IlIa2bu6sjrswf1uONNWsM0hkjHWlCgQqFAKtfbRPL0JymOcIjIJdHk +H22g5yxyZjZh4EF5KAN5zTLSaC8lKb+8dWz4p8epQe6fAVwYHfFMCTomZSJWhMKn +OvHWNnGz7C40UtZPOp2QkXyE5+AwyQlParblcFfjSn4T9rk2WtHTSG1lEllcXk5q +1ShRiKuVUFUzEDtM7N2Vt551JmQ8nwuV6qqN5Q15dMcF//jFPDMrv696Y8qimCJg +k8Uw+8TYm3OBGCnDe/XMNUL5rS6DaUqlAoIBAC8zFGOm1FFfSRAFOiKUEkGIYaBQ +Gd7GaBWzZFC2tDe6Dmp+WxFxqX7Dr/AG8nmCwOrbJOayhrEmIlYwjHmZNDSvphMp +L3dQYqVCqQRvCDx9ztXb+mus3iyhgD1vgWB/EwqhiK3S2n4rbaGU60h0YFC1JL0s +icrlRsZMkJV+l5O7gGFCVHCBZc9XZDeu6pqOjyMS0gx5IXyHGRBS7hS2HXD9QHid +/sufbNxzs2sCdwM/EwE8BlaKX0OiLGyxcQh7e5Ca4INuNzM5G+3ZEr2auVAkfTNF +u+sAmvfbC83U70HJakLGZuqq5F+xamj8dL/qnlYpo6D1wdnep1IeVvn83z8= +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_adapter/rootCA.pem b/test/service/node_adapter/rootCA.pem new file mode 100644 index 000000000..1aa3ca429 --- /dev/null +++ b/test/service/node_adapter/rootCA.pem @@ -0,0 +1,39 @@ +-----BEGIN CERTIFICATE----- +MIIG1jCCBL6gAwIBAgIJAIMSiSlXKMA9MA0GCSqGSIb3DQEBCwUAMIGiMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEcMBoGA1UEAxMT +dGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFzdGVyQGNv +dmVuYW50c3FsLmlvMB4XDTE4MDgwMTA0MDc0OFoXDTI4MDcyOTA0MDc0OFowgaIx +CzAJBgNVBAYTAkNOMRAwDgYDVQQIEwdCZWlqaW5nMRAwDgYDVQQHEwdCZWlqaW5n +MRYwFAYDVQQKEw1NZXJpZGlhbiBMdGQuMRAwDgYDVQQLEwdEZXZlbG9wMRwwGgYD +VQQDExN0ZXN0LmNvdmVuYW50c3FsLmlvMScwJQYJKoZIhvcNAQkBFhh3ZWJtYXN0 +ZXJAY292ZW5hbnRzcWwuaW8wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDSvLQQDX5h6J1/XcvYZ2E7VaJIRQpQOyVVxh/OpseH40nLDgXsnZyeiUDZRwn3 +ODs2gHlbeZiOhEHGv9JjclV4NrsTc2eeI2876ftkPWx8KCOwl0Hz2Qxjhp3MxvIk +i+tdyWUxrA/bbfuNnwuxlGX40snNZ7b3ql+K9YPV0jxHGdWTpGtRv5WtzEUuntb8 +2gJ9bfvFTdJijiMWFV8JLk/0gyojfDpoEruYtxGPYcfz4zjELHNguZGwfGRrNlyf 
+oov9N5/lDLEz1r3J7HCDy7XrPjLGfvibp6BD8I1W1ZyqDrM2mvgQjxZ+BqAmvV26 +zU6ulmuF141AMhDAwtrZEeGPvY8XapDtsuS5YBjRTK4W9zCwYWIR/Fi183GeXZg8 +zlpWqu3fWVAuLQA4sS8Lqthd3mzKQOD1cC2G417dwkB+pwZSkWvcasDBs9y4ZQc+ +uCjpSPI2bBoFguUYWbPe17w+PY/V8ugsVdE60KDmGlgewCo1mhXOtHj/e4a8+fR4 +AHMxHoFo1iXWbSY+RJjtsIMfai6USZT8TUKTRC4n36N3m9Wsc9yNXgHeNCyYrlQ/ +tNeRywnmfw6jM9qGgi2/lzA1i3hcZakKqkSipGY5JZm7q61f+cOgHJrDyZwSo8Ec +Si0buKfyqlAshug87TTd7eXCruYQLBLPtRcWsiR/HyXs8wIDAQABo4IBCzCCAQcw +HQYDVR0OBBYEFFdgm7OKRRCg0gIK6kxGU4PuVhM7MIHXBgNVHSMEgc8wgcyAFFdg +m7OKRRCg0gIK6kxGU4PuVhM7oYGopIGlMIGiMQswCQYDVQQGEwJDTjEQMA4GA1UE +CBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQGA1UEChMNTWVyaWRpYW4g +THRkLjEQMA4GA1UECxMHRGV2ZWxvcDEcMBoGA1UEAxMTdGVzdC5jb3ZlbmFudHNx +bC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFzdGVyQGNvdmVuYW50c3FsLmlvggkA +gxKJKVcowD0wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAqEjzbVox +aaxCXs/lQuJE5/l/QSBs5MeE98zwINxCNmZYMsA9JmekyphP286fgdC7G2uRP89G +5lf9+UkHjfWK/N8l1t0NAA4LScMaD09SYCq9p/s7BfxfG0ZS5hfZ6MXuf6svYhL4 +gg7RQEUNZsaFSLvhMG0hGnBzKjDEPurrRnOx9tbtQF6/O6evN2Ig2ssqKjn/m1As +1mxGZy1ZCyREQvHEyj0p36LQtWJOYGRDncflJbLSMBrWq/bxQkATMYJuPPetHIJH +nQzbsbagUrTGZPM8B4LJXD8RtnXmH7zrU+JOunshxTfnl0vo+ezvKT0ig2q2M/t1 +DH0Em8EUgJUlUEOxUfA2hZ2Oq2RrLNz01oK06D0De5JL3CwUpSqbzqJ7F5M5os53 +I9FXSiKbjJUxZijH6NkTZ1gP6GpsEEWc6qOXXAYJWNrW12L7+QjnjgjWI176xO0y +VrvVGBgeOCoFAD/4FSzmCiee9v9sbdzd1GkfkXztPJKdeorRPyetob/zK+4btW4n +0dxfv6XahyBgoKVA7a0kn8ZqM/g4hmkfX4LujTK+C75d8p669zopQ3O76XRBsyJF +dM7J2DwRudG2NphtJyXWXdDSdK9s3iPUiS0y+j4gg9I/cFBQUjKD0R5ZPcRrdG4N +9zeN5A/Kg7vHsbpREm0YtLO9LvlLUp0HUS4= +-----END CERTIFICATE----- diff --git a/test/service/node_adapter/server.test.covenantsql.io-key.pem b/test/service/node_adapter/server.test.covenantsql.io-key.pem new file mode 100644 index 000000000..97b9d4b6b --- /dev/null +++ b/test/service/node_adapter/server.test.covenantsql.io-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDYqNT8V2PPMnWI 
+Ka2PxM7z2cf1WMrJq30EXFCboCJXyxLKFrBvb8LqgXpLonhug5kIVVaz3yHSph6d +lFEw4RCLzDm4PbvUxMbX3h0v/uirT8wCy8RvXYwQcisl2++bpO8IqFTiqg4O5Thr +O7BSqNXdS0/yi3PVN8UCzGckGXfPLsD7bCWPAFJq1YmvJ8XAqgAm7h3XPoWbUSEK +jOdLjU3jDq7/kCdZNN2DSkTDkE5JE2pf90BnALUIijggeLQn2080NfbFSW09j8kg +6BzsYfqBo+xxQ1MEP2N/0Zuqf+xW6jDYrHweznDXJdNHUN/yxp+64kxHgp2wyR4f +NxLYt7HvAgMBAAECggEBANNt9uMGGRWyxTWKjqBVTCx1o4fPDZ4+ZrLhr5wfakRI +nV5vQ+CLrSgSEJlMxL/8VlPmi8Teg/BAQnI+sfjEOdRjCRS90dXx7aXtUIhs9vtu +1MUJuvl+zdeiwm6gsbQvAUFum9/SWgO5NxSWXBxePM5G1472/aPeV7jCZgi5fczE +pC21VB7zzPG20UjWqVj2vAD8tS9/UQybc12/IOnS7z6pQP1wpn/2N99BEcEXWpDW +m7/jDbrZ6qJD18QmAoltMVfQF5Pi6qpLkU8qOYKFioO7GGNhapWz6lvgeLanux3l +mU71RAMANgmgdjs4RFdC0hfy0a/xPRfINCeVkSwC7mkCgYEA323IVoDaKSatFtS1 +W7QlX9tjtL1esuvQfHsR7B5EAqir6Lw4Hpn/gPlwNaTl+34GJy6dm4Shbn/A5pka +ow8bjWNPynTxuvlT5NXOW9TmgkrzATIhSrQfMHO7saDCo8JVqRZUvewFXXo4bitm +2bsHYh8Z1XClOz1fka97zEr3Wg0CgYEA+D5sEf9JdFU9LNuDC5JshG/Fc401mukg +AckE1Nmi1y4aa8XNEZYwUIlBb4r+OUlV/cMQvxEqnt0bl5oiL5myYW+sSnSCkHU6 +O3CQl1PO+uuiyNYl3QGNcq5Hw01joR/HejD+h0I5Mb642sXmUcba4fcLKBS1ZG6g +tCANeXBuKOsCgYEAzDYPMeE7hPkwovlveY3By0c+bmfXMjmOqPfjfah8COIZnfLK +aE3g1gUmpiE9gwvAm/djXk1vLwvdR+cQDZE1YZkvyJ/ygS55m2I/5ndE6DmQubsT +6q+PAj4Fg2in/f0VRiJ++cfLb5DSGv/YVZE4Qlqixg7bNrX1r7ZwtFygj9ECgYBA +S3qWFrahqMoVai1AvAXbL0/Go9Y0bxjZHYVg05V3gftZ2ntIiMuusD4Ac9FwaOwa +s4EM25dcWgwhccxU48vtrIzFI/QFEjeo2Xi5mP1Mw+b/eWeJHDPUdgskLFEXlDGI +FlR2F9LUbX9XOlZy67wZNnDvSp3Ii1aYEI0s3M/LTQKBgCadu59DWqTxHfzu/vRG +e7xIMuqXZ12zA/9Ks2pasw1Aa9ZWwgRpZmP4PiFn9tyXEsUXYVbNxWEu3ZUOMQEY +Pq4BeyADEWLDeoo1rHbAEv2X+cr7rm4Sobu2vxtfi0uMlUILtWyK3XuiRoTdlXOH +U9xfXHYXJp08l0Q2dXIHtEZl +-----END PRIVATE KEY----- diff --git a/test/service/node_adapter/server.test.covenantsql.io.pem b/test/service/node_adapter/server.test.covenantsql.io.pem new file mode 100644 index 000000000..1b9428afa --- /dev/null +++ b/test/service/node_adapter/server.test.covenantsql.io.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- 
+MIIEQDCCAqigAwIBAgIQEKobji5n26kQYHutrsnlgjANBgkqhkiG9w0BAQsFADBt +MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExITAfBgNVBAsMGHhxMjYy +MTQ0QFFpcy1NYWNCb29rLVBybzEoMCYGA1UEAwwfbWtjZXJ0IHhxMjYyMTQ0QFFp +cy1NYWNCb29rLVBybzAeFw0xODA3MzExNTA5MDVaFw0yODA3MzExNTA5MDVaMEwx +JzAlBgNVBAoTHm1rY2VydCBkZXZlbG9wbWVudCBjZXJ0aWZpY2F0ZTEhMB8GA1UE +CwwYeHEyNjIxNDRAUWlzLU1hY0Jvb2stUHJvMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA2KjU/FdjzzJ1iCmtj8TO89nH9VjKyat9BFxQm6AiV8sSyhaw +b2/C6oF6S6J4boOZCFVWs98h0qYenZRRMOEQi8w5uD271MTG194dL/7oq0/MAsvE +b12MEHIrJdvvm6TvCKhU4qoODuU4azuwUqjV3UtP8otz1TfFAsxnJBl3zy7A+2wl +jwBSatWJryfFwKoAJu4d1z6Fm1EhCoznS41N4w6u/5AnWTTdg0pEw5BOSRNqX/dA +ZwC1CIo4IHi0J9tPNDX2xUltPY/JIOgc7GH6gaPscUNTBD9jf9Gbqn/sVuow2Kx8 +Hs5w1yXTR1Df8safuuJMR4KdsMkeHzcS2Lex7wIDAQABo30wezAOBgNVHQ8BAf8E +BAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAfBgNVHSME +GDAWgBSD0tobP0meocjRN1XBYqlSTOHglTAlBgNVHREEHjAcghpzZXJ2ZXIudGVz +dC5jb3ZlbmFudHNxbC5pbzANBgkqhkiG9w0BAQsFAAOCAYEARjlPL41xnYOUHz+k +Qrj/2figGRYGBwfnLVJrjkkSuWY1KRTLUlUYcc9ofkLzAcwRxVbdhcwLLHDA/ddZ +Yii7AY9Z/amzagu/btgvaWu1KMb8IKe6PKy1ZjzzpT6M9xGbW/YyxSWSfNXxD2t1 ++ThvFKZai+525IC2PjlOP8k9hKu4A55wNjvekleqQ+B944iXDRBVOHqgK3Fy3JQ5 +pcAGm9Q0Bn8xNZhEsVERPKeMOnxF/rfggEiCdPp6fexG9X+dUziPSXR8RGZDn16E +Ho8S4m3or0fMX2W2EsYkRY/ESxsE8Y5KFELh4RW2DrUfzibHaS3ZeXyJLAuBTUzj +s4BqXUwpKwqoQqv3d0Mi1RZanfVMWG470tuvGdmaW3HdZoIBmo44fVjx63/6wEGm +0A45avtOHRwQGObM446Q+Gs6zsZspLgEHjmPwr+0PsIjbR6weehXnAAOnr9RWX8n +UstyEkOSDZA8vJmSWSu8tXwky31ZF+cSC7DYZxBP7dhPWDCn +-----END CERTIFICATE----- diff --git a/test/service/node_adapter/write.test.covenantsql.io-key.pem b/test/service/node_adapter/write.test.covenantsql.io-key.pem new file mode 100644 index 000000000..1b0d20305 --- /dev/null +++ b/test/service/node_adapter/write.test.covenantsql.io-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEArwFtfbMdNu7m332+1KH/6hY7/zv+YhPu3NJ7WZC2wJlyc9nI +BzuD7SV8lcwv75w2n4aclr/KWFXYBPeqtfn4ebV/pvIZtyd+P4MGKbT3FuHdxPhI 
+7uTcw7LENXUKtmqO01OO1mx4+cbn14Hd8f1NUoxs5vEnohBoV7etI84fLAxglNAd +dtGTeN+jlSN+ipFKI74aPeEqnnJeJ3j35ZgvYb79hYEHXPngKOCsAa01cDrpxzuD +L+ukveP5SLee1lCYHpUKEMt+0SMBalVW6XltBnmMrLL5tjfA4RyAmgRxle8q+O2w +DxivStX1kInUL22kj6AjSrBpOPy12rwQiPa6pJt0pjk7+S71p69NJeoYyYz2+l6g +uWXqJ8b/jlwT4+CBOlOuDuvCvU1almQADKcv1eN9g0X2d8F+9CHgUdHaVkpGJ9ue +Ogrw5HWipnjt2B/YFRErh3125CDnGap+SCEoPCJFTxVG0GahzmWC1sQr6EsHdMDB +k6YTINY+4iNvoxlTUhAMWiYeLNY4PZaqL9q3tcjuHMxMcykAYpMiEQe0mGsWiRjz +DfnDtGcyeagqHkcjBE/w1qwGjLQBJFmjYmwpk5cDtS0OhhEIOastqKoacc+L4rnq +ALEM94uuPS8VMfu+d9rspfBMy/a/PjMrOO5pYLD0yIIYVHDJxyHT0p0QVZkCAwEA +AQKCAgBzaAueQwnW+gCCDVhUvGgZJIR4MkX0w5RXRu5VCBucMxTI1SsVqee78WaR +Gk/aQTe8R3bn6p4zVpjX3cNTsf5rtIbzvt+6am7Uz0C3LEFtc5FdnSXrdD0pSLAf +WImx9d8t+QJO4MV+Ye7trRSByjq9XyFJwmoSc6N7hQLGg90GnTrrp7pmappHsaMc +bIW8N0ee/nQrrlr+lgkFGr7PR2annN1utsH2TEnIazDDAkglNJSJ7/L5HPpMxxPT +IlO6nPdT45D5tlhw7ha22oQv/wUoqetcz8Hgqi+lw7gC2T9WUpwSAByEOBEQ1rvT +jzC//hvxIvdi/6bED9KU5kQ5Lgux/fWUvg/l6u7EebM6TeG/Er5Tq1D6j2+IjfkR +bPHLEk2Cv2oE7W1PhP+yinJnoxwHeic+nu7wsuevzvPhNZ7lLaWgHFvc1YVTmLDq +E/DGm5Qj6mh8SP2NcxW79m6fjdbgjw0OuPxdEo2sj74cxcifTy54GuSoZqGnw18g +28qXpDLkWgHQFrm6LvLqnaY3uvNMYLBWd4kqH0Y9XKI1N3j82ensAwH5e6Ol8Stw +I7GWT/1GggOEPbIwYtVBfbwghfktPttmHU4vs8IQnufqExgEjDhWspjJVZEuoO8V +8weDCQADS8/266GqRN9CUtNWMmbM0jAHgL7Bq9AFwgYSrP/BiQKCAQEA3GNqpUw8 +Ix3yYxvEXbceOguwqjzOW1vGBhJ4k6ptPG9kPI0GqMY6sw/RgRlCpfCq7Ig/Renj +6LPbSyjkylfavlLNODi/iGbnd6cLKtsvaeexWP1wYU4T367rF8ifqKl/C+l4XqBN +4j4KooyPF889PrgXJ73517jWwhxnncgDabJ2zTetjkvFbf3bAua5F7rufHi39Zng +Rt0gEmFg99XJjrtbS7iND5fhZpGK14hhkdwHptu6XS+yGIiVwbCwS0odtZDID3vk +s9CEUzhjnK8ld04RJ4vSMlxfzlub3e88Lvii79mmZgdH4aP0cPhmFJ3i0mefUVpw +cSmQSVMsxHkh6wKCAQEAy0i1KjX3k7LwWgsvwtRjnJMuEWJ/SoRiE4+Cho9BSCVg +onG4NyBOUgfQI9pBKf2CPWVDBA5VQrDN81ozmTPTgb7isDcFDSiowqyVSyRCorUB +AfjbpD7z6QMdBt15xHR3CXWwpio5NwBQqQ+I2AJ1koBYUVj5TupDOZzwVY8/BbqD +fmhtqLd4c2q5Go2ESK+EVAA1jvFmZUTjr9jC9a/8s/cn5Xqv7/s4BCmmfqQSKZS2 +LPBA2Th1zsUrSW3Os9v+c6LUU92LVEZKKKZyRykTemQRH/oljGG9Dn/hUDcvaI2z 
+A2+T15rQd9p6ePySD8BuZzxwFvAJQPOYaqivrzsBiwKCAQAmd5fSuEa63mxDTkJt +FRxKh2XToP9nxNIAl1LCe3nLlanKQ9dIuCjgvj8UKIOQkTxUQsfAfT2RjWsWaFHe +24zLsYouaQFNXqDCKr7xQQa6ln1HCh2Gbmlbnp1cLmFnwAXz31FqOtK9TZTvoFcN +kdefzeQExM0KETIy+WBAkvu9hC/mS/SYJLOWKjwC+qCN+svLoAqD7NLPq6MAckzJ +lWAz8JHT2qeMdDccfwTb7+sP2XbgcfPKdhvA2n5BK4Tp70rWOSoiQb6+gAPIvsvs +Oknw1Ah8fZQ3xBXY3/aJu0sm67EM6lF394ddZA+zdDflG1XO4dVWDtIXfmi307O5 +q2b3AoIBAFkTMfceEK8SkIkUL8hyYnegcmZBv786RPOHRc2KhjOD1VU4+VyGdmsx +az3ajAVHRUN71KK5WRjQ+l2w37043WwT5acLZNZAQ7qR/xUe/WfoYlmn3y6YOy6W +I6j3cTzpP6PQgyg8hjeYlr+NxAvLABPC03BJyWyP8AcVwqXrD9WFxcqlHa/5PPlu +AVAmRJnI9vYL5WwOUSz8w7wxAjS/+b4uBbhjSyaf8Qq56W/CmwbHWBBW8kN8nvqM +oQwa5qEfO98VsW5SPJQf/KzVSmvuDs/peyuE4+EgjsQEuwj4NXjd5lwSDzlBaCms +fU/4dFQcoQPxkrgqVBO26cmKwvjIpUMCggEAIb85XP+bwnSOTjFbn61k8PdgyPBq +kBDkiofKiBuR2UzuYhVqxkBqWHtDUhqq1y7A0S/ya75bSv67q4ZlHWEjCEiA8thv +KZwn/8yRVFFKEgtB0afub62Zgc+pPXAr2JwwtZK5dg91QxPaKF20YEz6tOcZdjut +gcQ8Bt4dpRvoz2vOJQnqMIhQM9+HiE7XXV7fgUwT55nC+4wRhILd3xOZKYzDzgMJ +ShMUAb7QkLRujyQwcYPxjWiqRFGSodMoNE2OdofLmfwD1vQfZ/gAorLBH3BVyXAz +53zHfE7+kLgobJBYf7T7Jk246soVYOLSZbeVjAT0ajMKD4ay2jNdnSlxKA== +-----END RSA PRIVATE KEY----- diff --git a/test/service/node_adapter/write.test.covenantsql.io.pem b/test/service/node_adapter/write.test.covenantsql.io.pem new file mode 100644 index 000000000..ccebf106c --- /dev/null +++ b/test/service/node_adapter/write.test.covenantsql.io.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFyDCCA7ACCQCofDYaBrdh7TANBgkqhkiG9w0BAQsFADCBojELMAkGA1UEBhMC +Q04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxFjAUBgNVBAoT +DU1lcmlkaWFuIEx0ZC4xEDAOBgNVBAsTB0RldmVsb3AxHDAaBgNVBAMTE3Rlc3Qu +Y292ZW5hbnRzcWwuaW8xJzAlBgkqhkiG9w0BCQEWGHdlYm1hc3RlckBjb3ZlbmFu +dHNxbC5pbzAeFw0xODA4MDEwNDIxMDZaFw0yODA3MjkwNDIxMDZaMIGoMQswCQYD +VQQGEwJDTjEQMA4GA1UECBMHQmVpamluZzEQMA4GA1UEBxMHQmVpamluZzEWMBQG +A1UEChMNTWVyaWRpYW4gTHRkLjEQMA4GA1UECxMHRGV2ZWxvcDEiMCAGA1UEAxMZ +d3JpdGUudGVzdC5jb3ZlbmFudHNxbC5pbzEnMCUGCSqGSIb3DQEJARYYd2VibWFz 
+dGVyQGNvdmVuYW50c3FsLmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEArwFtfbMdNu7m332+1KH/6hY7/zv+YhPu3NJ7WZC2wJlyc9nIBzuD7SV8lcwv +75w2n4aclr/KWFXYBPeqtfn4ebV/pvIZtyd+P4MGKbT3FuHdxPhI7uTcw7LENXUK +tmqO01OO1mx4+cbn14Hd8f1NUoxs5vEnohBoV7etI84fLAxglNAddtGTeN+jlSN+ +ipFKI74aPeEqnnJeJ3j35ZgvYb79hYEHXPngKOCsAa01cDrpxzuDL+ukveP5SLee +1lCYHpUKEMt+0SMBalVW6XltBnmMrLL5tjfA4RyAmgRxle8q+O2wDxivStX1kInU +L22kj6AjSrBpOPy12rwQiPa6pJt0pjk7+S71p69NJeoYyYz2+l6guWXqJ8b/jlwT +4+CBOlOuDuvCvU1almQADKcv1eN9g0X2d8F+9CHgUdHaVkpGJ9ueOgrw5HWipnjt +2B/YFRErh3125CDnGap+SCEoPCJFTxVG0GahzmWC1sQr6EsHdMDBk6YTINY+4iNv +oxlTUhAMWiYeLNY4PZaqL9q3tcjuHMxMcykAYpMiEQe0mGsWiRjzDfnDtGcyeagq +HkcjBE/w1qwGjLQBJFmjYmwpk5cDtS0OhhEIOastqKoacc+L4rnqALEM94uuPS8V +Mfu+d9rspfBMy/a/PjMrOO5pYLD0yIIYVHDJxyHT0p0QVZkCAwEAATANBgkqhkiG +9w0BAQsFAAOCAgEAM+1j12Px/guyMnZFwwsRC9ITa5zJkAkfR++LzRZcT+Gz5du1 +FyQp+5L4Pws96OLFADKHVYE0EFlgdVbskVBErrEIQeZRw0bmp1zDEhfxr4c8fivY ++hW/AXjHsJuO8WVTlRctnefY1g6OdvfI6Sc2092GM9Nvquf1OhKIbPso1NxUUrnp +HQ4ffhQNAFsJk/PkPsjTBzP2iJrzynPdoIPK9jO6NbUg6XfZDQRwchvI7NduWq+x +nNTWV1D8oHvP0+FwHdRyctIVVjkxqd7wnenWl2mUr0SBf0FnfJPl9fz+YLVBLroF +4NGwGG/r6q9tRBAXATm+qbNlth589Tz8mMZMnq2+D6O4499I4MJLceuXw689rO05 +s9/BXWzjJThDnrFaQPyf/YTyMuFaf919F0UGLTLYLYf4vfuflUhaStmYyvArv229 +F4DJy/QDM+NWjo/pJH3ETeEA1stD7kQq7GGqy/MiB5YXqRLnGjpa9vqOECsMIm29 +1TUgdCVN9Gsk8JQPGm/lJUeJECq20LThSeXG+sY6RU+0rmOUJvR8Uv3kjkn0Xd+/ +p2xM/CboFXVcmU+fe9UfJar87MlPJcZP5SenVQuWZ3imI0kFeaObfHHKKJfNAoFl +agBFqnAc/EkYqekxGkxc3pVhBBiZ3D+FlinC2yRko9glPkRKA2WxINPVxm0= +-----END CERTIFICATE----- From eaeb23353c3caff000e5cb9dd9ffb66cf3fab470 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 7 Nov 2018 12:21:01 +0800 Subject: [PATCH 27/32] If CryptoConn.Conn is nil, Close just return --- crypto/etls/conn.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crypto/etls/conn.go b/crypto/etls/conn.go index fe471adfc..d3c81359d 100644 --- a/crypto/etls/conn.go +++ b/crypto/etls/conn.go @@ -122,6 +122,9 @@ func (c *CryptoConn) Write(b []byte) (n int, 
err error) { // Close closes the connection. // Any blocked Read or Write operations will be unblocked and return errors. func (c *CryptoConn) Close() error { + if c.Conn == nil { + return nil + } return c.Conn.Close() } From 32093c647d27034436e96c62eee963ec8344ef9c Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 7 Nov 2018 13:22:02 +0800 Subject: [PATCH 28/32] For benchtest --- cmd/cql-minerd/integration_test.go | 53 ++++++++++++++++-------------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index d11fb5f7b..cb47b0dfc 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -458,16 +458,19 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { b.Run("benchmark SELECT", func(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { - index := i%insertedCount + int(start) + 1 - row := db.QueryRow("SELECT nonIndexedColumn FROM test WHERE indexedColumn = ? LIMIT 1", index) - var result int - err = row.Scan(&result) - if err != nil || result < 0 { - log.Errorf("i = %d", i) - b.Fatal(err) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + i := atomic.AddInt32(&i, 1) + index := int(i)%insertedCount + int(start) + 1 + row := db.QueryRow("SELECT nonIndexedColumn FROM test WHERE indexedColumn = ? 
LIMIT 1", index) + var result int + err = row.Scan(&result) + if err != nil || result < 0 { + log.Errorf("i = %d", i) + b.Fatal(err) + } } - } + }) }) row := db.QueryRow("SELECT nonIndexedColumn FROM test LIMIT 1") @@ -484,18 +487,18 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { log.Warnf("Benchmark for %d Miners, BypassSignature: %v", minerCount, bypassSign) asymmetric.BypassSignature = bypassSign - if minerCount > 0 { - startNodesProfile(bypassSign) - utils.WaitToConnect(context.Background(), "127.0.0.1", []int{ - 2144, - 2145, - 2146, - 3122, - 3121, - 3120, - }, 2*time.Second) - time.Sleep(time.Second) - } + //if minerCount > 0 { + // startNodesProfile(bypassSign) + // utils.WaitToConnect(context.Background(), "127.0.0.1", []int{ + // 2144, + // 2145, + // 2146, + // 3122, + // 3121, + // 3120, + // }, 2*time.Second) + // time.Sleep(time.Second) + //} // Create temp directory testDataDir, err := ioutil.TempDir(testWorkingDir, "covenantsql") @@ -503,9 +506,9 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { panic(err) } defer os.RemoveAll(testDataDir) - clientConf := FJ(testWorkingDir, "./integration/node_c/config.yaml") + clientConf := FJ(testWorkingDir, "./service/node_c/config.yaml") tempConf := FJ(testDataDir, "config.yaml") - clientKey := FJ(testWorkingDir, "./integration/node_c/private.key") + clientKey := FJ(testWorkingDir, "./service/node_c/private.key") tempKey := FJ(testDataDir, "private.key") utils.CopyFile(clientConf, tempConf) utils.CopyFile(clientKey, tempKey) @@ -533,7 +536,7 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { db, err := sql.Open("covenantsql", dsn) So(err, ShouldBeNil) - benchDB(b, db, minerCount > 0) + benchDB(b, db, true) err = client.Drop(dsn) So(err, ShouldBeNil) @@ -673,4 +676,4 @@ func BenchmarkMinerGNTE8(b *testing.B) { Convey("bench GNTE three node", b, func() { benchGNTEMiner(b, 8, false) }) -} +} \ No 
newline at end of file From 3934eeeae49fb3aa0b4af756e677e3651ec87ee8 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 7 Nov 2018 12:30:07 +0800 Subject: [PATCH 29/32] Clean up chain.db and observer.db when start --- bin/docker-entry.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index fd5348d4a..23104a50e 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -7,9 +7,11 @@ miner) exec /app/cql-minerd -config "${COVENANT_CONF}" ;; blockproducer) + rm -f /app/node_*/chain.db exec /app/cqld -config "${COVENANT_CONF}" ;; observer) + rm -f /app/node_observer/observer.db exec /app/cql-observer -config "${COVENANT_CONF}" "${@}" ;; adapter) From d989f114853decd1b18cd3fbd1b81c21049093ba Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 7 Nov 2018 12:37:47 +0800 Subject: [PATCH 30/32] Remove unused delete statement --- cmd/cqld/bootstrap.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 781c64b26..b2c052c3d 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -191,8 +191,6 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { } func createServer(privateKeyPath, pubKeyStorePath string, masterKey []byte, listenAddr string) (server *rpc.Server, err error) { - os.Remove(pubKeyStorePath) - server = rpc.NewServer() if err = server.InitRPCServer(listenAddr, privateKeyPath, masterKey); err != nil { From 2544402bc7fa3cbdeb93280c7eb9c51ca8f0be4c Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 7 Nov 2018 20:34:47 +0800 Subject: [PATCH 31/32] Goimports & golint --- blockproducer/chain_test.go | 3 +- client/_example/gdpaverage.go | 3 +- cmd/cql-minerd/integration_test.go | 2 +- cmd/cqld/kayak.go | 3 ++ cmd/hotfix/hash-upgrade/main.go | 44 +++++++++++++++--------------- utils/log/entrylogwrapper.go | 37 +++++++++++++++++++++---- utils/log/logwrapper.go | 1 + worker/dbms_mux.go | 4 +++ 8 files changed, 65 insertions(+), 32 deletions(-) diff --git 
a/blockproducer/chain_test.go b/blockproducer/chain_test.go index c809dde19..5104cd08f 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -238,9 +238,8 @@ func TestMultiNode(t *testing.T) { Role: func(peers *proto.Peers, i int) proto.ServerRole { if peers.Leader.IsEqual(&peers.Servers[i]) { return proto.Leader - } else { - return proto.Follower } + return proto.Follower }(peers, i), Addr: server.Listener.Addr().String(), PublicKey: pub, diff --git a/client/_example/gdpaverage.go b/client/_example/gdpaverage.go index 22878cb81..f3a1e098e 100644 --- a/client/_example/gdpaverage.go +++ b/client/_example/gdpaverage.go @@ -19,8 +19,9 @@ package main import ( "database/sql" "flag" - "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) func main() { diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index cb47b0dfc..eedc70c67 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -676,4 +676,4 @@ func BenchmarkMinerGNTE8(b *testing.B) { Convey("bench GNTE three node", b, func() { benchGNTEMiner(b, 8, false) }) -} \ No newline at end of file +} diff --git a/cmd/cqld/kayak.go b/cmd/cqld/kayak.go index 807054268..2d64c9e04 100644 --- a/cmd/cqld/kayak.go +++ b/cmd/cqld/kayak.go @@ -22,11 +22,13 @@ import ( "github.com/CovenantSQL/CovenantSQL/rpc" ) +// KayakService defines the leader service kayak. type KayakService struct { serviceName string rt *kayak.Runtime } +// NewKayakService returns new kayak service instance for block producer consensus. func NewKayakService(server *rpc.Server, serviceName string, rt *kayak.Runtime) (s *KayakService, err error) { s = &KayakService{ serviceName: serviceName, @@ -36,6 +38,7 @@ func NewKayakService(server *rpc.Server, serviceName string, rt *kayak.Runtime) return } +// Call handles kayak call. 
func (s *KayakService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { return s.rt.FollowerApply(req.Log) } diff --git a/cmd/hotfix/hash-upgrade/main.go b/cmd/hotfix/hash-upgrade/main.go index c4db07472..1c534a00d 100644 --- a/cmd/hotfix/hash-upgrade/main.go +++ b/cmd/hotfix/hash-upgrade/main.go @@ -48,16 +48,16 @@ func init() { flag.StringVar(&privateKey, "private", "private.key", "private key to use for signing") } -// Block_ type mocks current sqlchain block type for custom serialization. -type Block_ ct.Block +// OldBlock type mocks current sqlchain block type for custom serialization. +type OldBlock ct.Block -// MarshalBinary implements custom binary marshaller for Block_. -func (b *Block_) MarshalBinary() ([]byte, error) { +// MarshalBinary implements custom binary marshaller for OldBlock. +func (b *OldBlock) MarshalBinary() ([]byte, error) { return nil, nil } -// UnmarshalBinary implements custom binary unmarshaller for Block_. -func (b *Block_) UnmarshalBinary(data []byte) (err error) { +// UnmarshalBinary implements custom binary unmarshaller for OldBlock. +func (b *OldBlock) UnmarshalBinary(data []byte) (err error) { reader := bytes.NewReader(data) var headerBuf []byte @@ -82,39 +82,39 @@ func (b *Block_) UnmarshalBinary(data []byte) (err error) { return } -// Server_ ports back the original kayak server structure. -type Server_ struct { +// OldServer ports back the original kayak server structure. +type OldServer struct { Role proto.ServerRole ID proto.NodeID PubKey *asymmetric.PublicKey } -// Peers_ ports back the original kayak peers structure. -type Peers_ struct { +// OldPeers ports back the original kayak peers structure. +type OldPeers struct { Term uint64 - Leader *Server_ - Servers []*Server_ + Leader *OldServer + Servers []*OldServer PubKey *asymmetric.PublicKey Signature *asymmetric.Signature } -// ServiceInstancePlainOld defines the plain old service instance type before marshaller updates. 
-type ServiceInstancePlainOld struct { +// PlainOldServiceInstance defines the plain old service instance type before marshaller updates. +type PlainOldServiceInstance struct { DatabaseID proto.DatabaseID - Peers *Peers_ + Peers *OldPeers ResourceMeta wt.ResourceMeta - GenesisBlock *Block_ + GenesisBlock *OldBlock } -// ServiceInstanceOld defines the old service instance type before marshaller updates. -type ServiceInstanceOld struct { +// OldServiceInstance defines the old service instance type before marshaller updates. +type OldServiceInstance struct { DatabaseID proto.DatabaseID - Peers *Peers_ + Peers *OldPeers ResourceMeta wt.ResourceMeta GenesisBlock *ct.Block } -func convertPeers(oldPeers *Peers_) (newPeers *proto.Peers) { +func convertPeers(oldPeers *OldPeers) (newPeers *proto.Peers) { if oldPeers == nil { return } @@ -182,7 +182,7 @@ func main() { // detect if the genesis block is in old version if strings.Contains(fmt.Sprintf("%#v", testDecode), "\"GenesisBlock\":[]uint8") { log.Info("detected plain old version (without msgpack tag and use custom serializer)") - var instance ServiceInstancePlainOld + var instance PlainOldServiceInstance if err := utils.DecodeMsgPackPlain(rawInstance, &instance); err != nil { log.WithError(err).Fatal("decode msgpack failed") @@ -198,7 +198,7 @@ func main() { } } else if strings.Contains(fmt.Sprintf("%#v", testDecode), "\"PubKey\"") { log.Info("detected old version (old kayak implementation [called as kaar])") - var instance ServiceInstanceOld + var instance OldServiceInstance if err := utils.DecodeMsgPack(rawInstance, &instance); err != nil { log.WithError(err).Fatal("decode msgpack failed") diff --git a/utils/log/entrylogwrapper.go b/utils/log/entrylogwrapper.go index 125c05b74..666558b9a 100644 --- a/utils/log/entrylogwrapper.go +++ b/utils/log/entrylogwrapper.go @@ -22,8 +22,10 @@ import ( "github.com/sirupsen/logrus" ) +// Entry defines alias for logrus entry. 
type Entry logrus.Entry +// NewEntry returns new entry for logrus logger. func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: (*logrus.Logger)(logger), @@ -32,128 +34,151 @@ func NewEntry(logger *Logger) *Entry { } } -// Returns the string representation from the reader and ultimately the -// formatter. +// Returns the string representation from the reader and ultimately the formatter. func (entry *Entry) String() (string, error) { return (*logrus.Entry)(entry).String() } -// Add an error as single field (using the key defined in ErrorKey) to the Entry. +// WithError adds an error as single field (using the key defined in ErrorKey) to the Entry. func (entry *Entry) WithError(err error) *Entry { return (*Entry)((*logrus.Entry)(entry).WithError(err)) } -// Add a single field to the Entry. +// WithField add a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return (*Entry)((*logrus.Entry)(entry).WithField(key, value)) } -// Add a map of fields to the Entry. +// WithFields add a map of fields to the Entry. func (entry *Entry) WithFields(fields Fields) *Entry { return (*Entry)((*logrus.Entry)(entry).WithFields((logrus.Fields)(fields))) } -// Overrides the time of the Entry. +// WithTime overrides the time of the Entry. func (entry *Entry) WithTime(t time.Time) *Entry { return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} } +// Debug record a new debug level log. func (entry *Entry) Debug(args ...interface{}) { (*logrus.Entry)(entry).Debug(args...) } +// Print record a new non-level log. func (entry *Entry) Print(args ...interface{}) { (*logrus.Entry)(entry).Print(args...) } +// Info record a new info level log. func (entry *Entry) Info(args ...interface{}) { (*logrus.Entry)(entry).Info(args...) } +// Warn record a new warning level log. func (entry *Entry) Warn(args ...interface{}) { (*logrus.Entry)(entry).Warn(args...) } +// Warning record a new warning level log. 
func (entry *Entry) Warning(args ...interface{}) { (*logrus.Entry)(entry).Warning(args...) } +// Error record a new error level log. func (entry *Entry) Error(args ...interface{}) { (*logrus.Entry)(entry).Error(args...) } +// Fatal record a fatal level log. func (entry *Entry) Fatal(args ...interface{}) { (*logrus.Entry)(entry).Fatal(args...) } +// Panic record a panic level log. func (entry *Entry) Panic(args ...interface{}) { (*logrus.Entry)(entry).Panic(args...) } // Entry Printf family functions +// Debugf record a debug level log. func (entry *Entry) Debugf(format string, args ...interface{}) { (*logrus.Entry)(entry).Debugf(format, args...) } +// Infof record a info level log. func (entry *Entry) Infof(format string, args ...interface{}) { (*logrus.Entry)(entry).Infof(format, args...) } +// Printf record a new non-level log. func (entry *Entry) Printf(format string, args ...interface{}) { (*logrus.Entry)(entry).Printf(format, args...) } +// Warnf record a warning level log. func (entry *Entry) Warnf(format string, args ...interface{}) { (*logrus.Entry)(entry).Warnf(format, args...) } +// Warningf record a warning level log. func (entry *Entry) Warningf(format string, args ...interface{}) { (*logrus.Entry)(entry).Warningf(format, args...) } +// Errorf record a error level log. func (entry *Entry) Errorf(format string, args ...interface{}) { (*logrus.Entry)(entry).Errorf(format, args...) } +// Fatalf record a fatal level log. func (entry *Entry) Fatalf(format string, args ...interface{}) { (*logrus.Entry)(entry).Fatalf(format, args...) } +// Panicf record a panic level log. func (entry *Entry) Panicf(format string, args ...interface{}) { (*logrus.Entry)(entry).Panicf(format, args...) } // Entry Println family functions +// Debugln record a debug level log. func (entry *Entry) Debugln(args ...interface{}) { (*logrus.Entry)(entry).Debugln(args...) } +// Infoln record a info level log. 
func (entry *Entry) Infoln(args ...interface{}) { (*logrus.Entry)(entry).Infoln(args...) } +// Println record a non-level log. func (entry *Entry) Println(args ...interface{}) { (*logrus.Entry)(entry).Println(args...) } +// Warnln record a warning level log. func (entry *Entry) Warnln(args ...interface{}) { (*logrus.Entry)(entry).Warnln(args...) } +// Warningln record a warning level log. func (entry *Entry) Warningln(args ...interface{}) { (*logrus.Entry)(entry).Warningln(args...) } +// Errorln record a error level log. func (entry *Entry) Errorln(args ...interface{}) { (*logrus.Entry)(entry).Errorln(args...) } +// Fatalln record a fatal level log. func (entry *Entry) Fatalln(args ...interface{}) { (*logrus.Entry)(entry).Fatalln(args...) } +// Panicln record a panic level log. func (entry *Entry) Panicln(args ...interface{}) { (*logrus.Entry)(entry).Panicln(args...) } diff --git a/utils/log/logwrapper.go b/utils/log/logwrapper.go index 9bb99c5ff..684e04a8f 100644 --- a/utils/log/logwrapper.go +++ b/utils/log/logwrapper.go @@ -224,6 +224,7 @@ func WithFields(fields Fields) *Entry { return (*Entry)(logrus.WithFields(logrus.Fields(fields))) } +// WithTime add time fields to log entry. func WithTime(t time.Time) *Entry { return (*Entry)(logrus.WithTime(t)) } diff --git a/worker/dbms_mux.go b/worker/dbms_mux.go index 8209a5d20..bb9a897fa 100644 --- a/worker/dbms_mux.go +++ b/worker/dbms_mux.go @@ -27,14 +27,17 @@ import ( ) const ( + // DBKayakMethodName defines the database kayak rpc method name. DBKayakMethodName = "Call" ) +// DBKayakMuxService defines a mux service for sqlchain kayak. type DBKayakMuxService struct { serviceName string serviceMap sync.Map } +// NewDBKayakMuxService returns a new kayak mux service. 
func NewDBKayakMuxService(serviceName string, server *rpc.Server) (s *DBKayakMuxService, err error) { s = &DBKayakMuxService{ serviceName: serviceName, @@ -52,6 +55,7 @@ func (s *DBKayakMuxService) unregister(id proto.DatabaseID) { s.serviceMap.Delete(id) } +// Call handles kayak call. func (s *DBKayakMuxService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { // call apply to specified kayak // treat req.Instance as DatabaseID From ef4ce2f9c78f5e5eb1f989b2d27cbf378123362e Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 8 Nov 2018 10:37:28 +0800 Subject: [PATCH 32/32] Remove useless code --- kayak/rpc.go | 33 --------------------------------- 1 file changed, 33 deletions(-) delete mode 100644 kayak/rpc.go diff --git a/kayak/rpc.go b/kayak/rpc.go deleted file mode 100644 index 15d09784b..000000000 --- a/kayak/rpc.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kayak - -// RPCHandler handles rpc. -type muxService struct { -} - -func (h *muxService) Prepare() (err error) { - return -} - -func (h *muxService) Rollback() (err error) { - return -} - -func (h *muxService) Commit() (err error) { - return -}