diff --git a/p2p/node/peerManager/peerdb/counter_test.go b/p2p/node/peerManager/peerdb/counter_test.go index a5b4fe3bea..45024bb1c2 100644 --- a/p2p/node/peerManager/peerdb/counter_test.go +++ b/p2p/node/peerManager/peerdb/counter_test.go @@ -7,7 +7,8 @@ import ( "testing" "github.com/dominant-strategies/go-quai/cmd/utils" - "github.com/ipfs/go-datastore" + "github.com/dominant-strategies/go-quai/common" + datastore "github.com/ipfs/go-datastore" "github.com/spf13/viper" "github.com/stretchr/testify/require" ) @@ -15,9 +16,10 @@ import ( func TestCounter(t *testing.T) { viper.GetViper().Set(utils.DataDirFlag.Name, os.TempDir()) dbDir := "testdb" - + location := common.Location{0, 0} + locationName := location.Name() // creat a new peerdb - ps, err := NewPeerDB(dbDir) + ps, err := NewPeerDB(dbDir, locationName) require.NoError(t, err) // create 10 peers @@ -30,7 +32,7 @@ func TestCounter(t *testing.T) { require.NoError(t, err) // remove the db file - dbFile := viper.GetString(utils.DataDirFlag.Name) + "/" + dbDir + dbFile := viper.GetString(utils.DataDirFlag.Name) + "/" + locationName + dbDir err = os.RemoveAll(dbFile) require.NoError(t, err) }, @@ -54,7 +56,7 @@ func TestCounter(t *testing.T) { require.NoError(t, err) // reopen the db - ps, err = NewPeerDB(dbDir) + ps, err = NewPeerDB(dbDir, locationName) require.NoError(t, err) // verify the counter diff --git a/p2p/node/peerManager/peerdb/datastore_test.go b/p2p/node/peerManager/peerdb/datastore_test.go index b04adbb9e7..95d19435d8 100644 --- a/p2p/node/peerManager/peerdb/datastore_test.go +++ b/p2p/node/peerManager/peerdb/datastore_test.go @@ -3,6 +3,8 @@ package peerdb import ( "context" "encoding/json" + "math/rand" + "sync" "testing" datastore "github.com/ipfs/go-datastore" @@ -11,10 +13,8 @@ import ( ) func TestPeerDB_PutGetDeletePeer(t *testing.T) { - db, teardown := setupDB(t) - defer teardown() - - ps := &PeerDB{db: db} + ps, teardown := setupDB(t) + t.Cleanup(teardown) peerInfo := createPeers(t, 1)[0] @@ 
-50,10 +50,8 @@ func TestPeerDB_PutGetDeletePeer(t *testing.T) { } func TestHas(t *testing.T) { - db, teardown := setupDB(t) - defer teardown() - - ps := &PeerDB{db: db} + ps, teardown := setupDB(t) + t.Cleanup(teardown) peerInfo := createPeers(t, 1)[0] @@ -79,10 +77,8 @@ func TestHas(t *testing.T) { } func TestQuery(t *testing.T) { - db, teardown := setupDB(t) - defer teardown() - - ps := &PeerDB{db: db} + ps, teardown := setupDB(t) + t.Cleanup(teardown) peers := createPeers(t, 5) @@ -116,3 +112,158 @@ func TestQuery(t *testing.T) { require.Equal(t, key, "/"+peers[0].AddrInfo.ID.String()) }) } + +func TestGetSize(t *testing.T) { + ps, teardown := setupDB(t) + defer teardown() + + peers := createPeers(t, 5) + + keys := make([]datastore.Key, len(peers)) + + cases := []struct { + i int + Entropy uint64 + PubKey []byte + }{ + { + i: 0, + Entropy: uint64(12345), + PubKey: []byte(""), + }, + { + i: 1, + Entropy: uint64(1234567890), + PubKey: []byte("pub"), + }, + { + i: 2, + Entropy: uint64(1), + PubKey: []byte("pubkey"), + }, + { + i: 3, + Entropy: uint64(0), + PubKey: []byte("pubkey1234567890"), + }, + { + i: 4, + Entropy: uint64(12345678901234), + PubKey: []byte("pubkey12345678901234567890"), + }, + } + + var wg sync.WaitGroup + + // Add value to keys and test first time + for i, peer := range peers { + wg.Add(1) + go func(peer *PeerInfo, i int) { + defer wg.Done() + keys[i] = datastore.NewKey(peer.AddrInfo.ID.String()) + value, err := json.Marshal(peer) + require.NoError(t, err) + + err = ps.Put(context.Background(), keys[i], value) + require.NoError(t, err) + size, err := ps.GetSize(context.Background(), keys[i]) + require.NoError(t, err) + require.Equal(t, len(value), size) + }(peer, i) + } + wg.Wait() + + // Update keys in parallel and check if size is updated + for i, peer := range peers { + wg.Add(1) + go func(peer *PeerInfo, i int) { + defer wg.Done() + peer.Entropy = cases[i].Entropy + updateAndTest(t, ps, peer, keys[i]) + + peer.PubKey = cases[i].PubKey + 
updateAndTest(t, ps, peer, keys[i]) + }(peer, i) + } + wg.Wait() + + // Test with non-existent key + size, err := ps.GetSize(context.Background(), datastore.NewKey("non-existent-key")) + require.Error(t, err) + require.Equal(t, 0, size) +} + +func updateAndTest(t *testing.T, ps *PeerDB, peer *PeerInfo, key datastore.Key) { + value, err := json.Marshal(peer) + require.NoError(t, err) + + err = ps.Put(context.Background(), key, value) + require.NoError(t, err) + size, err := ps.GetSize(context.Background(), key) + require.NoError(t, err) + require.Equal(t, len(value), size) +} + +func TestMultipleDatabaseUpdates(t *testing.T) { + ps, teardown := setupDB(t) + defer teardown() + iterations := 10000 + + var wg sync.WaitGroup + wg.Add(1) + go func(t *testing.T, ps *PeerDB) { + defer wg.Done() + for i := 1; i <= iterations; i++ { + wg.Add(1) + go func(t *testing.T, ps *PeerDB) { + defer wg.Done() + peers := createPeers(t, 10) + for _, peer := range peers { + key := datastore.NewKey(peer.AddrInfo.ID.String()) + value, err := json.Marshal(peer) + require.NoError(t, err) + err = ps.Put(context.Background(), key, value) + require.NoError(t, err) + size, err := ps.GetSize(context.Background(), key) + require.NoError(t, err) + require.Equal(t, len(value), size) + err = ps.Delete(context.Background(), key) + require.NoError(t, err) + size, err = ps.GetSize(context.Background(), key) + require.Error(t, err) + require.Equal(t, 0, size) + } + }(t, ps) + } + }(t, ps) + + wg.Add(1) + go func(t *testing.T, ps *PeerDB) { + defer wg.Done() + for i := 1; i <= iterations; i++ { + wg.Add(1) + go func(t *testing.T, ps *PeerDB) { + peers := createPeers(t, 10) + defer wg.Done() + for _, peer := range peers { + // insert into database + key := datastore.NewKey(peer.AddrInfo.ID.String()) + value, err := json.Marshal(peer) + require.NoError(t, err) + err = ps.Put(context.Background(), key, value) + require.NoError(t, err) + size, err := ps.GetSize(context.Background(), key) + require.NoError(t, 
err) + require.Equal(t, len(value), size) + + // update the database + peer.Entropy = rand.Uint64() + updateAndTest(t, ps, peer, key) + peer.PubKey = []byte{byte(rand.Intn(256))} + updateAndTest(t, ps, peer, key) + } + }(t, ps) + } + }(t, ps) + wg.Wait() +} diff --git a/p2p/node/peerManager/peerdb/main_test.go b/p2p/node/peerManager/peerdb/main_test.go index 853f669de7..ff715ded4e 100644 --- a/p2p/node/peerManager/peerdb/main_test.go +++ b/p2p/node/peerManager/peerdb/main_test.go @@ -5,11 +5,13 @@ import ( "os" "testing" + "github.com/dominant-strategies/go-quai/cmd/utils" + "github.com/dominant-strategies/go-quai/common" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" + "github.com/spf13/viper" "github.com/stretchr/testify/require" - "github.com/syndtr/goleveldb/leveldb" ) func TestMain(m *testing.M) { @@ -20,16 +22,26 @@ func TestMain(m *testing.M) { // helper functions to run peerdb tests -func setupDB(t *testing.T) (*leveldb.DB, func()) { +func setupDB(t *testing.T) (*PeerDB, func()) { t.Helper() - dir := os.TempDir() + "peerstore_test" + viper.GetViper().Set(utils.DataDirFlag.Name, os.TempDir()) + dbDir := "testdb" + location := common.Location{0, 0} + locationName := location.Name() - db, err := leveldb.OpenFile(dir, nil) + // create a new peerdb + ps, err := NewPeerDB(dbDir, locationName) require.NoError(t, err) - return db, func() { - db.Close() - os.RemoveAll(dir) + return ps, func() { + // close the db + err := ps.Close() + require.NoError(t, err) + + // remove the db file + dbFile := viper.GetString(utils.DataDirFlag.Name) + "/" + locationName + dbDir + err = os.RemoveAll(dbFile) + require.NoError(t, err) } }