Skip to content

Commit

Permalink
Refactor hashmap-based memory representation to a radix-trie-based memory
Browse files Browse the repository at this point in the history
  • Loading branch information
mininny committed Aug 19, 2024
1 parent 07fbd06 commit bae1b25
Show file tree
Hide file tree
Showing 3 changed files with 526 additions and 133 deletions.
116 changes: 10 additions & 106 deletions rvgo/fast/memory.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"io"
"math/bits"
"sort"

"github.com/ethereum/go-ethereum/crypto"
Expand Down Expand Up @@ -39,11 +38,13 @@ var zeroHashes = func() [256][32]byte {

type Memory struct {
// generalized index -> merkle root or nil if invalidated
nodes map[uint64]*[32]byte

// pageIndex -> cached page

pages map[uint64]*CachedPage

radix *RadixNodeLevel1
branchFactors [5]uint64

// Note: since we don't de-alloc pages, we don't do ref-counting.
// Once a page exists, it doesn't leave memory

Expand All @@ -55,9 +56,11 @@ type Memory struct {

func NewMemory() *Memory {
return &Memory{
nodes: make(map[uint64]*[32]byte),
pages: make(map[uint64]*CachedPage),
lastPageKeys: [2]uint64{^uint64(0), ^uint64(0)}, // default to invalid keys, to not match any pages
//nodes: make(map[uint64]*[32]byte),
radix: &RadixNodeLevel1{},
pages: make(map[uint64]*CachedPage),
branchFactors: [5]uint64{BF1, BF2, BF3, BF4, BF5},
lastPageKeys: [2]uint64{^uint64(0), ^uint64(0)}, // default to invalid keys, to not match any pages
}
}

Expand All @@ -74,93 +77,6 @@ func (m *Memory) ForEachPage(fn func(pageIndex uint64, page *Page) error) error
return nil
}

// Invalidate marks the byte at addr as changed: the page entry covering the
// address is invalidated, and every cached intermediate hash on the path from
// that page leaf up to the memory root is cleared so the next merkleization
// recomputes it.
func (m *Memory) Invalidate(addr uint64) {
	p, ok := m.pageLookup(addr >> PageAddrSize)
	if !ok {
		// No page backs this address, so there is nothing cached to clear.
		return
	}
	wasValid := p.Ok[1]
	p.Invalidate(addr & PageAddrMask)
	if !wasValid {
		// The page was already invalid, so the node path to the mem-root is too.
		return
	}

	// Walk from the generalized index of the first page covering the address
	// up to the root, dropping every cached hash along the way.
	for gindex := (addr >> PageAddrSize) | (1 << (64 - PageAddrSize)); gindex > 0; gindex >>= 1 {
		m.nodes[gindex] = nil
	}
}

// MerkleizeSubtree computes (and caches) the merkle root of the subtree at
// the given generalized index. Subtrees that descend into a page delegate to
// the page's own merkleization; missing pages/nodes hash to the appropriate
// zero-subtree hash.
func (m *Memory) MerkleizeSubtree(gindex uint64) [32]byte {
	l := uint64(bits.Len64(gindex))
	if l > ProofLen {
		panic("gindex too deep")
	}
	if l > PageKeySize {
		// The gindex points inside a page: translate it to a page-local
		// gindex and let the page merkleize itself.
		depthIntoPage := l - 1 - PageKeySize
		pageIndex := (gindex >> depthIntoPage) & PageKeyMask
		if p, ok := m.pages[pageIndex]; ok {
			pageGindex := (1 << depthIntoPage) | (gindex & ((1 << depthIntoPage) - 1))
			return p.MerkleizeSubtree(pageGindex)
		}
		return zeroHashes[64-5+1-l] // page does not exist
	}
	// Note: at this point l <= PageKeySize, so the gindex addresses an
	// intermediate node above the pages, cached in m.nodes.
	n, ok := m.nodes[gindex]
	if !ok {
		// if the node doesn't exist, the whole sub-tree is zeroed
		return zeroHashes[64-5+1-l]
	}
	if n != nil {
		return *n // cached hash is still valid
	}
	// nil entry means "invalidated": recompute from the children and re-cache.
	left := m.MerkleizeSubtree(gindex << 1)
	right := m.MerkleizeSubtree((gindex << 1) | 1)
	r := HashPair(left, right)
	m.nodes[gindex] = &r
	return r
}

// MerkleProof returns the merkle proof for the leaf covering addr, encoded
// as ProofLen concatenated 32-byte nodes (leaf-subtree root first, then the
// sibling at each level up to the root).
func (m *Memory) MerkleProof(addr uint64) (out [ProofLen * 32]byte) {
	nodes := m.traverseBranch(1, addr, 0)
	// Flatten the list of 32-byte nodes into the fixed-size proof blob.
	for i, node := range nodes {
		copy(out[i*32:], node[:])
	}
	return out
}

// traverseBranch descends from parent toward the leaf selected by addr,
// collecting the proof bottom-up: the deepest subtree root first, followed
// by the sibling hash at each level on the way back to the root.
func (m *Memory) traverseBranch(parent uint64, addr uint64, depth uint8) [][32]byte {
	switch {
	case depth == ProofLen-1:
		// Deepest level: seed the proof with the subtree root itself.
		out := make([][32]byte, 0, ProofLen)
		return append(out, m.MerkleizeSubtree(parent))
	case depth > ProofLen-1:
		panic("traversed too deep")
	}
	left := parent << 1
	right := left | 1
	// The address bit at this depth selects the child to descend into;
	// the other child is the sibling recorded in the proof.
	down, sib := left, right
	if addr&(1<<(63-depth)) != 0 {
		down, sib = right, left
	}
	out := m.traverseBranch(down, addr, depth+1)
	return append(out, m.MerkleizeSubtree(sib))
}

// MerkleRoot returns the merkle root of the whole memory, i.e. the root of
// the subtree at generalized index 1.
func (m *Memory) MerkleRoot() [32]byte {
return m.MerkleizeSubtree(1)
}

func (m *Memory) pageLookup(pageIndex uint64) (*CachedPage, bool) {
// hit caches
if pageIndex == m.lastPageKeys[0] {
Expand Down Expand Up @@ -259,18 +175,6 @@ func (m *Memory) GetUnaligned(addr uint64, dest []byte) {
}
}

// AllocPage creates a fresh zeroed page at pageIndex, registers it in the
// page map, and clears the cached hashes on the path from the page leaf to
// the root so the next merkleization picks up the new page.
func (m *Memory) AllocPage(pageIndex uint64) *CachedPage {
	page := &CachedPage{Data: new(Page)}
	m.pages[pageIndex] = page
	// Invalidate every intermediate node between this page and the root.
	for k := (1 << PageKeySize) | pageIndex; k > 0; k >>= 1 {
		m.nodes[k] = nil
	}
	return page
}

type pageEntry struct {
Index uint64 `json:"index"`
Data *Page `json:"data"`
Expand All @@ -295,7 +199,7 @@ func (m *Memory) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &pages); err != nil {
return err
}
m.nodes = make(map[uint64]*[32]byte)
//m.nodes = make(map[uint64]*[32]byte)
m.pages = make(map[uint64]*CachedPage)
m.lastPageKeys = [2]uint64{^uint64(0), ^uint64(0)}
m.lastPage = [2]*CachedPage{nil, nil}
Expand Down
57 changes: 30 additions & 27 deletions rvgo/fast/memory_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,14 @@ func TestMemoryMerkleProof(t *testing.T) {
})
t.Run("fuller tree", func(t *testing.T) {
m := NewMemory()
m.SetUnaligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd})
m.SetUnaligned(0x80004, []byte{42})
m.SetUnaligned(0x13370000, []byte{123})
m.SetUnaligned(0x1002221234200, []byte{0xaa, 0xbb, 0xcc, 0xdd})
m.SetUnaligned(0x8002212342204, []byte{42})
m.SetUnaligned(0x1337022212342000, []byte{123})
root := m.MerkleRoot()
proof := m.MerkleProof(0x80004)
proof := m.MerkleProof(0x8002212342204)
require.Equal(t, uint32(42<<24), binary.BigEndian.Uint32(proof[4:8]))
node := *(*[32]byte)(proof[:32])
path := uint32(0x80004) >> 5
path := 0x8002212342204 >> 5
for i := 32; i < len(proof); i += 32 {
sib := *(*[32]byte)(proof[i : i+32])
if path&1 != 0 {
Expand Down Expand Up @@ -77,28 +77,31 @@ func TestMemoryMerkleRoot(t *testing.T) {
root := m.MerkleRoot()
require.Equal(t, zeroHashes[64-5], root, "zero still")
})
t.Run("random few pages", func(t *testing.T) {
m := NewMemory()
m.SetUnaligned(PageSize*3, []byte{1})
m.SetUnaligned(PageSize*5, []byte{42})
m.SetUnaligned(PageSize*6, []byte{123})
p3 := m.MerkleizeSubtree((1 << PageKeySize) | 3)
p5 := m.MerkleizeSubtree((1 << PageKeySize) | 5)
p6 := m.MerkleizeSubtree((1 << PageKeySize) | 6)
z := zeroHashes[PageAddrSize-5]
r1 := HashPair(
HashPair(
HashPair(z, z), // 0,1
HashPair(z, p3), // 2,3
),
HashPair(
HashPair(z, p5), // 4,5
HashPair(p6, z), // 6,7
),
)
r2 := m.MerkleizeSubtree(1 << (PageKeySize - 3))
require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func")
})

//t.Run("random few pages", func(t *testing.T) {
// m := NewMemory()
// m.SetUnaligned(PageSize*3, []byte{1})
// m.SetUnaligned(PageSize*5, []byte{42})
// m.SetUnaligned(PageSize*6, []byte{123})
// p3 := m.MerkleizeNode(m.radix, (1<<PageKeySize)|3, 0)
// p5 := m.MerkleizeNode(m.radix, (1<<PageKeySize)|5, 0)
// p6 := m.MerkleizeNode(m.radix, (1<<PageKeySize)|6, 0)
// z := zeroHashes[PageAddrSize-5]
// r1 := HashPair(
// HashPair(
// HashPair(z, z), // 0,1
// HashPair(z, p3), // 2,3
// ),
// HashPair(
// HashPair(z, p5), // 4,5
// HashPair(p6, z), // 6,7
// ),
// )
// r2 := m.MerkleizeNode(m.radix, 1<<(PageKeySize-3), 0)
// r3 := m.MerkleizeNode3(m.radix, 1, 0)
// require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func")
// require.Equal(t, r3, r2, "expecting manual page combination to match subtree merkle func")
//})
t.Run("invalidate page", func(t *testing.T) {
m := NewMemory()
m.SetUnaligned(0xF000, []byte{0})
Expand Down
Loading

0 comments on commit bae1b25

Please sign in to comment.