From bb5afa109e990bf3c63acf4f8ceaf9ed8b64189d Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Sun, 2 Apr 2023 17:40:17 +0200 Subject: [PATCH 01/10] updated dependencies --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 871ed60..00c8222 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( github.com/longbridgeapp/assert v1.1.0 - github.com/redis/go-redis/v9 v9.0.2 + github.com/redis/go-redis/v9 v9.0.3 github.com/shamaton/msgpack/v2 v2.1.1 github.com/ugorji/go/codec v1.2.11 ) diff --git a/go.sum b/go.sum index 306bd3e..a682fa0 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -github.com/bsm/ginkgo/v2 v2.5.0 h1:aOAnND1T40wEdAtkGSkvSICWeQ8L3UASX7YVCqQx+eQ= -github.com/bsm/gomega v1.20.0 h1:JhAwLmtRzXFTx2AkALSLa8ijZafntmhSoU63Ok18Uq8= +github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao= +github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -11,8 +11,8 @@ github.com/longbridgeapp/assert v1.1.0 h1:L+/HISOhuGbNAAmJNXgk3+Tm5QmSB70kwdktJX github.com/longbridgeapp/assert v1.1.0/go.mod h1:UOI7O3rzlzlz715lQm0atWs6JbrYGuIJUEeOekutL6o= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/redis/go-redis/v9 v9.0.2 h1:BA426Zqe/7r56kCcvxYLWe1mkaz71LKF77GwgFzSxfE= -github.com/redis/go-redis/v9 v9.0.2/go.mod h1:/xDTe9EF1LM61hek62Poq2nzQSGj0xSrEtEHbBQevps= +github.com/redis/go-redis/v9 v9.0.3 h1:+7mmR26M0IvyLxGZUHxu4GiBkJkVDid0Un+j4ScYu4k= +github.com/redis/go-redis/v9 v9.0.3/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= 
github.com/shamaton/msgpack/v2 v2.1.1 h1:gAMxOtVJz93R0EwewwUc8tx30n34aV6BzJuwHE8ogAk= github.com/shamaton/msgpack/v2 v2.1.1/go.mod h1:aTUEmh31ziGX1Ml7wMPLVY0f4vT3CRsCvZRoSCs+VGg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= From c43da8e17d1c747769c8b3bcb7589a53e84e1458 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Sun, 2 Apr 2023 20:38:49 +0200 Subject: [PATCH 02/10] improved eviction algos --- eviction/arc.go | 13 ++- eviction/clock.go | 84 +++++++++------- eviction/eviction.go | 4 +- eviction/lfu.go | 174 ++++++++++++++++------------------ eviction/lru.go | 57 ++++++----- examples/eviction/eviction.go | 1 - examples/redis/redis.go | 1 - hypercache.go | 2 +- utils/types.go | 5 - 9 files changed, 176 insertions(+), 165 deletions(-) diff --git a/eviction/arc.go b/eviction/arc.go index f8a8abe..fc17d64 100644 --- a/eviction/arc.go +++ b/eviction/arc.go @@ -25,9 +25,9 @@ type ARC struct { mutex sync.RWMutex // mutex is a read-write mutex that protects the cache } -// NewARC creates a new in-memory cache with the given capacity and the Adaptive Replacement Cache (ARC) algorithm. +// NewARCAlgorithm creates a new in-memory cache with the given capacity and the Adaptive Replacement Cache (ARC) algorithm. // If the capacity is negative, it returns an error. -func NewARC(capacity int) (*ARC, error) { +func NewARCAlgorithm(capacity int) (*ARC, error) { if capacity < 0 { return nil, errors.ErrInvalidCapacity } @@ -46,26 +46,29 @@ func NewARC(capacity int) (*ARC, error) { // If the key is not found in the cache, it returns nil. 
func (arc *ARC) Get(key string) (any, bool) { arc.mutex.RLock() - defer arc.mutex.RUnlock() // Check t1 item, ok := arc.t1[key] if ok { + arc.mutex.RUnlock() arc.promote(key) return item.Value, true } // Check t2 item, ok = arc.t2[key] if ok { + arc.mutex.RUnlock() arc.demote(key) return item.Value, true } - // arc.mutex.RUnlock() + arc.mutex.RUnlock() return nil, false } // Promote moves the item with the given key from t2 to t1. func (arc *ARC) promote(key string) { + arc.mutex.Lock() + defer arc.mutex.Unlock() item, ok := arc.t2[key] if !ok { return @@ -80,6 +83,8 @@ func (arc *ARC) promote(key string) { // Demote moves the item with the given key from t1 to t2. func (arc *ARC) demote(key string) { + arc.mutex.Lock() + defer arc.mutex.Unlock() item, ok := arc.t1[key] if !ok { return diff --git a/eviction/clock.go b/eviction/clock.go index 2ebefd4..51e4619 100644 --- a/eviction/clock.go +++ b/eviction/clock.go @@ -8,7 +8,6 @@ package eviction import ( "sync" - "time" "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/models" @@ -16,9 +15,12 @@ import ( // ClockAlgorithm is an in-memory cache with the Clock algorithm. type ClockAlgorithm struct { - items map[string]*models.Item - mutex sync.RWMutex - capacity int + items []*models.Item + keys map[string]int + mutex sync.RWMutex + evictMutex sync.Mutex + capacity int + hand int } // NewClockAlgorithm creates a new in-memory cache with the given capacity and the Clock algorithm. @@ -28,66 +30,82 @@ func NewClockAlgorithm(capacity int) (*ClockAlgorithm, error) { } return &ClockAlgorithm{ - items: make(map[string]*models.Item, capacity), + items: make([]*models.Item, capacity), + keys: make(map[string]int, capacity), capacity: capacity, + hand: 0, }, nil } -// Evict evicts the least recently used item from the cache. +// Evict evicts an item from the cache based on the Clock algorithm. 
func (c *ClockAlgorithm) Evict() (string, bool) { - c.mutex.RLock() - defer c.mutex.RUnlock() - - var oldestKey string - oldestTime := time.Now() - for key, item := range c.items { - if item.LastAccess.Before(oldestTime) { - oldestTime = item.LastAccess - oldestKey = key + c.evictMutex.Lock() + defer c.evictMutex.Unlock() + + for i := 0; i < c.capacity; i++ { + item := c.items[c.hand] + if item == nil { + c.hand = (c.hand + 1) % c.capacity + continue } + if item.AccessCount > 0 { + item.AccessCount-- + } else { + delete(c.keys, item.Key) + models.ItemPool.Put(item) + c.items[c.hand] = nil + return item.Key, true + } + c.hand = (c.hand + 1) % c.capacity } - if oldestKey == "" { - return "", false - } - c.Delete(oldestKey) - return oldestKey, true + return "", false } // Set sets the item with the given key and value in the cache. func (c *ClockAlgorithm) Set(key string, value any) { - // c.mutex.RLock() - // defer c.mutex.RUnlock() + c.mutex.Lock() + defer c.mutex.Unlock() + + evictedKey, ok := c.Evict() + if ok { + c.Delete(evictedKey) + } item := models.ItemPool.Get().(*models.Item) + item.Key = key item.Value = value - item.LastAccess = time.Now() - item.AccessCount = 0 - c.items[key] = item + item.AccessCount = 1 + + c.keys[key] = c.hand + c.items[c.hand] = item + c.hand = (c.hand + 1) % c.capacity } // Get retrieves the item with the given key from the cache. func (c *ClockAlgorithm) Get(key string) (any, bool) { - c.mutex.RLock() - defer c.mutex.RUnlock() + c.mutex.Lock() + defer c.mutex.Unlock() - item, ok := c.items[key] + index, ok := c.keys[key] if !ok { return nil, false } - item.LastAccess = time.Now() + item := c.items[index] item.AccessCount++ return item.Value, true } // Delete deletes the item with the given key from the cache. 
func (c *ClockAlgorithm) Delete(key string) { - // c.mutex.RLock() - // defer c.mutex.RUnlock() + c.evictMutex.Lock() + defer c.evictMutex.Unlock() - item, ok := c.items[key] + index, ok := c.keys[key] if !ok { return } - delete(c.items, key) + item := c.items[index] + delete(c.keys, key) + c.items[index] = nil models.ItemPool.Put(item) } diff --git a/eviction/eviction.go b/eviction/eviction.go index 51aba41..5b0929a 100644 --- a/eviction/eviction.go +++ b/eviction/eviction.go @@ -57,10 +57,10 @@ func init() { // Define the default eviction algorithms. algorithms := map[string]func(capacity int) (IAlgorithm, error){ "arc": func(capacity int) (IAlgorithm, error) { - return NewARC(capacity) + return NewARCAlgorithm(capacity) }, "lru": func(capacity int) (IAlgorithm, error) { - return NewLRU(capacity) + return NewLRUAlgorithm(capacity) }, "clock": func(capacity int) (IAlgorithm, error) { return NewClockAlgorithm(capacity) diff --git a/eviction/lfu.go b/eviction/lfu.go index b6c82dc..d130125 100644 --- a/eviction/lfu.go +++ b/eviction/lfu.go @@ -3,6 +3,7 @@ package eviction // Least Frequently Used (LFU) eviction algorithm implementation import ( + "container/heap" "sync" "github.com/hyp3rd/hypercache/errors" @@ -10,36 +11,61 @@ import ( // LFUAlgorithm is an eviction algorithm that uses the Least Frequently Used (LFU) policy to select items for eviction. type LFUAlgorithm struct { - items map[string]*Node // map to store the items in the cache - list *LinkedList // linked list to store the items in the cache, with the most frequently used items at the front - mutex sync.RWMutex // mutex to protect the linked list - length int // number of items in the cache - cap int // capacity of the cache + items map[string]*Node + freqs *FrequencyHeap + mutex sync.RWMutex + length int + cap int } -// Node is a struct that represents a node in the linked list. It has a key, value, and access count field. +// Node is a node in the LFUAlgorithm. 
type Node struct { - key string // key of the item - value any // value of the item - count int // number of times the item has been accessed - prev *Node // previous node in the linked list - next *Node // next node in the linked list + key string + value any + count int + index int } -// LFUNodePool is a pool of Node values. -var LFUNodePool = sync.Pool{ - New: func() interface{} { - return &Node{} - }, +// FrequencyHeap is a heap of Nodes. +type FrequencyHeap []*Node + +// Len returns the length of the heap. +func (fh FrequencyHeap) Len() int { return len(fh) } + +// Less returns true if the node at index i has a lower frequency than the node at index j. +func (fh FrequencyHeap) Less(i, j int) bool { + if fh[i].count == fh[j].count { + return fh[i].index < fh[j].index + } + return fh[i].count < fh[j].count +} + +// Swap swaps the nodes at index i and j. +func (fh FrequencyHeap) Swap(i, j int) { + fh[i], fh[j] = fh[j], fh[i] + fh[i].index = i + fh[j].index = j } -// LinkedList is a struct that represents a linked list. It has a head and tail field. -type LinkedList struct { - head *Node // head of the linked list - tail *Node // tail of the linked list +// Push adds a node to the heap. +func (fh *FrequencyHeap) Push(x interface{}) { + n := len(*fh) + node := x.(*Node) + node.index = n + *fh = append(*fh, node) } -// NewLFUAlgorithm returns a new LFUAlgorithm with the given capacity. +// Pop removes the last node from the heap. +func (fh *FrequencyHeap) Pop() interface{} { + old := *fh + n := len(old) + node := old[n-1] + node.index = -1 + *fh = old[0 : n-1] + return node +} + +// NewLFUAlgorithm creates a new LFUAlgorithm with the given capacity. 
func NewLFUAlgorithm(capacity int) (*LFUAlgorithm, error) { if capacity < 0 { return nil, errors.ErrInvalidCapacity @@ -47,118 +73,80 @@ func NewLFUAlgorithm(capacity int) (*LFUAlgorithm, error) { return &LFUAlgorithm{ items: make(map[string]*Node, capacity), - list: &LinkedList{}, + freqs: &FrequencyHeap{}, length: 0, cap: capacity, }, nil } -// Evict returns the next item to be evicted from the cache. -func (l *LFUAlgorithm) Evict() (string, bool) { - l.mutex.RLock() - defer l.mutex.RUnlock() - // if the cache is empty, return an error +// internalEvict evicts an item from the cache based on the LFU algorithm. +func (l *LFUAlgorithm) internalEvict() (string, bool) { if l.length == 0 { return "", false } - // remove the least frequently used item from the cache - node := l.list.tail - l.remove(node) + node := heap.Pop(l.freqs).(*Node) delete(l.items, node.key) l.length-- return node.key, true } -// Set adds a new item to the cache with the given key. +// Evict evicts an item from the cache based on the LFU algorithm. +func (l *LFUAlgorithm) Evict() (string, bool) { + l.mutex.Lock() + defer l.mutex.Unlock() + return l.internalEvict() +} + +// Set sets a key-value pair in the cache. func (l *LFUAlgorithm) Set(key string, value any) { l.mutex.Lock() - // if the cache is full, evict an item + defer l.mutex.Unlock() + if l.length == l.cap { - l.mutex.Unlock() // unlock before evicting to avoid deadlock - _, _ = l.Evict() // evict an item - l.mutex.Lock() // lock again + _, _ = l.internalEvict() } - // add the new item to the cache - node := LFUNodePool.Get().(*Node) - node.count = 1 - node.value = value - + node := &Node{ + key: key, + value: value, + count: 1, + } l.items[key] = node - l.addToFront(node) + heap.Push(l.freqs, node) l.length++ - l.mutex.Unlock() } -// Get returns the value for the given key from the cache. If the key is not in the cache, it returns false. +// Get gets a value from the cache. 
func (l *LFUAlgorithm) Get(key string) (any, bool) { - l.mutex.RLock() - defer l.mutex.RUnlock() + l.mutex.Lock() + defer l.mutex.Unlock() + node, ok := l.items[key] if !ok { return nil, false } + node.count++ - l.moveToFront(node) + heap.Fix(l.freqs, node.index) return node.value, true } -// remove removes the given node from the linked list. -func (l *LFUAlgorithm) remove(node *Node) { - if node == l.list.head && node == l.list.tail { - l.list.head = nil - l.list.tail = nil - } else if node == l.list.head { - l.list.head = node.next - node.next.prev = nil - } else if node == l.list.tail { - l.list.tail = node.prev - node.prev.next = nil - } else { - node.prev.next = node.next - node.next.prev = node.prev - } - delete(l.items, node.key) - l.length-- - LFUNodePool.Put(node) -} - -// addToFront adds the given node to the front of the linked list. -func (l *LFUAlgorithm) addToFront(node *Node) { - node.next = l.list.head - node.prev = nil - if l.list.head != nil { - l.list.head.prev = node - } - l.list.head = node - if l.list.tail == nil { - l.list.tail = node - } - l.items[node.key] = node - l.length++ -} - -// moveToFront moves the given node to the front of the linked list. -func (l *LFUAlgorithm) moveToFront(node *Node) { - if node == l.list.head { - return - } - l.remove(node) - l.addToFront(node) -} - -// Delete removes the given key from the cache. +// Delete deletes a key-value pair from the cache. 
func (l *LFUAlgorithm) Delete(key string) { l.mutex.Lock() defer l.mutex.Unlock() + node, ok := l.items[key] if !ok { return } - l.remove(node) + + heap.Remove(l.freqs, node.index) + for i := node.index; i < len(*l.freqs); i++ { + (*l.freqs)[i].index-- + } delete(l.items, key) l.length-- - LFUNodePool.Put(node) } diff --git a/eviction/lru.go b/eviction/lru.go index aae04ba..b6e194d 100644 --- a/eviction/lru.go +++ b/eviction/lru.go @@ -16,45 +16,45 @@ import ( "github.com/hyp3rd/hypercache/errors" ) -// LRUCacheItem represents an item in the LRU cache -type LRUCacheItem struct { +// lruCacheItem represents an item in the LRU cache +type lruCacheItem struct { Key string Value any - prev *LRUCacheItem - next *LRUCacheItem + prev *lruCacheItem + next *lruCacheItem } // LRUCacheItemmPool is a pool of LRUCacheItemm values. var LRUCacheItemmPool = sync.Pool{ New: func() interface{} { - return &LRUCacheItem{} + return &lruCacheItem{} }, } // LRU represents a LRU cache type LRU struct { - capacity int // The maximum number of items in the cache - items map[string]*LRUCacheItem // The items in the cache - head *LRUCacheItem // The head of the linked list - tail *LRUCacheItem // The tail of the linked list - mutex sync.RWMutex // The mutex used to protect the cache + capacity int // The maximum number of items in the cache + items map[string]*lruCacheItem // The items in the cache + head *lruCacheItem // The head of the linked list + tail *lruCacheItem // The tail of the linked list + sync.RWMutex // The mutex used to protect the cache } -// NewLRU creates a new LRU cache with the given capacity -func NewLRU(capacity int) (*LRU, error) { +// NewLRUAlgorithm creates a new LRU cache with the given capacity +func NewLRUAlgorithm(capacity int) (*LRU, error) { if capacity < 0 { return nil, errors.ErrInvalidCapacity } return &LRU{ capacity: capacity, - items: make(map[string]*LRUCacheItem, capacity), + items: make(map[string]*lruCacheItem, capacity), }, nil } // Get retrieves the 
value for the given key from the cache. If the key is not func (lru *LRU) Get(key string) (any, bool) { - lru.mutex.RLock() - defer lru.mutex.RUnlock() + lru.RLock() + defer lru.RUnlock() item, ok := lru.items[key] if !ok { return nil, false @@ -66,8 +66,8 @@ func (lru *LRU) Get(key string) (any, bool) { // Set sets the value for the given key in the cache. If the key is not already in the cache, it is added. // If the cache is full, the least recently used item is evicted. func (lru *LRU) Set(key string, value any) { - lru.mutex.Lock() - defer lru.mutex.Unlock() + lru.Lock() + defer lru.Unlock() item, ok := lru.items[key] if ok { item.Value = value @@ -80,7 +80,7 @@ func (lru *LRU) Set(key string, value any) { } // get a new item from the pool - item = LRUCacheItemmPool.Get().(*LRUCacheItem) + item = LRUCacheItemmPool.Get().(*lruCacheItem) item.Key = key item.Value = value @@ -91,8 +91,8 @@ func (lru *LRU) Set(key string, value any) { // Evict removes the least recently used item from the cache and returns its key. func (lru *LRU) Evict() (string, bool) { - lru.mutex.Lock() - defer lru.mutex.Unlock() + lru.Lock() + defer lru.Unlock() if lru.tail == nil { return "", false } @@ -105,8 +105,8 @@ func (lru *LRU) Evict() (string, bool) { // Delete removes the given key from the cache. func (lru *LRU) Delete(key string) { - lru.mutex.Lock() - defer lru.mutex.Unlock() + lru.Lock() + defer lru.Unlock() item, ok := lru.items[key] if !ok { return @@ -115,8 +115,15 @@ func (lru *LRU) Delete(key string) { delete(lru.items, key) } +// Len returns the number of items in the cache. +func (lru *LRU) Len() int { + lru.RLock() + defer lru.RUnlock() + return len(lru.items) +} + // moveToFront moves the given item to the front of the list. 
-func (lru *LRU) moveToFront(item *LRUCacheItem) { +func (lru *LRU) moveToFront(item *lruCacheItem) { if item == lru.head { return } @@ -125,7 +132,7 @@ func (lru *LRU) moveToFront(item *LRUCacheItem) { } // removeFromList removes the given item from the list. -func (lru *LRU) removeFromList(item *LRUCacheItem) { +func (lru *LRU) removeFromList(item *lruCacheItem) { if item == lru.head { lru.head = item.next } else { @@ -141,7 +148,7 @@ func (lru *LRU) removeFromList(item *LRUCacheItem) { } // addToFront adds the given item to the front of the list. -func (lru *LRU) addToFront(item *LRUCacheItem) { +func (lru *LRU) addToFront(item *lruCacheItem) { if lru.head == nil { lru.head = item lru.tail = item diff --git a/examples/eviction/eviction.go b/examples/eviction/eviction.go index b148fb8..e9b10b5 100644 --- a/examples/eviction/eviction.go +++ b/examples/eviction/eviction.go @@ -26,7 +26,6 @@ func executeExample(evictionInterval time.Duration) { config := hypercache.NewConfig[backend.InMemory]("in-memory") config.HyperCacheOptions = []hypercache.Option[backend.InMemory]{ hypercache.WithEvictionInterval[backend.InMemory](evictionInterval), - hypercache.WithEvictionAlgorithm[backend.InMemory]("cawolfu"), } config.InMemoryOptions = []backend.Option[backend.InMemory]{ diff --git a/examples/redis/redis.go b/examples/redis/redis.go index 08ac661..7d9e370 100644 --- a/examples/redis/redis.go +++ b/examples/redis/redis.go @@ -31,7 +31,6 @@ func main() { }, HyperCacheOptions: []hypercache.Option[backend.Redis]{ hypercache.WithEvictionInterval[backend.Redis](time.Second * 5), - hypercache.WithEvictionAlgorithm[backend.Redis]("clock"), }, } diff --git a/hypercache.go b/hypercache.go index d2e4dcc..cb63239 100644 --- a/hypercache.go +++ b/hypercache.go @@ -141,7 +141,7 @@ func New[T backend.IBackendConstrain](config *Config[T]) (hyperCache *HyperCache // Initialize the eviction algorithm if hyperCache.evictionAlgorithmName == "" { // Use the default eviction algorithm if none is 
specified - hyperCache.evictionAlgorithm, err = eviction.NewCAWOLFU(int(hyperCache.maxEvictionCount)) + hyperCache.evictionAlgorithm, err = eviction.NewLRUAlgorithm(int(hyperCache.maxEvictionCount)) } else { // Use the specified eviction algorithm hyperCache.evictionAlgorithm, err = eviction.NewEvictionAlgorithm(hyperCache.evictionAlgorithmName, int(hyperCache.maxEvictionCount)) diff --git a/utils/types.go b/utils/types.go index 770598c..ac85d58 100644 --- a/utils/types.go +++ b/utils/types.go @@ -37,8 +37,3 @@ func (c *CacheBackendChecker[T]) IsRedis() bool { _, ok := c.Backend.(*backend.Redis) return ok } - -// func (c *CacheBackendChecker[T]) IsRedisBackend() (backend.Redis, bool) { -// obj, ok := c.backend.(*backend.Redis) -// return *obj, ok -// } From 7431402e8c4dc5bd8613dedb5e50a628e220ace9 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Sun, 2 Apr 2023 21:27:20 +0200 Subject: [PATCH 03/10] implemented factory pattern for the backends --- examples/eviction/eviction.go | 2 +- examples/redis/redis.go | 2 +- examples/size/size.go | 2 +- examples/stats/stats.go | 2 +- factory.go | 65 +++++++++++++------ hypercache.go | 27 ++++---- hypercache_test.go | 9 ++- .../hypercache_get_benchmark_test.go | 2 +- .../hypercache_set_benchmark_test.go | 2 +- tests/hypercache_get_multiple_test.go | 3 +- 10 files changed, 74 insertions(+), 42 deletions(-) diff --git a/examples/eviction/eviction.go b/examples/eviction/eviction.go index e9b10b5..e76f701 100644 --- a/examples/eviction/eviction.go +++ b/examples/eviction/eviction.go @@ -33,7 +33,7 @@ func executeExample(evictionInterval time.Duration) { } // Create a new HyperCache with a capacity of 10 - cache, err := hypercache.New(config) + cache, err := hypercache.New(hypercache.GetDefaultManager(), config) if err != nil { fmt.Println(err) return diff --git a/examples/redis/redis.go b/examples/redis/redis.go index 7d9e370..272cf1c 100644 --- a/examples/redis/redis.go +++ b/examples/redis/redis.go @@ -34,7 +34,7 @@ func 
main() { }, } - hyperCache, err := hypercache.New(conf) + hyperCache, err := hypercache.New(hypercache.GetDefaultManager(), conf) if err != nil { panic(err) } diff --git a/examples/size/size.go b/examples/size/size.go index 38e8eb2..35f13cb 100644 --- a/examples/size/size.go +++ b/examples/size/size.go @@ -22,7 +22,7 @@ func main() { } // Create a new HyperCache with a capacity of 10 - cache, err := hypercache.New(config) + cache, err := hypercache.New(hypercache.GetDefaultManager(), config) if err != nil { panic(err) diff --git a/examples/stats/stats.go b/examples/stats/stats.go index 4a6554a..3ba56f3 100644 --- a/examples/stats/stats.go +++ b/examples/stats/stats.go @@ -25,7 +25,7 @@ func main() { } // Create a new HyperCache with a capacity of 10 - hyperCache, err := hypercache.New(config) + hyperCache, err := hypercache.New(hypercache.GetDefaultManager(), config) if err != nil { fmt.Println(err) return diff --git a/factory.go b/factory.go index 4b33b88..bbbb0bc 100644 --- a/factory.go +++ b/factory.go @@ -5,36 +5,61 @@ import ( "github.com/hyp3rd/hypercache/errors" ) -// BackendConstructor is a function that returns a new backend instance -type BackendConstructor[T backend.IBackendConstrain] func(config *Config[T]) (backend.IBackend[T], error) +// IBackendConstructor is an interface for backend constructors +type IBackendConstructor interface { + Create(config interface{}) (interface{}, error) +} -// RegisterBackend registers a new backend constructor -var backendRegistry = make(map[string]interface{}) +// InMemoryBackendConstructor is a backend constructor for InMemory +type InMemoryBackendConstructor struct{} -// RegisterBackend registers a new backend constructor with the given name. 
-func RegisterBackend[T backend.IBackendConstrain](name string, constructor BackendConstructor[T]) { - backendRegistry[name] = constructor +// Create creates a new InMemory backend +func (ibc InMemoryBackendConstructor) Create(config interface{}) (interface{}, error) { + inMemoryConfig, ok := config.(*Config[backend.InMemory]) + if !ok { + return nil, errors.ErrInvalidBackendType + } + return backend.NewInMemory(inMemoryConfig.InMemoryOptions...) } -// GetBackend returns a new backend instance with the given name. -func GetBackend[T backend.IBackendConstrain](name string, config *Config[T]) (backend.IBackend[T], error) { - if constructor, ok := backendRegistry[name]; ok { - return constructor.(BackendConstructor[T])(config) +// RedisBackendConstructor is a backend constructor for Redis +type RedisBackendConstructor struct{} + +// Create creates a new Redis backend +func (rbc RedisBackendConstructor) Create(config interface{}) (interface{}, error) { + redisConfig, ok := config.(*Config[backend.Redis]) + if !ok { + return nil, errors.ErrInvalidBackendType } - return nil, errors.ErrBackendNotFound + return backend.NewRedis(redisConfig.RedisOptions...) } -// Wrapper functions for the constructors -func newInMemoryWrapper(config *Config[backend.InMemory]) (backend.IBackend[backend.InMemory], error) { - return backend.NewInMemory(config.InMemoryOptions...) +// HyperCacheManager is a factory for creating HyperCache backend instances +type HyperCacheManager struct { + backendRegistry map[string]IBackendConstructor } -func newRedisWrapper(config *Config[backend.Redis]) (backend.IBackend[backend.Redis], error) { - return backend.NewRedis(config.RedisOptions...) 
+// NewHyperCacheManager creates a new HyperCacheManager +func NewHyperCacheManager() *HyperCacheManager { + return &HyperCacheManager{ + backendRegistry: make(map[string]IBackendConstructor), + } +} + +// RegisterBackend registers a new backend constructor +func (hcm *HyperCacheManager) RegisterBackend(name string, constructor IBackendConstructor) { + hcm.backendRegistry[name] = constructor +} + +var defaultManager *HyperCacheManager + +// GetDefaultManager returns the default HyperCacheManager +func GetDefaultManager() *HyperCacheManager { + return defaultManager } -// Register the default backends. func init() { - RegisterBackend("in-memory", newInMemoryWrapper) - RegisterBackend("redis", newRedisWrapper) + defaultManager = NewHyperCacheManager() + defaultManager.RegisterBackend("in-memory", InMemoryBackendConstructor{}) + defaultManager.RegisterBackend("redis", RedisBackendConstructor{}) } diff --git a/hypercache.go b/hypercache.go index cb63239..f56f9cd 100644 --- a/hypercache.go +++ b/hypercache.go @@ -80,8 +80,11 @@ func NewInMemoryWithDefaults(capacity int) (hyperCache *HyperCache[backend.InMem config.InMemoryOptions = []backend.Option[backend.InMemory]{ backend.WithCapacity[backend.InMemory](capacity), } + + hcm := GetDefaultManager() + // Initialize the cache - hyperCache, err = New(config) + hyperCache, err = New(hcm, config) if err != nil { return nil, err } @@ -94,25 +97,25 @@ func NewInMemoryWithDefaults(capacity int) (hyperCache *HyperCache[backend.InMem // - The eviction algorithm is set to CAWOLFU. // - The expiration interval is set to 30 minutes. // - The stats collector is set to the HistogramStatsCollector stats collector. 
-func New[T backend.IBackendConstrain](config *Config[T]) (hyperCache *HyperCache[T], err error) { +func New[T backend.IBackendConstrain](hcm *HyperCacheManager, config *Config[T]) (hyperCache *HyperCache[T], err error) { // Get the backend constructor from the registry - constructor, exists := backendRegistry[config.BackendType] - // Check if the backend type is registered + constructor, exists := hcm.backendRegistry[config.BackendType] if !exists { return nil, fmt.Errorf("unknown backend type: %s", config.BackendType) } - // Check if the backend constructor is valid - typedConstructor, ok := constructor.(BackendConstructor[T]) - if !ok { - return nil, errors.ErrInvalidBackendType - } - // Initialize the backend - backend, err := typedConstructor(config) + + // Create the backend + backendInstance, err := constructor.Create(config) if err != nil { - // Return the error return nil, err } + // Check if the backend implements the IBackend interface + backend, ok := backendInstance.(backend.IBackend[T]) + if !ok { + return nil, errors.ErrInvalidBackendType + } + // Initialize the cache hyperCache = &HyperCache[T]{ backend: backend, diff --git a/hypercache_test.go b/hypercache_test.go index d7e14df..5a25220 100644 --- a/hypercache_test.go +++ b/hypercache_test.go @@ -57,7 +57,8 @@ func TestHyperCache_WithExpirationInterval(t *testing.T) { }, } // Test with custom expiration interval - cache, err = New(config) + hcm := GetDefaultManager() + cache, err = New(hcm, config) assert.Nil(t, err) assert.Equal(t, 1*time.Hour, cache.expirationInterval) } @@ -78,8 +79,9 @@ func TestHyperCache_WithEvictionInterval(t *testing.T) { backend.WithCapacity[backend.InMemory](10), }, } + hcm := GetDefaultManager() // Test with custom eviction interval - cache, err = New(config) + cache, err = New(hcm, config) assert.Nil(t, err) assert.Equal(t, 1*time.Hour, cache.evictionInterval) } @@ -101,7 +103,8 @@ func TestHyperCache_WithMaxEvictionCount(t *testing.T) { 
backend.WithCapacity[backend.InMemory](10), }, } - cache, err = New(config) + hcm := GetDefaultManager() + cache, err = New(hcm, config) assert.Nil(t, err) assert.Equal(t, uint(5), cache.maxEvictionCount) } diff --git a/tests/benchmark/hypercache_get_benchmark_test.go b/tests/benchmark/hypercache_get_benchmark_test.go index 3e13eb8..273fbd4 100644 --- a/tests/benchmark/hypercache_get_benchmark_test.go +++ b/tests/benchmark/hypercache_get_benchmark_test.go @@ -35,7 +35,7 @@ func BenchmarkHyperCache_Get_ProactiveEviction(b *testing.B) { } // Create a new HyperCache with a capacity of 10 - cache, _ := hypercache.New(config) + cache, _ := hypercache.New(hypercache.GetDefaultManager(), config) // Store a value in the cache with a key and expiration duration cache.Set("key", "value", time.Hour) diff --git a/tests/benchmark/hypercache_set_benchmark_test.go b/tests/benchmark/hypercache_set_benchmark_test.go index d66f43f..718823c 100644 --- a/tests/benchmark/hypercache_set_benchmark_test.go +++ b/tests/benchmark/hypercache_set_benchmark_test.go @@ -33,7 +33,7 @@ func BenchmarkHyperCache_Set_Proactive_Eviction(b *testing.B) { } // Create a new HyperCache with a capacity of 10 - cache, _ := hypercache.New(config) + cache, _ := hypercache.New(hypercache.GetDefaultManager(), config) b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/tests/hypercache_get_multiple_test.go b/tests/hypercache_get_multiple_test.go index aec71ec..ae62d83 100644 --- a/tests/hypercache_get_multiple_test.go +++ b/tests/hypercache_get_multiple_test.go @@ -64,7 +64,8 @@ func TestGetMultiple(t *testing.T) { backend.WithCapacity[backend.InMemory](10), }, } - cache, err := hypercache.New(config) + hypercache.GetDefaultManager() + cache, err := hypercache.New(hypercache.GetDefaultManager(), config) assert.Nil(t, err) test.setup(cache) From 81aedd76a29ed5cdb4f7ea4ed188ef77d9d5605c Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Sun, 2 Apr 2023 21:34:03 +0200 Subject: [PATCH 04/10] lint --- 
backend/inmemory.go | 2 +- factory.go | 20 ++++++++++---------- hypercache.go | 2 +- pool.go | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/backend/inmemory.go b/backend/inmemory.go index 31c5625..1319666 100644 --- a/backend/inmemory.go +++ b/backend/inmemory.go @@ -78,7 +78,7 @@ func (cacheBackend *InMemory) Set(item *models.Item) error { return nil } -// List returns a list of all items in the cache filtered and ordered by the given options +// ListV1 returns a list of all items in the cache filtered and ordered by the given options func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*models.Item, error) { // Apply the filter options ApplyFilterOptions(cacheBackend, options...) diff --git a/factory.go b/factory.go index bbbb0bc..22520f6 100644 --- a/factory.go +++ b/factory.go @@ -34,32 +34,32 @@ func (rbc RedisBackendConstructor) Create(config interface{}) (interface{}, erro return backend.NewRedis(redisConfig.RedisOptions...) } -// HyperCacheManager is a factory for creating HyperCache backend instances -type HyperCacheManager struct { +// BackendManager is a factory for creating HyperCache backend instances +type BackendManager struct { backendRegistry map[string]IBackendConstructor } -// NewHyperCacheManager creates a new HyperCacheManager -func NewHyperCacheManager() *HyperCacheManager { - return &HyperCacheManager{ +// NewBackendManager creates a new BackendManager +func NewBackendManager() *BackendManager { + return &BackendManager{ backendRegistry: make(map[string]IBackendConstructor), } } // RegisterBackend registers a new backend constructor -func (hcm *HyperCacheManager) RegisterBackend(name string, constructor IBackendConstructor) { +func (hcm *BackendManager) RegisterBackend(name string, constructor IBackendConstructor) { hcm.backendRegistry[name] = constructor } -var defaultManager *HyperCacheManager +var defaultManager *BackendManager -// GetDefaultManager returns the default HyperCacheManager -func 
GetDefaultManager() *HyperCacheManager { +// GetDefaultManager returns the default BackendManager +func GetDefaultManager() *BackendManager { return defaultManager } func init() { - defaultManager = NewHyperCacheManager() + defaultManager = NewBackendManager() defaultManager.RegisterBackend("in-memory", InMemoryBackendConstructor{}) defaultManager.RegisterBackend("redis", RedisBackendConstructor{}) } diff --git a/hypercache.go b/hypercache.go index f56f9cd..c4735a4 100644 --- a/hypercache.go +++ b/hypercache.go @@ -97,7 +97,7 @@ func NewInMemoryWithDefaults(capacity int) (hyperCache *HyperCache[backend.InMem // - The eviction algorithm is set to CAWOLFU. // - The expiration interval is set to 30 minutes. // - The stats collector is set to the HistogramStatsCollector stats collector. -func New[T backend.IBackendConstrain](hcm *HyperCacheManager, config *Config[T]) (hyperCache *HyperCache[T], err error) { +func New[T backend.IBackendConstrain](hcm *BackendManager, config *Config[T]) (hyperCache *HyperCache[T], err error) { // Get the backend constructor from the registry constructor, exists := hcm.backendRegistry[config.BackendType] if !exists { diff --git a/pool.go b/pool.go index 945b3e4..de899f9 100644 --- a/pool.go +++ b/pool.go @@ -54,7 +54,7 @@ func (pool *WorkerPool) Enqueue(job JobFunc) { pool.jobs <- job } -// Wait blocks until all jobs in the pool have been executed. +// Shutdown shuts down the worker pool. It waits for all jobs to finish. 
func (pool *WorkerPool) Shutdown() { close(pool.quit) pool.wg.Wait() From d06fa30dc32f7a421683ff1405ae344319f043ff Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Mon, 3 Apr 2023 12:25:08 +0200 Subject: [PATCH 05/10] interface improvements, different strategy for IBackend --- backend/backend.go | 67 ++++++++++---- backend/filters.go | 77 ++++++++++++++++ backend/inmemory.go | 132 +++++++++++---------------- backend/options.go | 77 ++++++++-------- backend/redis.go | 74 ++++++++------- examples/eviction/eviction.go | 21 ++++- examples/list/list.go | 38 +++++--- examples/redis/redis.go | 44 +++++---- examples/stats/stats.go | 12 +-- hypercache.go | 167 ++++++++++++++++++---------------- middleware/logging.go | 3 +- middleware/stats.go | 3 +- models/item.go | 27 ------ service.go | 3 +- utils/types.go | 8 +- 15 files changed, 426 insertions(+), 327 deletions(-) create mode 100644 backend/filters.go diff --git a/backend/backend.go b/backend/backend.go index d4ee253..c4ef247 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -7,28 +7,50 @@ import ( ) // IBackendConstrain is the interface that defines the constrain type that must be implemented by cache backends. -type IBackendConstrain interface { - InMemory | Redis -} +// type IBackendConstrain interface { +// InMemory | Redis +// } -// IInMemory is the interface that must be implemented by in-memory cache backends. -type IInMemory[T IBackendConstrain] interface { - // IBackend[T] is the interface that must be implemented by cache backends. - IBackend[T] - // List the items in the cache that meet the specified criteria. - List(options ...FilterOption[InMemory]) ([]*models.Item, error) - // Clear removes all items from the cache. - Clear() -} +// // IInMemory is the interface that must be implemented by in-memory cache backends. +// type IInMemory[T IBackendConstrain] interface { +// // IBackend[T] is the interface that must be implemented by cache backends. 
+// IBackend[T] +// // List the items in the cache that meet the specified criteria. +// List(options ...FilterOption[InMemory]) ([]*models.Item, error) +// // Clear removes all items from the cache. +// Clear() +// } -// IRedisBackend is the interface that must be implemented by Redis cache backends. -type IRedisBackend[T IBackendConstrain] interface { - // IBackend[T] is the interface that must be implemented by cache backends. - IBackend[T] - // List the items in the cache that meet the specified criteria. - List(ctx context.Context, options ...FilterOption[Redis]) ([]*models.Item, error) - // Clear removes all items from the cache. - Clear() error +// // IRedisBackend is the interface that must be implemented by Redis cache backends. +// type IRedisBackend[T IBackendConstrain] interface { +// // IBackend[T] is the interface that must be implemented by cache backends. +// IBackend[T] +// // List the items in the cache that meet the specified criteria. +// List(ctx context.Context, options ...FilterOption[Redis]) ([]*models.Item, error) +// // Clear removes all items from the cache. +// Clear() error +// } + +// // IBackend is the interface that must be implemented by cache backends. +// type IBackend[T IBackendConstrain] interface { +// // Get retrieves the item with the given key from the cache. +// // If the key is not found in the cache, it returns nil. +// Get(key string) (item *models.Item, ok bool) +// // Set adds a new item to the cache. +// Set(item *models.Item) error +// // Capacity returns the maximum number of items that can be stored in the cache. +// Capacity() int +// // SetCapacity sets the maximum number of items that can be stored in the cache. +// SetCapacity(capacity int) +// // Count returns the number of items currently stored in the cache. +// Count() int +// // Remove deletes the item with the given key from the cache. 
+// Remove(keys ...string) error +// } + +// IBackendConstrain is the interface that defines the constrain type that must be implemented by cache backends. +type IBackendConstrain interface { + InMemory | Redis } // IBackend is the interface that must be implemented by cache backends. @@ -46,4 +68,9 @@ type IBackend[T IBackendConstrain] interface { Count() int // Remove deletes the item with the given key from the cache. Remove(keys ...string) error + // List the items in the cache that meet the specified criteria. + // List(ctx context.Context, options ...FilterOption[T]) ([]*models.Item, error) + List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) + // Clear removes all items from the cache. + Clear() error } diff --git a/backend/filters.go b/backend/filters.go new file mode 100644 index 0000000..83d5134 --- /dev/null +++ b/backend/filters.go @@ -0,0 +1,77 @@ +package backend + +import ( + "sort" + + "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" +) + +type IFilter interface { + ApplyFilter(backendType string, items []*models.Item) []*models.Item +} + +type sortByFilter struct { + field string +} + +func (f sortByFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { + var sorter sort.Interface + switch f.field { + case types.SortByKey.String(): + sorter = &itemSorterByKey{items: items} + case types.SortByLastAccess.String(): + sorter = &itemSorterByLastAccess{items: items} + case types.SortByAccessCount.String(): + sorter = &itemSorterByAccessCount{items: items} + case types.SortByExpiration.String(): + sorter = &itemSorterByExpiration{items: items} + default: + return items + } + sort.Sort(sorter) + return items +} + +type sortOrderFilter struct { + ascending bool +} + +func (f sortOrderFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { + if !f.ascending { + sort.Slice(items, func(i, j int) bool { + return items[j].Key > items[i].Key + }) + } else { + 
sort.Slice(items, func(i, j int) bool { + return items[i].Key < items[j].Key + }) + } + return items +} + +type filterFuncFilter struct { + fn func(item *models.Item) bool +} + +func (f filterFuncFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { + filteredItems := make([]*models.Item, 0) + for _, item := range items { + if f.fn(item) { + filteredItems = append(filteredItems, item) + } + } + return filteredItems +} + +func WithSortBy(field string) IFilter { + return sortByFilter{field: field} +} + +func WithSortOrderAsc(ascending bool) sortOrderFilter { + return sortOrderFilter{ascending: ascending} +} + +func WithFilterFunc(fn func(item *models.Item) bool) IFilter { + return filterFuncFilter{fn: fn} +} diff --git a/backend/inmemory.go b/backend/inmemory.go index 1319666..72b1e57 100644 --- a/backend/inmemory.go +++ b/backend/inmemory.go @@ -1,14 +1,12 @@ package backend import ( - "fmt" - "sort" + "context" "sync" datastructure "github.com/hyp3rd/hypercache/datastructure/v4" "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/models" - "github.com/hyp3rd/hypercache/types" ) // InMemory is a cache backend that stores the items in memory, leveraging a custom `ConcurrentMap`. @@ -21,7 +19,7 @@ type InMemory struct { } // NewInMemory creates a new in-memory cache with the given options. -func NewInMemory(opts ...Option[InMemory]) (backend IInMemory[InMemory], err error) { +func NewInMemory(opts ...Option[InMemory]) (backend IBackend[InMemory], err error) { InMemory := &InMemory{ items: datastructure.New(), } @@ -79,62 +77,59 @@ func (cacheBackend *InMemory) Set(item *models.Item) error { } // ListV1 returns a list of all items in the cache filtered and ordered by the given options -func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*models.Item, error) { - // Apply the filter options - ApplyFilterOptions(cacheBackend, options...) 
- - items := make([]*models.Item, 0, cacheBackend.items.Count()) - wg := sync.WaitGroup{} - wg.Add(cacheBackend.items.Count()) - for item := range cacheBackend.items.IterBuffered() { - go func(item datastructure.Tuple) { - defer wg.Done() - if cacheBackend.FilterFunc == nil || cacheBackend.FilterFunc(&item.Val) { - items = append(items, &item.Val) - } - }(item) - } - wg.Wait() - - if cacheBackend.SortBy == "" { - return items, nil - } - - var sorter sort.Interface - switch cacheBackend.SortBy { - case types.SortByKey.String(): - sorter = &itemSorterByKey{items: items} - case types.SortByLastAccess.String(): - sorter = &itemSorterByLastAccess{items: items} - case types.SortByAccessCount.String(): - sorter = &itemSorterByAccessCount{items: items} - case types.SortByExpiration.String(): - sorter = &itemSorterByExpiration{items: items} - default: - return nil, fmt.Errorf("unknown sortBy field: %s", cacheBackend.SortBy) - } - - if !cacheBackend.SortAscending { - sorter = sort.Reverse(sorter) - } - - sort.Sort(sorter) - return items, nil -} +// func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*models.Item, error) { +// // Apply the filter options +// ApplyFilterOptions(cacheBackend, options...) 
+ +// items := make([]*models.Item, 0, cacheBackend.items.Count()) +// wg := sync.WaitGroup{} +// wg.Add(cacheBackend.items.Count()) +// for item := range cacheBackend.items.IterBuffered() { +// go func(item datastructure.Tuple) { +// defer wg.Done() +// if cacheBackend.FilterFunc == nil || cacheBackend.FilterFunc(&item.Val) { +// items = append(items, &item.Val) +// } +// }(item) +// } +// wg.Wait() + +// if cacheBackend.SortBy == "" { +// return items, nil +// } + +// var sorter sort.Interface +// switch cacheBackend.SortBy { +// case types.SortByKey.String(): +// sorter = &itemSorterByKey{items: items} +// case types.SortByLastAccess.String(): +// sorter = &itemSorterByLastAccess{items: items} +// case types.SortByAccessCount.String(): +// sorter = &itemSorterByAccessCount{items: items} +// case types.SortByExpiration.String(): +// sorter = &itemSorterByExpiration{items: items} +// default: +// return nil, fmt.Errorf("unknown sortBy field: %s", cacheBackend.SortBy) +// } + +// if !cacheBackend.SortAscending { +// sorter = sort.Reverse(sorter) +// } + +// sort.Sort(sorter) +// return items, nil +// } // List returns a list of all items in the cache filtered and ordered by the given options -func (cacheBackend *InMemory) List(options ...FilterOption[InMemory]) ([]*models.Item, error) { - // Apply the filter options - ApplyFilterOptions(cacheBackend, options...) 
- - wg := sync.WaitGroup{} - +// func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*models.Item, error) { +func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) { + // Apply the filters cacheBackend.mutex.RLock() defer cacheBackend.mutex.RUnlock() - itemsCount := cacheBackend.items.Count() - items := make([]*models.Item, 0, itemsCount) - wg.Add(itemsCount) + items := make([]*models.Item, 0, cacheBackend.Count()) + wg := sync.WaitGroup{} + wg.Add(cacheBackend.items.Count()) for item := range cacheBackend.items.IterBuffered() { go func(item datastructure.Tuple) { defer wg.Done() @@ -145,29 +140,10 @@ func (cacheBackend *InMemory) List(options ...FilterOption[InMemory]) ([]*models } wg.Wait() - if cacheBackend.SortBy == "" { - return items, nil - } - - var sorter sort.Interface - switch cacheBackend.SortBy { - case types.SortByKey.String(): - sorter = &itemSorterByKey{items: items} - case types.SortByLastAccess.String(): - sorter = &itemSorterByLastAccess{items: items} - case types.SortByAccessCount.String(): - sorter = &itemSorterByAccessCount{items: items} - case types.SortByExpiration.String(): - sorter = &itemSorterByExpiration{items: items} - default: - return nil, fmt.Errorf("unknown sortBy field: %s", cacheBackend.SortBy) + for _, filter := range filters { + items = filter.ApplyFilter("in-memory", items) } - if !cacheBackend.SortAscending { - sorter = sort.Reverse(sorter) - } - - sort.Sort(sorter) return items, nil } @@ -184,7 +160,9 @@ func (cacheBackend *InMemory) Remove(keys ...string) (err error) { } // Clear removes all items from the cacheBackend. 
-func (cacheBackend *InMemory) Clear() { +func (cacheBackend *InMemory) Clear() error { // clear the cacheBackend cacheBackend.items.Clear() + + return nil } diff --git a/backend/options.go b/backend/options.go index 575dc22..4311c7e 100644 --- a/backend/options.go +++ b/backend/options.go @@ -3,7 +3,6 @@ package backend import ( "github.com/hyp3rd/hypercache/libs/serializer" "github.com/hyp3rd/hypercache/models" - "github.com/hyp3rd/hypercache/types" "github.com/redis/go-redis/v9" ) @@ -113,41 +112,41 @@ func WithSerializer[T Redis](serializer serializer.ISerializer) Option[Redis] { } // FilterOption is a function type that can be used to configure the `Filter` struct. -type FilterOption[T any] func(*T) - -// ApplyFilterOptions applies the given options to the given filter. -func ApplyFilterOptions[T IBackendConstrain](backend *T, options ...FilterOption[T]) { - for _, option := range options { - option(backend) - } -} - -// WithSortBy is an option that sets the field to sort the items by. -// The field can be any of the fields in the `Item` struct. -func WithSortBy[T IBackendConstrain](field types.SortingField) FilterOption[T] { - return func(a *T) { - if sortable, ok := any(a).(iSortableBackend); ok { - sortable.setSortBy(field.String()) - } - } -} - -// WithSortOrderAsc is an option that sets the sort order to ascending or descending. -// When sorting the items in the cache, they will be sorted in ascending or descending order based on the field specified with the `WithSortBy` option. -func WithSortOrderAsc[T IBackendConstrain](ascending bool) FilterOption[T] { - return func(a *T) { - if sortable, ok := any(a).(iSortableBackend); ok { - sortable.setSortAscending(ascending) - } - } -} - -// WithFilterFunc is an option that sets the filter function to use. -// The filter function is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. 
-func WithFilterFunc[T any](fn func(item *models.Item) bool) FilterOption[T] { - return func(a *T) { - if filterable, ok := any(a).(IFilterableBackend); ok { - filterable.setFilterFunc(fn) - } - } -} +// type FilterOption[T any] func(*T) + +// // ApplyFilterOptions applies the given options to the given filter. +// func ApplyFilterOptions[T IBackendConstrain](backend *T, options ...FilterOption[T]) { +// for _, option := range options { +// option(backend) +// } +// } + +// // WithSortBy is an option that sets the field to sort the items by. +// // The field can be any of the fields in the `Item` struct. +// func WithSortBy[T IBackendConstrain](field types.SortingField) FilterOption[T] { +// return func(a *T) { +// if sortable, ok := any(a).(iSortableBackend); ok { +// sortable.setSortBy(field.String()) +// } +// } +// } + +// // WithSortOrderAsc is an option that sets the sort order to ascending or descending. +// // When sorting the items in the cache, they will be sorted in ascending or descending order based on the field specified with the `WithSortBy` option. +// func WithSortOrderAsc[T IBackendConstrain](ascending bool) FilterOption[T] { +// return func(a *T) { +// if sortable, ok := any(a).(iSortableBackend); ok { +// sortable.setSortAscending(ascending) +// } +// } +// } + +// // WithFilterFunc is an option that sets the filter function to use. +// // The filter function is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. 
+// func WithFilterFunc[T any](fn func(item *models.Item) bool) FilterOption[T] { +// return func(a *T) { +// if filterable, ok := any(a).(IFilterableBackend); ok { +// filterable.setFilterFunc(fn) +// } +// } +// } diff --git a/backend/redis.go b/backend/redis.go index 010d31f..3f7a393 100644 --- a/backend/redis.go +++ b/backend/redis.go @@ -2,13 +2,10 @@ package backend import ( "context" - "fmt" - "sort" "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/libs/serializer" "github.com/hyp3rd/hypercache/models" - "github.com/hyp3rd/hypercache/types" "github.com/redis/go-redis/v9" ) @@ -22,7 +19,7 @@ type Redis struct { } // NewRedis creates a new redis cache with the given options. -func NewRedis(redisOptions ...Option[Redis]) (backend IRedisBackend[Redis], err error) { +func NewRedis(redisOptions ...Option[Redis]) (backend IBackend[Redis], err error) { rb := &Redis{} // Apply the backend options ApplyOptions(rb, redisOptions...) @@ -145,9 +142,13 @@ func (cacheBackend *Redis) Set(item *models.Item) error { } // List returns a list of all the items in the cacheBackend that match the given filter options. -func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Redis]) ([]*models.Item, error) { - // Apply the filter options - ApplyFilterOptions(cacheBackend, options...) 
+// func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Redis]) ([]*models.Item, error) { +func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) { + // Apply the filters + // filterOptions := make([]FilterOption[Redis], len(filters)) + // for i, option := range filters { + // filterOptions[i] = option.(FilterOption[Redis]) + // } // Get the set of keys held in the cacheBackend with the given `keysSetName` keys, err := cacheBackend.rdb.SMembers(ctx, cacheBackend.keysSetName).Result() @@ -185,35 +186,42 @@ func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Red } } - // Check if the items should be sorted - if cacheBackend.SortBy == "" { - // No sorting - return items, nil - } - - // Sort the items - var sorter sort.Interface - switch cacheBackend.SortBy { - case types.SortByKey.String(): // Sort by key - sorter = &itemSorterByKey{items: items} - case types.SortByLastAccess.String(): // Sort by last access - sorter = &itemSorterByLastAccess{items: items} - case types.SortByAccessCount.String(): // Sort by access count - sorter = &itemSorterByAccessCount{items: items} - case types.SortByExpiration.String(): // Sort by expiration - sorter = &itemSorterByExpiration{items: items} - default: - return nil, fmt.Errorf("unknown sortBy field: %s", cacheBackend.SortBy) + // Apply the filters + for _, filter := range filters { + items = filter.ApplyFilter("redis", items) } - // Reverse the sort order if needed - if !cacheBackend.SortAscending { - sorter = sort.Reverse(sorter) - } - // Sort the items by the given field - sort.Sort(sorter) - return items, nil + + // Check if the items should be sorted + // if cacheBackend.SortBy == "" { + // // No sorting + // return items, nil + // } + + // // Sort the items + // var sorter sort.Interface + // switch cacheBackend.SortBy { + // case types.SortByKey.String(): // Sort by key + // sorter = &itemSorterByKey{items: items} + // case 
types.SortByLastAccess.String(): // Sort by last access + // sorter = &itemSorterByLastAccess{items: items} + // case types.SortByAccessCount.String(): // Sort by access count + // sorter = &itemSorterByAccessCount{items: items} + // case types.SortByExpiration.String(): // Sort by expiration + // sorter = &itemSorterByExpiration{items: items} + // default: + // return nil, fmt.Errorf("unknown sortBy field: %s", cacheBackend.SortBy) + // } + + // // Reverse the sort order if needed + // if !cacheBackend.SortAscending { + // sorter = sort.Reverse(sorter) + // } + // // Sort the items by the given field + // sort.Sort(sorter) + + // return items, nil } // Remove removes an item from the cache with the given key diff --git a/examples/eviction/eviction.go b/examples/eviction/eviction.go index e76f701..b9dff8e 100644 --- a/examples/eviction/eviction.go +++ b/examples/eviction/eviction.go @@ -60,22 +60,33 @@ func executeExample(evictionInterval time.Duration) { log.Println("capacity after adding 15 items", cache.Capacity()) log.Println("listing all items in the cache") - list, err := cache.List(context.TODO(), backend.WithSortBy[backend.InMemory](types.SortByKey)) + items, err := cache.List(context.TODO()) if err != nil { fmt.Println(err) return } - // Print the list of items - for i, ci := range list { - fmt.Println(i, ci.Value) + // Apply filters + sortByFilter := backend.WithSortBy(types.SortByKey.String()) + sortOrderFilter := backend.WithSortOrderAsc(true) + + filteredItems := sortByFilter.ApplyFilter("in-memory", items) + sortedItems := sortOrderFilter.ApplyFilter("in-memory", filteredItems) + + for _, item := range sortedItems { + fmt.Println(item.Key, item.Value) + } + + if err != nil { + fmt.Println(err) + return } if evictionInterval > 0 { fmt.Println("sleeping to allow the evition loop to complete", evictionInterval+2*time.Second) time.Sleep(evictionInterval + 2*time.Second) log.Println("listing all items in the cache the eviction is triggered") - list, err = 
cache.List(context.TODO(), backend.WithSortBy[backend.InMemory](types.SortByKey)) + list, err := cache.List(context.TODO()) if err != nil { fmt.Println(err) return diff --git a/examples/list/list.go b/examples/list/list.go index 647725f..739d463 100644 --- a/examples/list/list.go +++ b/examples/list/list.go @@ -24,8 +24,8 @@ func main() { defer hyperCache.Stop() // Add 100 items to the cache - for i := 0; i < 500; i++ { - key := fmt.Sprintf("key%d", i) + for i := 0; i < 10; i++ { + key := fmt.Sprintf("%d", i) val := fmt.Sprintf("val%d", i) err = hyperCache.Set(key, val, time.Minute) @@ -37,22 +37,32 @@ func main() { } // Retrieve the list of items from the cache - list, err := hyperCache.List(context.TODO(), - backend.WithSortBy[backend.InMemory](types.SortByKey), - backend.WithSortOrderAsc[backend.InMemory](true), - backend.WithFilterFunc[backend.InMemory](func(item *models.Item) bool { - return item.Value != "val98" - }), - ) - - // Check for errors + items, err := hyperCache.List(context.TODO()) if err != nil { fmt.Println(err) return } - // Print the list of items - for _, ci := range list { - fmt.Println(ci.Key, ci.Value) + // Apply filters + // Define a filter function + itemsFilterFunc := func(item *models.Item) bool { + // return time.Since(item.LastAccess) > 1*time.Microsecond + return item.Value != "val84" + } + + sortByFilter := backend.WithSortBy(types.SortByKey.String()) + // sortOrderFilter := backend.WithSortOrderAsc(true) + + // Create a filterFuncFilter with the defined filter function + filter := backend.WithFilterFunc(itemsFilterFunc) + + // Apply the filter to the items + filteredItems := filter.ApplyFilter("in-memory", items) + + filteredItems = sortByFilter.ApplyFilter("in-memory", filteredItems) + // sortedItems := sortOrderFilter.ApplyFilter("in-memory", filteredItems) + + for _, item := range filteredItems { + fmt.Println(item.Key, item.Value) } } diff --git a/examples/redis/redis.go b/examples/redis/redis.go index 272cf1c..3f192ab 100644 
--- a/examples/redis/redis.go +++ b/examples/redis/redis.go @@ -51,22 +51,36 @@ func main() { fmt.Println("capacity", hyperCache.Capacity()) fmt.Println("fetching all items (sorted by key, ascending, filtered by value != 'value-16')") - allItems, err := hyperCache.List(context.TODO(), - backend.WithSortBy[backend.Redis](types.SortByKey), - backend.WithSortOrderAsc[backend.Redis](true), - backend.WithFilterFunc[backend.Redis](func(item *models.Item) bool { - return item.Value != "value-16" - }), - ) - // Check for errors + // Retrieve the list of items from the cache + allItems, err := hyperCache.List(context.TODO()) if err != nil { fmt.Println(err) + return + } + + // Apply filters + // Define a filter function + itemsFilterFunc := func(item *models.Item) bool { + // return time.Since(item.LastAccess) > 1*time.Microsecond + return item.Value != "value-16" } + sortByFilter := backend.WithSortBy(types.SortByKey.String()) + // sortOrderFilter := backend.WithSortOrderAsc(true) + + // Create a filterFuncFilter with the defined filter function + filter := backend.WithFilterFunc(itemsFilterFunc) + + // Apply the filter to the items + filteredItems := filter.ApplyFilter("redis", allItems) + + // Apply the sort filter to the filtered items + filteredItems = sortByFilter.ApplyFilter("redis", filteredItems) + fmt.Println("printing all items") // Print the list of items - for _, item := range allItems { + for _, item := range filteredItems { fmt.Println(item.Key, item.Value) } @@ -76,17 +90,11 @@ func main() { fmt.Println("sleep for 5 seconds to trigger eviction") time.Sleep(time.Second * 5) - fmt.Println("fetching all items (sorted by key, ascending, filtered by value != 'value-16')") - allItems, err = hyperCache.List(context.TODO(), - backend.WithSortBy[backend.Redis](types.SortByKey), - backend.WithSortOrderAsc[backend.Redis](true), - backend.WithFilterFunc[backend.Redis](func(item *models.Item) bool { - return item.Value != "value-16" - }), - ) - // Check for errors + 
fmt.Println("fetching all items again") + allItems, err = hyperCache.List(context.TODO()) if err != nil { fmt.Println(err) + return } fmt.Println("printing all items") diff --git a/examples/stats/stats.go b/examples/stats/stats.go index 3ba56f3..3abbf6c 100644 --- a/examples/stats/stats.go +++ b/examples/stats/stats.go @@ -7,8 +7,6 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" - "github.com/hyp3rd/hypercache/types" ) func main() { @@ -51,15 +49,9 @@ func main() { time.Sleep(time.Second * 7) // Retrieve the list of items from the cache - list, err := hyperCache.List(context.TODO(), - backend.WithSortBy[backend.InMemory](types.SortByKey), - backend.WithSortOrderAsc[backend.InMemory](true), - backend.WithFilterFunc[backend.InMemory](func(item *models.Item) bool { - return item.Expiration > time.Second - }), - ) + list, err := hyperCache.List(context.TODO()) - // + // Check for errors if err != nil { fmt.Println(err) return diff --git a/hypercache.go b/hypercache.go index c4735a4..8139d8d 100644 --- a/hypercache.go +++ b/hypercache.go @@ -97,9 +97,9 @@ func NewInMemoryWithDefaults(capacity int) (hyperCache *HyperCache[backend.InMem // - The eviction algorithm is set to CAWOLFU. // - The expiration interval is set to 30 minutes. // - The stats collector is set to the HistogramStatsCollector stats collector. 
-func New[T backend.IBackendConstrain](hcm *BackendManager, config *Config[T]) (hyperCache *HyperCache[T], err error) { +func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hyperCache *HyperCache[T], err error) { // Get the backend constructor from the registry - constructor, exists := hcm.backendRegistry[config.BackendType] + constructor, exists := bm.backendRegistry[config.BackendType] if !exists { return nil, fmt.Errorf("unknown backend type: %s", config.BackendType) } @@ -127,7 +127,8 @@ func New[T backend.IBackendConstrain](hcm *BackendManager, config *Config[T]) (h // Initialize the cache backend type checker hyperCache.cacheBackendChecker = utils.CacheBackendChecker[T]{ - Backend: hyperCache.backend, + Backend: hyperCache.backend, + BackendType: config.BackendType, } // Apply options @@ -218,7 +219,6 @@ func New[T backend.IBackendConstrain](hcm *BackendManager, config *Config[T]) (h // expirationLoop is a function that runs in a separate goroutine and expires items in the cache based on their expiration duration. 
func (hyperCache *HyperCache[T]) expirationLoop() { - // Enqueue the expiration loop in the worker pool to avoid blocking the main goroutine hyperCache.workerPool.Enqueue(func() error { hyperCache.StatsCollector.Incr("expiration_loop_count", 1) defer hyperCache.StatsCollector.Timing("expiration_loop_duration", time.Now().UnixNano()) @@ -230,28 +230,21 @@ func (hyperCache *HyperCache[T]) expirationLoop() { ) // get all expired items - if hyperCache.cacheBackendChecker.IsInMemory() { - items, err = hyperCache.backend.(*backend.InMemory).List( - backend.WithSortBy[backend.InMemory](types.SortByExpiration), - backend.WithFilterFunc[backend.InMemory](func(item *models.Item) bool { - return item.Expiration > 0 && time.Since(item.LastAccess) > item.Expiration - }), - ) - } else if hyperCache.cacheBackendChecker.IsRedis() { - items, err = hyperCache.backend.(*backend.Redis).List(context.TODO(), - backend.WithSortBy[backend.Redis](types.SortByExpiration), - backend.WithFilterFunc[backend.Redis](func(item *models.Item) bool { - return item.Expiration > 0 && time.Since(item.LastAccess) > item.Expiration - }), - ) - } - - // when error, return + items, err = hyperCache.List(context.TODO()) if err != nil { return err } + + sortByFilter := backend.WithSortBy(types.SortByExpiration.String()) + filterFuncFilter := backend.WithFilterFunc(func(item *models.Item) bool { + return item.Expiration > 0 && time.Since(item.LastAccess) > item.Expiration + }) + + filteredItems := filterFuncFilter.ApplyFilter(hyperCache.cacheBackendChecker.GetRegisteredType(), items) + sortedItems := sortByFilter.ApplyFilter(hyperCache.cacheBackendChecker.GetRegisteredType(), filteredItems) + // iterate all expired items and remove them - for _, item := range items { + for _, item := range sortedItems { expiredCount++ hyperCache.Remove(item.Key) models.ItemPool.Put(item) @@ -500,58 +493,62 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] return } +func (hyperCache 
*HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { + return hyperCache.backend.List(ctx, filters...) +} + // List lists the items in the cache that meet the specified criteria. // It takes in a variadic number of any type as filters, it then checks the backend type, and calls the corresponding // implementation of the List function for that backend, with the filters passed in as arguments -func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...any) ([]*models.Item, error) { - var listInstance listFunc - - // checking the backend type - if hyperCache.cacheBackendChecker.IsInMemory() { - // if the backend is an InMemory, we set the listFunc to the ListInMemory function - listInstance = listInMemory(hyperCache.backend.(*backend.InMemory)) - } - - if hyperCache.cacheBackendChecker.IsRedis() { - // if the backend is a Redis, we set the listFunc to the ListRedis function - listInstance = listRedis(hyperCache.backend.(*backend.Redis)) - } - - // calling the corresponding implementation of the list function - return listInstance(ctx, filters...) -} - -// listFunc is a type that defines a function that takes in a variable number of any type as arguments, and returns -// a slice of Item pointers, and an error -type listFunc func(ctx context.Context, options ...any) ([]*models.Item, error) - -// listInMemory is a function that takes in an InMemory, and returns a ListFunc -// it takes any type as filters, and converts them to the specific FilterOption type for the InMemory, -// and calls the InMemory's List function with these filters. 
-func listInMemory(cacheBackend *backend.InMemory) listFunc { - return func(ctx context.Context, options ...any) ([]*models.Item, error) { - // here we are converting the filters of any type to the specific FilterOption type for the InMemory - filterOptions := make([]backend.FilterOption[backend.InMemory], len(options)) - for i, option := range options { - filterOptions[i] = option.(backend.FilterOption[backend.InMemory]) - } - return cacheBackend.List(filterOptions...) - } -} - -// listRedis is a function that takes in a Redis, and returns a ListFunc -// it takes any type as filters, and converts them to the specific FilterOption type for the Redis, -// and calls the Redis's List function with these filters. -func listRedis(cacheBackend *backend.Redis) listFunc { - return func(ctx context.Context, options ...any) ([]*models.Item, error) { - // here we are converting the filters of any type to the specific FilterOption type for the Redis - filterOptions := make([]backend.FilterOption[backend.Redis], len(options)) - for i, option := range options { - filterOptions[i] = option.(backend.FilterOption[backend.Redis]) - } - return cacheBackend.List(ctx, filterOptions...) - } -} +// func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...any) ([]*models.Item, error) { +// var listInstance listFunc + +// // checking the backend type +// if hyperCache.cacheBackendChecker.IsInMemory() { +// // if the backend is an InMemory, we set the listFunc to the ListInMemory function +// listInstance = listInMemory(hyperCache.backend.(*backend.InMemory)) +// } + +// if hyperCache.cacheBackendChecker.IsRedis() { +// // if the backend is a Redis, we set the listFunc to the ListRedis function +// listInstance = listRedis(hyperCache.backend.(*backend.Redis)) +// } + +// // calling the corresponding implementation of the list function +// return listInstance(ctx, filters...) 
+// } + +// // listFunc is a type that defines a function that takes in a variable number of any type as arguments, and returns +// // a slice of Item pointers, and an error +// type listFunc func(ctx context.Context, options ...any) ([]*models.Item, error) + +// // listInMemory is a function that takes in an InMemory, and returns a ListFunc +// // it takes any type as filters, and converts them to the specific FilterOption type for the InMemory, +// // and calls the InMemory's List function with these filters. +// func listInMemory(cacheBackend *backend.InMemory) listFunc { +// return func(ctx context.Context, options ...any) ([]*models.Item, error) { +// // here we are converting the filters of any type to the specific FilterOption type for the InMemory +// filterOptions := make([]backend.FilterOption[backend.InMemory], len(options)) +// for i, option := range options { +// filterOptions[i] = option.(backend.FilterOption[backend.InMemory]) +// } +// return cacheBackend.List(filterOptions...) +// } +// } + +// // listRedis is a function that takes in a Redis, and returns a ListFunc +// // it takes any type as filters, and converts them to the specific FilterOption type for the Redis, +// // and calls the Redis's List function with these filters. +// func listRedis(cacheBackend *backend.Redis) listFunc { +// return func(ctx context.Context, options ...any) ([]*models.Item, error) { +// // here we are converting the filters of any type to the specific FilterOption type for the Redis +// filterOptions := make([]backend.FilterOption[backend.Redis], len(options)) +// for i, option := range options { +// filterOptions[i] = option.(backend.FilterOption[backend.Redis]) +// } +// return cacheBackend.List(ctx, filterOptions...) +// } +// } // Remove removes items with the given key from the cache. If an item is not found, it does nothing. 
func (hyperCache *HyperCache[T]) Remove(keys ...string) { @@ -576,16 +573,26 @@ func (hyperCache *HyperCache[T]) Clear() error { ) // get all expired items - if cb, ok := hyperCache.backend.(*backend.InMemory); ok { - items, err = cb.List() - cb.Clear() - } else if cb, ok := hyperCache.backend.(*backend.Redis); ok { - items, err = cb.List(context.TODO()) - if err != nil { - return err - } - err = cb.Clear() + items, err = hyperCache.backend.List(context.TODO()) + if err != nil { + return err + } + + // clear the cacheBackend + err = hyperCache.backend.Clear() + if err != nil { + return err } + // if cb, ok := hyperCache.backend.(*backend.InMemory); ok { + // items, err = cb.List() + // cb.Clear() + // } else if cb, ok := hyperCache.backend.(*backend.Redis); ok { + // items, err = cb.List(context.TODO()) + // if err != nil { + // return err + // } + // err = cb.Clear() + // } for _, item := range items { hyperCache.evictionAlgorithm.Delete(item.Key) diff --git a/middleware/logging.go b/middleware/logging.go index 4e60036..50ddff3 100644 --- a/middleware/logging.go +++ b/middleware/logging.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hyp3rd/hypercache" + "github.com/hyp3rd/hypercache/backend" "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" ) @@ -79,7 +80,7 @@ func (mw LoggingMiddleware) GetMultiple(keys ...string) (result map[string]any, } // List logs the time it takes to execute the next middleware. 
-func (mw LoggingMiddleware) List(ctx context.Context, filters ...any) ([]*models.Item, error) { +func (mw LoggingMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { defer func(begin time.Time) { mw.logger.Printf("method List took: %s", time.Since(begin)) }(time.Now()) diff --git a/middleware/stats.go b/middleware/stats.go index 86bd572..9f8d213 100644 --- a/middleware/stats.go +++ b/middleware/stats.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hyp3rd/hypercache" + "github.com/hyp3rd/hypercache/backend" "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" ) @@ -72,7 +73,7 @@ func (mw StatsCollectorMiddleware) GetMultiple(keys ...string) (result map[strin } // List collects stats for the List method. -func (mw StatsCollectorMiddleware) List(ctx context.Context, filters ...any) ([]*models.Item, error) { +func (mw StatsCollectorMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_list_duration", time.Since(start).Nanoseconds()) diff --git a/models/item.go b/models/item.go index d0c2acf..399c930 100644 --- a/models/item.go +++ b/models/item.go @@ -20,16 +20,6 @@ var ( }, } - // buf is a buffer used to calculate the size of the item. - // buf bytes.Buffer - - // encoderPool is a pool of encoders used to calculate the size of the item. - // encoderPool = sync.Pool{ - // New: func() any { - // return gob.NewEncoder(&buf) - // }, - // } - // buf is a buffer used to calculate the size of the item. 
buf []byte @@ -51,23 +41,6 @@ type Item struct { AccessCount uint // AccessCount of times the item has been accessed } -// SetSize stores the size of the Item in bytes -// func (item *Item) SetSize() error { -// // Get an encoder from the pool -// enc := encoderPool.Get().(*gob.Encoder) -// // Reset the buffer and put the encoder back in the pool -// defer buf.Reset() -// defer encoderPool.Put(enc) - -// // Encode the item -// if err := enc.Encode(item.Value); err != nil { -// return errors.ErrInvalidSize -// } -// // Set the size of the item -// item.Size = int64(buf.Len()) -// return nil -// } - // SetSize stores the size of the Item in bytes func (item *Item) SetSize() error { enc := encoderPool.Get().(*codec.Encoder) diff --git a/service.go b/service.go index 790ef0e..b1286c1 100644 --- a/service.go +++ b/service.go @@ -4,6 +4,7 @@ import ( "context" "time" + "github.com/hyp3rd/hypercache/backend" "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" ) @@ -22,7 +23,7 @@ type Service interface { // GetMultiple retrieves a list of values from the cache using the keys GetMultiple(keys ...string) (result map[string]any, failed map[string]error) // List returns a list of all items in the cache - List(ctx context.Context, filters ...any) ([]*models.Item, error) + List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) // Remove removes a value from the cache using the key Remove(keys ...string) // Clear removes all values from the cache diff --git a/utils/types.go b/utils/types.go index ac85d58..c134da4 100644 --- a/utils/types.go +++ b/utils/types.go @@ -23,7 +23,8 @@ func TypeName(object interface{}) (typeName string, inferredType string) { // CacheBackendChecker is a helper struct to check the type of the backend type CacheBackendChecker[T backend.IBackendConstrain] struct { - Backend backend.IBackend[T] + Backend backend.IBackend[T] + BackendType string } // IsInMemory returns true if the backend is an InMemory @@ -37,3 
+38,8 @@ func (c *CacheBackendChecker[T]) IsRedis() bool { _, ok := c.Backend.(*backend.Redis) return ok } + +// GetRegisteredType returns the backend type as a string. +func (c *CacheBackendChecker[T]) GetRegisteredType() string { + return c.BackendType +} From b74c06c99732f8b26863bbc3c842918329669ff0 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Mon, 3 Apr 2023 12:35:50 +0200 Subject: [PATCH 06/10] refactored models to types --- backend/backend.go | 18 ++++++++--------- backend/filters.go | 15 +++++++------- backend/inmemory.go | 24 +++++++++++------------ backend/options.go | 6 +++--- backend/redis.go | 20 +++++++++---------- backend/sorting.go | 14 ++++++------- datastructure/v3/cmap.go | 18 ++++++++--------- datastructure/v4/cmap.go | 18 ++++++++--------- eviction/arc.go | 32 +++++++++++++++--------------- eviction/clock.go | 12 ++++++------ examples/list/list.go | 3 +-- examples/redis/redis.go | 3 +-- hypercache.go | 41 +++++++++++++++++++-------------------- middleware/logging.go | 6 +++--- middleware/stats.go | 6 +++--- service.go | 8 ++++---- {models => types}/item.go | 2 +- 17 files changed, 120 insertions(+), 126 deletions(-) rename {models => types}/item.go (99%) diff --git a/backend/backend.go b/backend/backend.go index c4ef247..9939e77 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -3,7 +3,7 @@ package backend import ( "context" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) // IBackendConstrain is the interface that defines the constrain type that must be implemented by cache backends. @@ -16,7 +16,7 @@ import ( // // IBackend[T] is the interface that must be implemented by cache backends. // IBackend[T] // // List the items in the cache that meet the specified criteria. -// List(options ...FilterOption[InMemory]) ([]*models.Item, error) +// List(options ...FilterOption[InMemory]) ([]*types.Item, error) // // Clear removes all items from the cache. 
// Clear() // } @@ -26,7 +26,7 @@ import ( // // IBackend[T] is the interface that must be implemented by cache backends. // IBackend[T] // // List the items in the cache that meet the specified criteria. -// List(ctx context.Context, options ...FilterOption[Redis]) ([]*models.Item, error) +// List(ctx context.Context, options ...FilterOption[Redis]) ([]*types.Item, error) // // Clear removes all items from the cache. // Clear() error // } @@ -35,9 +35,9 @@ import ( // type IBackend[T IBackendConstrain] interface { // // Get retrieves the item with the given key from the cache. // // If the key is not found in the cache, it returns nil. -// Get(key string) (item *models.Item, ok bool) +// Get(key string) (item *types.Item, ok bool) // // Set adds a new item to the cache. -// Set(item *models.Item) error +// Set(item *types.Item) error // // Capacity returns the maximum number of items that can be stored in the cache. // Capacity() int // // SetCapacity sets the maximum number of items that can be stored in the cache. @@ -57,9 +57,9 @@ type IBackendConstrain interface { type IBackend[T IBackendConstrain] interface { // Get retrieves the item with the given key from the cache. // If the key is not found in the cache, it returns nil. - Get(key string) (item *models.Item, ok bool) + Get(key string) (item *types.Item, ok bool) // Set adds a new item to the cache. - Set(item *models.Item) error + Set(item *types.Item) error // Capacity returns the maximum number of items that can be stored in the cache. Capacity() int // SetCapacity sets the maximum number of items that can be stored in the cache. @@ -69,8 +69,8 @@ type IBackend[T IBackendConstrain] interface { // Remove deletes the item with the given key from the cache. Remove(keys ...string) error // List the items in the cache that meet the specified criteria. 
- // List(ctx context.Context, options ...FilterOption[T]) ([]*models.Item, error) - List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) + // List(ctx context.Context, options ...FilterOption[T]) ([]*types.Item, error) + List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) // Clear removes all items from the cache. Clear() error } diff --git a/backend/filters.go b/backend/filters.go index 83d5134..925f8cb 100644 --- a/backend/filters.go +++ b/backend/filters.go @@ -3,19 +3,18 @@ package backend import ( "sort" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/types" ) type IFilter interface { - ApplyFilter(backendType string, items []*models.Item) []*models.Item + ApplyFilter(backendType string, items []*types.Item) []*types.Item } type sortByFilter struct { field string } -func (f sortByFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { +func (f sortByFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { var sorter sort.Interface switch f.field { case types.SortByKey.String(): @@ -37,7 +36,7 @@ type sortOrderFilter struct { ascending bool } -func (f sortOrderFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { +func (f sortOrderFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { if !f.ascending { sort.Slice(items, func(i, j int) bool { return items[j].Key > items[i].Key @@ -51,11 +50,11 @@ func (f sortOrderFilter) ApplyFilter(backendType string, items []*models.Item) [ } type filterFuncFilter struct { - fn func(item *models.Item) bool + fn func(item *types.Item) bool } -func (f filterFuncFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { - filteredItems := make([]*models.Item, 0) +func (f filterFuncFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { + filteredItems := make([]*types.Item, 0) for _, item := range items { if f.fn(item) { filteredItems = 
append(filteredItems, item) @@ -72,6 +71,6 @@ func WithSortOrderAsc(ascending bool) sortOrderFilter { return sortOrderFilter{ascending: ascending} } -func WithFilterFunc(fn func(item *models.Item) bool) IFilter { +func WithFilterFunc(fn func(item *types.Item) bool) IFilter { return filterFuncFilter{fn: fn} } diff --git a/backend/inmemory.go b/backend/inmemory.go index 72b1e57..a083608 100644 --- a/backend/inmemory.go +++ b/backend/inmemory.go @@ -6,12 +6,12 @@ import ( datastructure "github.com/hyp3rd/hypercache/datastructure/v4" "github.com/hyp3rd/hypercache/errors" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) // InMemory is a cache backend that stores the items in memory, leveraging a custom `ConcurrentMap`. type InMemory struct { - // items datastructure.ConcurrentMap[string, *models.Item] // map to store the items in the cache + // items datastructure.ConcurrentMap[string, *types.Item] // map to store the items in the cache items datastructure.ConcurrentMap // map to store the items in the cache capacity int // capacity of the cache, limits the number of items that can be stored in the cache mutex sync.RWMutex // mutex to protect the cache from concurrent access @@ -52,7 +52,7 @@ func (cacheBackend *InMemory) Count() int { } // Get retrieves the item with the given key from the cacheBackend. If the item is not found, it returns nil. -func (cacheBackend *InMemory) Get(key string) (item *models.Item, ok bool) { +func (cacheBackend *InMemory) Get(key string) (item *types.Item, ok bool) { item, ok = cacheBackend.items.Get(key) if !ok { return nil, false @@ -62,10 +62,10 @@ func (cacheBackend *InMemory) Get(key string) (item *models.Item, ok bool) { } // Set adds a Item to the cache. 
-func (cacheBackend *InMemory) Set(item *models.Item) error { +func (cacheBackend *InMemory) Set(item *types.Item) error { // Check for invalid key, value, or duration if err := item.Valid(); err != nil { - models.ItemPool.Put(item) + types.ItemPool.Put(item) return err } @@ -77,11 +77,11 @@ func (cacheBackend *InMemory) Set(item *models.Item) error { } // ListV1 returns a list of all items in the cache filtered and ordered by the given options -// func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*models.Item, error) { +// func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*types.Item, error) { // // Apply the filter options // ApplyFilterOptions(cacheBackend, options...) -// items := make([]*models.Item, 0, cacheBackend.items.Count()) +// items := make([]*types.Item, 0, cacheBackend.items.Count()) // wg := sync.WaitGroup{} // wg.Add(cacheBackend.items.Count()) // for item := range cacheBackend.items.IterBuffered() { @@ -121,13 +121,13 @@ func (cacheBackend *InMemory) Set(item *models.Item) error { // } // List returns a list of all items in the cache filtered and ordered by the given options -// func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*models.Item, error) { -func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) { +// func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*types.Item, error) { +func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { // Apply the filters cacheBackend.mutex.RLock() defer cacheBackend.mutex.RUnlock() - items := make([]*models.Item, 0, cacheBackend.Count()) + items := make([]*types.Item, 0, cacheBackend.Count()) wg := sync.WaitGroup{} wg.Add(cacheBackend.items.Count()) for item := range cacheBackend.items.IterBuffered() { @@ -151,8 +151,8 @@ func (cacheBackend *InMemory) List(ctx context.Context, filters 
...IFilter) ([]* func (cacheBackend *InMemory) Remove(keys ...string) (err error) { //TODO: determine if handling the error or not // var ok bool - // item := models.ItemPool.Get().(*models.Item) - // defer models.ItemPool.Put(item) + // item := types.ItemPool.Get().(*types.Item) + // defer types.ItemPool.Put(item) for _, key := range keys { cacheBackend.items.Remove(key) } diff --git a/backend/options.go b/backend/options.go index 4311c7e..6892e53 100644 --- a/backend/options.go +++ b/backend/options.go @@ -2,7 +2,7 @@ package backend import ( "github.com/hyp3rd/hypercache/libs/serializer" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" "github.com/redis/go-redis/v9" ) @@ -35,7 +35,7 @@ func (rb *Redis) setSortBy(sortBy string) { } // FilterFunc is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. -type FilterFunc func(item *models.Item) bool // filters applied when listing the items in the cache +type FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache // IFilterableBackend is an interface that defines the methods that a backend should implement to be filterable. type IFilterableBackend interface { @@ -143,7 +143,7 @@ func WithSerializer[T Redis](serializer serializer.ISerializer) Option[Redis] { // // WithFilterFunc is an option that sets the filter function to use. // // The filter function is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. 
-// func WithFilterFunc[T any](fn func(item *models.Item) bool) FilterOption[T] { +// func WithFilterFunc[T any](fn func(item *types.Item) bool) FilterOption[T] { // return func(a *T) { // if filterable, ok := any(a).(IFilterableBackend); ok { // filterable.setFilterFunc(fn) diff --git a/backend/redis.go b/backend/redis.go index 3f7a393..3f1be8c 100644 --- a/backend/redis.go +++ b/backend/redis.go @@ -5,7 +5,7 @@ import ( "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/libs/serializer" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" "github.com/redis/go-redis/v9" ) @@ -70,7 +70,7 @@ func (cacheBackend *Redis) Count() int { } // Get retrieves the Item with the given key from the cacheBackend. If the item is not found, it returns nil. -func (cacheBackend *Redis) Get(key string) (item *models.Item, ok bool) { +func (cacheBackend *Redis) Get(key string) (item *types.Item, ok bool) { // Check if the key is in the set of keys isMember, err := cacheBackend.rdb.SIsMember(context.Background(), cacheBackend.keysSetName, key).Result() if err != nil { @@ -81,9 +81,9 @@ func (cacheBackend *Redis) Get(key string) (item *models.Item, ok bool) { } // Get the item from the cacheBackend - item = models.ItemPool.Get().(*models.Item) + item = types.ItemPool.Get().(*types.Item) // Return the item to the pool - defer models.ItemPool.Put(item) + defer types.ItemPool.Put(item) data, err := cacheBackend.rdb.HGet(context.Background(), key, "data").Bytes() if err != nil { @@ -102,7 +102,7 @@ func (cacheBackend *Redis) Get(key string) (item *models.Item, ok bool) { } // Set stores the Item in the cacheBackend. 
-func (cacheBackend *Redis) Set(item *models.Item) error { +func (cacheBackend *Redis) Set(item *types.Item) error { pipe := cacheBackend.rdb.TxPipeline() // Check if the item is valid @@ -142,8 +142,8 @@ func (cacheBackend *Redis) Set(item *models.Item) error { } // List returns a list of all the items in the cacheBackend that match the given filter options. -// func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Redis]) ([]*models.Item, error) { -func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) { +// func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Redis]) ([]*types.Item, error) { +func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { // Apply the filters // filterOptions := make([]FilterOption[Redis], len(filters)) // for i, option := range filters { @@ -169,14 +169,14 @@ func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*mod } // Create a slice to hold the items - items := make([]*models.Item, 0, len(keys)) + items := make([]*types.Item, 0, len(keys)) // Deserialize the items and add them to the slice of items to return for _, cmd := range cmds { data, _ := cmd.(*redis.MapStringStringCmd).Result() // Change the type assertion to match HGetAll - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) // Return the item to the pool - defer models.ItemPool.Put(item) + defer types.ItemPool.Put(item) err := cacheBackend.Serializer.Unmarshal([]byte(data["data"]), item) if err == nil { if cacheBackend.FilterFunc != nil && !cacheBackend.FilterFunc(item) { diff --git a/backend/sorting.go b/backend/sorting.go index 9d8b944..208d678 100644 --- a/backend/sorting.go +++ b/backend/sorting.go @@ -1,8 +1,6 @@ package backend -import ( - "github.com/hyp3rd/hypercache/models" -) +import "github.com/hyp3rd/hypercache/types" // SortFilters holds the filters applied when listing 
the items in the cache type SortFilters struct { @@ -13,11 +11,11 @@ type SortFilters struct { // If set to false, the items will be sorted in descending order. SortAscending bool // FilterFunc is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. - FilterFunc func(item *models.Item) bool // filters applied when listing the items in the cache + FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache } type itemSorterByKey struct { - items []*models.Item + items []*types.Item } func (s *itemSorterByKey) Len() int { return len(s.items) } @@ -25,7 +23,7 @@ func (s *itemSorterByKey) Swap(i, j int) { s.items[i], s.items[j] = s.items func (s *itemSorterByKey) Less(i, j int) bool { return s.items[i].Key < s.items[j].Key } type itemSorterByExpiration struct { - items []*models.Item + items []*types.Item } func (s *itemSorterByExpiration) Len() int { return len(s.items) } @@ -35,7 +33,7 @@ func (s *itemSorterByExpiration) Less(i, j int) bool { } type itemSorterByLastAccess struct { - items []*models.Item + items []*types.Item } func (s *itemSorterByLastAccess) Len() int { return len(s.items) } @@ -45,7 +43,7 @@ func (s *itemSorterByLastAccess) Less(i, j int) bool { } type itemSorterByAccessCount struct { - items []*models.Item + items []*types.Item } func (s *itemSorterByAccessCount) Len() int { return len(s.items) } diff --git a/datastructure/v3/cmap.go b/datastructure/v3/cmap.go index 4e41ac1..4a249d0 100644 --- a/datastructure/v3/cmap.go +++ b/datastructure/v3/cmap.go @@ -5,7 +5,7 @@ import ( "hash/fnv" "sync" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) const ( @@ -15,16 +15,16 @@ const ( ShardCount32 uint32 = uint32(ShardCount) ) -// ConcurrentMap is a "thread" safe map of type string:*models.Item. +// ConcurrentMap is a "thread" safe map of type string:*types.Item. 
// To avoid lock bottlenecks this map is dived to several (ShardCount) map shards. type ConcurrentMap struct { shards []*ConcurrentMapShard hasher hash.Hash32 } -// ConcurrentMapShard is a "thread" safe string to `*models.Item`. +// ConcurrentMapShard is a "thread" safe string to `*types.Item`. type ConcurrentMapShard struct { - items map[string]*models.Item + items map[string]*types.Item sync.RWMutex } @@ -42,7 +42,7 @@ func New() ConcurrentMap { func create() []*ConcurrentMapShard { shards := make([]*ConcurrentMapShard, ShardCount) for i := 0; i < ShardCount; i++ { - shards[i] = &ConcurrentMapShard{items: make(map[string]*models.Item)} + shards[i] = &ConcurrentMapShard{items: make(map[string]*types.Item)} } return shards } @@ -55,7 +55,7 @@ func (m *ConcurrentMap) GetShard(key string) *ConcurrentMapShard { } // Set sets the given value under the specified key. -func (m *ConcurrentMap) Set(key string, value *models.Item) { +func (m *ConcurrentMap) Set(key string, value *types.Item) { shard := m.GetShard(key) shard.Lock() shard.items[key] = value @@ -63,7 +63,7 @@ func (m *ConcurrentMap) Set(key string, value *models.Item) { } // Get retrieves an element from map under given key. -func (m *ConcurrentMap) Get(key string) (*models.Item, bool) { +func (m *ConcurrentMap) Get(key string) (*types.Item, bool) { // Get shard shard := m.GetShard(key) shard.RLock() @@ -85,7 +85,7 @@ func (m *ConcurrentMap) Has(key string) bool { } // Pop removes an element from the map and returns it. 
-func (m *ConcurrentMap) Pop(key string) (*models.Item, bool) { +func (m *ConcurrentMap) Pop(key string) (*types.Item, bool) { shard := m.GetShard(key) shard.Lock() item, ok := shard.items[key] @@ -101,7 +101,7 @@ func (m *ConcurrentMap) Pop(key string) (*models.Item, bool) { // Tuple is used by the IterBuffered functions to wrap two variables together over a channel, type Tuple struct { Key string - Val models.Item + Val types.Item } // IterBuffered returns a buffered iterator which could be used in a for range loop. diff --git a/datastructure/v4/cmap.go b/datastructure/v4/cmap.go index eb79798..bb12ee4 100644 --- a/datastructure/v4/cmap.go +++ b/datastructure/v4/cmap.go @@ -5,7 +5,7 @@ import ( "hash/fnv" "sync" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) const ( @@ -15,15 +15,15 @@ const ( ShardCount32 uint32 = uint32(ShardCount) ) -// ConcurrentMap is a "thread" safe map of type string:*models.Item. +// ConcurrentMap is a "thread" safe map of type string:*types.Item. // To avoid lock bottlenecks this map is divided into several (ShardCount) map shards. type ConcurrentMap struct { shards []*ConcurrentMapShard } -// ConcurrentMapShard is a "thread" safe string to `*models.Item` map shard. +// ConcurrentMapShard is a "thread" safe string to `*types.Item` map shard. type ConcurrentMapShard struct { - items map[string]*models.Item + items map[string]*types.Item hasher hash.Hash32 sync.RWMutex } @@ -40,7 +40,7 @@ func create() []*ConcurrentMapShard { shards := make([]*ConcurrentMapShard, ShardCount) for i := 0; i < ShardCount; i++ { shards[i] = &ConcurrentMapShard{ - items: make(map[string]*models.Item), + items: make(map[string]*types.Item), hasher: fnv.New32a(), } } @@ -62,7 +62,7 @@ func getShardIndex(m *ConcurrentMap, key string) uint32 { } // Set sets the given value under the specified key. 
-func (m *ConcurrentMap) Set(key string, value *models.Item) { +func (m *ConcurrentMap) Set(key string, value *types.Item) { shard := m.GetShard(key) shard.Lock() shard.items[key] = value @@ -70,7 +70,7 @@ func (m *ConcurrentMap) Set(key string, value *models.Item) { } // Get retrieves an element from map under given key. -func (m *ConcurrentMap) Get(key string) (*models.Item, bool) { +func (m *ConcurrentMap) Get(key string) (*types.Item, bool) { // Get shard shard := m.GetShard(key) shard.RLock() @@ -92,7 +92,7 @@ func (m *ConcurrentMap) Has(key string) bool { } // Pop removes an element from the map and returns it. -func (m *ConcurrentMap) Pop(key string) (*models.Item, bool) { +func (m *ConcurrentMap) Pop(key string) (*types.Item, bool) { shard := m.GetShard(key) shard.Lock() item, ok := shard.items[key] @@ -108,7 +108,7 @@ func (m *ConcurrentMap) Pop(key string) (*models.Item, bool) { // Tuple is used by the IterBuffered functions to wrap two variables together over a channel, type Tuple struct { Key string - Val models.Item + Val types.Item } // IterBuffered returns a buffered iterator which could be used in a for range loop. diff --git a/eviction/arc.go b/eviction/arc.go index fc17d64..1e1c65b 100644 --- a/eviction/arc.go +++ b/eviction/arc.go @@ -10,19 +10,19 @@ import ( "sync" "github.com/hyp3rd/hypercache/errors" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) // ARC is an in-memory cache that uses the Adaptive Replacement Cache (ARC) algorithm to manage its items. 
type ARC struct { - capacity int // capacity is the maximum number of items that can be stored in the cache - t1 map[string]*models.Item // t1 is a list of items that have been accessed recently - t2 map[string]*models.Item // t2 is a list of items that have been accessed less recently - b1 map[string]bool // b1 is a list of items that have been evicted from t1 - b2 map[string]bool // b2 is a list of items that have been evicted from t2 - p int // p is the promotion threshold - c int // c is the current number of items in the cache - mutex sync.RWMutex // mutex is a read-write mutex that protects the cache + capacity int // capacity is the maximum number of items that can be stored in the cache + t1 map[string]*types.Item // t1 is a list of items that have been accessed recently + t2 map[string]*types.Item // t2 is a list of items that have been accessed less recently + b1 map[string]bool // b1 is a list of items that have been evicted from t1 + b2 map[string]bool // b2 is a list of items that have been evicted from t2 + p int // p is the promotion threshold + c int // c is the current number of items in the cache + mutex sync.RWMutex // mutex is a read-write mutex that protects the cache } // NewARCAlgorithm creates a new in-memory cache with the given capacity and the Adaptive Replacement Cache (ARC) algorithm. 
@@ -33,8 +33,8 @@ func NewARCAlgorithm(capacity int) (*ARC, error) { } return &ARC{ capacity: capacity, - t1: make(map[string]*models.Item, capacity), - t2: make(map[string]*models.Item, capacity), + t1: make(map[string]*types.Item, capacity), + t2: make(map[string]*types.Item, capacity), b1: make(map[string]bool, capacity), b2: make(map[string]bool, capacity), p: 0, @@ -117,7 +117,7 @@ func (arc *ARC) Set(key string, value any) { arc.Delete(evictedKey) } // Add new item to cache - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) item.Value = value arc.t1[key] = item @@ -139,7 +139,7 @@ func (arc *ARC) Delete(key string) { if arc.p < 0 { arc.p = 0 } - models.ItemPool.Put(item) + types.ItemPool.Put(item) return } // Check t2 @@ -147,7 +147,7 @@ func (arc *ARC) Delete(key string) { if ok { delete(arc.t2, key) arc.c-- - models.ItemPool.Put(item) + types.ItemPool.Put(item) } } @@ -158,14 +158,14 @@ func (arc *ARC) Evict() (string, bool) { for key, val := range arc.t1 { delete(arc.t1, key) arc.c-- - models.ItemPool.Put(val) + types.ItemPool.Put(val) return key, true } // Check t2 for key, val := range arc.t2 { delete(arc.t2, key) arc.c-- - models.ItemPool.Put(val) + types.ItemPool.Put(val) return key, true } return "", false diff --git a/eviction/clock.go b/eviction/clock.go index 51e4619..4739e9c 100644 --- a/eviction/clock.go +++ b/eviction/clock.go @@ -10,12 +10,12 @@ import ( "sync" "github.com/hyp3rd/hypercache/errors" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) // ClockAlgorithm is an in-memory cache with the Clock algorithm. 
type ClockAlgorithm struct { - items []*models.Item + items []*types.Item keys map[string]int mutex sync.RWMutex evictMutex sync.Mutex @@ -30,7 +30,7 @@ func NewClockAlgorithm(capacity int) (*ClockAlgorithm, error) { } return &ClockAlgorithm{ - items: make([]*models.Item, capacity), + items: make([]*types.Item, capacity), keys: make(map[string]int, capacity), capacity: capacity, hand: 0, @@ -52,7 +52,7 @@ func (c *ClockAlgorithm) Evict() (string, bool) { item.AccessCount-- } else { delete(c.keys, item.Key) - models.ItemPool.Put(item) + types.ItemPool.Put(item) c.items[c.hand] = nil return item.Key, true } @@ -71,7 +71,7 @@ func (c *ClockAlgorithm) Set(key string, value any) { c.Delete(evictedKey) } - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) item.Key = key item.Value = value item.AccessCount = 1 @@ -107,5 +107,5 @@ func (c *ClockAlgorithm) Delete(key string) { item := c.items[index] delete(c.keys, key) c.items[index] = nil - models.ItemPool.Put(item) + types.ItemPool.Put(item) } diff --git a/examples/list/list.go b/examples/list/list.go index 739d463..f759b9c 100644 --- a/examples/list/list.go +++ b/examples/list/list.go @@ -7,7 +7,6 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/types" ) @@ -45,7 +44,7 @@ func main() { // Apply filters // Define a filter function - itemsFilterFunc := func(item *models.Item) bool { + itemsFilterFunc := func(item *types.Item) bool { // return time.Since(item.LastAccess) > 1*time.Microsecond return item.Value != "val84" } diff --git a/examples/redis/redis.go b/examples/redis/redis.go index 3f192ab..e3d61fa 100644 --- a/examples/redis/redis.go +++ b/examples/redis/redis.go @@ -8,7 +8,6 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" "github.com/hyp3rd/hypercache/backend/redis" - "github.com/hyp3rd/hypercache/models" 
"github.com/hyp3rd/hypercache/types" ) @@ -61,7 +60,7 @@ func main() { // Apply filters // Define a filter function - itemsFilterFunc := func(item *models.Item) bool { + itemsFilterFunc := func(item *types.Item) bool { // return time.Since(item.LastAccess) > 1*time.Microsecond return item.Value != "value-16" } diff --git a/hypercache.go b/hypercache.go index 8139d8d..f8e0de4 100644 --- a/hypercache.go +++ b/hypercache.go @@ -18,7 +18,6 @@ import ( "github.com/hyp3rd/hypercache/backend" "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/eviction" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" "github.com/hyp3rd/hypercache/types" "github.com/hyp3rd/hypercache/utils" @@ -225,7 +224,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { var ( expiredCount int64 - items []*models.Item + items []*types.Item err error ) @@ -236,7 +235,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { } sortByFilter := backend.WithSortBy(types.SortByExpiration.String()) - filterFuncFilter := backend.WithFilterFunc(func(item *models.Item) bool { + filterFuncFilter := backend.WithFilterFunc(func(item *types.Item) bool { return item.Expiration > 0 && time.Since(item.LastAccess) > item.Expiration }) @@ -247,7 +246,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { for _, item := range sortedItems { expiredCount++ hyperCache.Remove(item.Key) - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.StatsCollector.Incr("item_expired_count", 1) } @@ -312,7 +311,7 @@ func (hyperCache *HyperCache[T]) evictItem() (string, bool) { // If the capacity of the cache is reached, the cache will leverage the eviction algorithm proactively if the evictionInterval is zero. If not, the background process will take care of the eviction. 
func (hyperCache *HyperCache[T]) Set(key string, value any, expiration time.Duration) error { // Create a new cache item and set its properties - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) item.Key = key item.Value = value item.Expiration = expiration @@ -338,7 +337,7 @@ func (hyperCache *HyperCache[T]) Set(key string, value any, expiration time.Dura err = hyperCache.backend.Set(item) if err != nil { hyperCache.memoryAllocation.Add(-item.Size) - models.ItemPool.Put(item) + types.ItemPool.Put(item) return err } @@ -363,7 +362,7 @@ func (hyperCache *HyperCache[T]) Get(key string) (value any, ok bool) { // Check if the item has expired, if so, trigger the expiration loop if item.Expired() { go func() { - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.expirationTriggerCh <- true }() return nil, false @@ -375,7 +374,7 @@ func (hyperCache *HyperCache[T]) Get(key string) (value any, ok bool) { } // GetWithInfo retrieves the item with the given key from the cache returning the `Item` object and a boolean indicating if the item was found. 
-func (hyperCache *HyperCache[T]) GetWithInfo(key string) (*models.Item, bool) { +func (hyperCache *HyperCache[T]) GetWithInfo(key string) (*types.Item, bool) { item, ok := hyperCache.backend.Get(key) // Check if the item has expired if it exists, if so, trigger the expiration loop if !ok { @@ -385,7 +384,7 @@ func (hyperCache *HyperCache[T]) GetWithInfo(key string) (*models.Item, bool) { // Check if the item has expired, if so, trigger the expiration loop if item.Expired() { go func() { - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.expirationTriggerCh <- true }() return nil, false @@ -405,7 +404,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time // Check if the item has expired if item.Expired() { go func() { - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.expirationTriggerCh <- true }() return nil, errors.ErrKeyExpired @@ -418,7 +417,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time } // if the item is not found, add it to the cache - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) item.Key = key item.Value = value item.Expiration = expiration @@ -437,7 +436,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time hyperCache.memoryAllocation.Add(item.Size) if hyperCache.maxCacheSize > 0 && hyperCache.memoryAllocation.Load() > hyperCache.maxCacheSize { hyperCache.memoryAllocation.Add(-item.Size) - models.ItemPool.Put(item) + types.ItemPool.Put(item) return nil, errors.ErrCacheFull } @@ -445,7 +444,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time err = hyperCache.backend.Set(item) if err != nil { hyperCache.memoryAllocation.Add(-item.Size) - models.ItemPool.Put(item) + types.ItemPool.Put(item) return nil, err } @@ -454,7 +453,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time hyperCache.evictionAlgorithm.Set(key, 
item.Value) // If the cache is at capacity, evict an item when the eviction interval is zero if hyperCache.shouldEvict.Load() && hyperCache.backend.Count() > hyperCache.backend.Capacity() { - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.evictItem() } }() @@ -477,7 +476,7 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] // Check if the item has expired if item.Expired() { // Put the item back in the pool - models.ItemPool.Put(item) + types.ItemPool.Put(item) // Add the key to the errors map failed[key] = errors.ErrKeyExpired // Trigger the expiration loop @@ -493,14 +492,14 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] return } -func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { +func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { return hyperCache.backend.List(ctx, filters...) } // List lists the items in the cache that meet the specified criteria. 
// It takes in a variadic number of any type as filters, it then checks the backend type, and calls the corresponding // implementation of the List function for that backend, with the filters passed in as arguments -// func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...any) ([]*models.Item, error) { +// func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...any) ([]*types.Item, error) { // var listInstance listFunc // // checking the backend type @@ -520,13 +519,13 @@ func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IF // // listFunc is a type that defines a function that takes in a variable number of any type as arguments, and returns // // a slice of Item pointers, and an error -// type listFunc func(ctx context.Context, options ...any) ([]*models.Item, error) +// type listFunc func(ctx context.Context, options ...any) ([]*types.Item, error) // // listInMemory is a function that takes in an InMemory, and returns a ListFunc // // it takes any type as filters, and converts them to the specific FilterOption type for the InMemory, // // and calls the InMemory's List function with these filters. // func listInMemory(cacheBackend *backend.InMemory) listFunc { -// return func(ctx context.Context, options ...any) ([]*models.Item, error) { +// return func(ctx context.Context, options ...any) ([]*types.Item, error) { // // here we are converting the filters of any type to the specific FilterOption type for the InMemory // filterOptions := make([]backend.FilterOption[backend.InMemory], len(options)) // for i, option := range options { @@ -540,7 +539,7 @@ func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IF // // it takes any type as filters, and converts them to the specific FilterOption type for the Redis, // // and calls the Redis's List function with these filters. 
// func listRedis(cacheBackend *backend.Redis) listFunc { -// return func(ctx context.Context, options ...any) ([]*models.Item, error) { +// return func(ctx context.Context, options ...any) ([]*types.Item, error) { // // here we are converting the filters of any type to the specific FilterOption type for the Redis // filterOptions := make([]backend.FilterOption[backend.Redis], len(options)) // for i, option := range options { @@ -568,7 +567,7 @@ func (hyperCache *HyperCache[T]) Remove(keys ...string) { // Clear removes all items from the cache. func (hyperCache *HyperCache[T]) Clear() error { var ( - items []*models.Item + items []*types.Item err error ) diff --git a/middleware/logging.go b/middleware/logging.go index 50ddff3..938432d 100644 --- a/middleware/logging.go +++ b/middleware/logging.go @@ -6,8 +6,8 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" + "github.com/hyp3rd/hypercache/types" ) // Logger describes a logging interface allowing to implement different external, or custom logger. @@ -60,7 +60,7 @@ func (mw LoggingMiddleware) GetOrSet(key string, value any, expiration time.Dura } // GetWithInfo logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) GetWithInfo(key string) (item *models.Item, ok bool) { +func (mw LoggingMiddleware) GetWithInfo(key string) (item *types.Item, ok bool) { defer func(begin time.Time) { mw.logger.Printf("method GetWithInfo took: %s", time.Since(begin)) }(time.Now()) @@ -80,7 +80,7 @@ func (mw LoggingMiddleware) GetMultiple(keys ...string) (result map[string]any, } // List logs the time it takes to execute the next middleware. 
-func (mw LoggingMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { +func (mw LoggingMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { defer func(begin time.Time) { mw.logger.Printf("method List took: %s", time.Since(begin)) }(time.Now()) diff --git a/middleware/stats.go b/middleware/stats.go index 9f8d213..cbfdd21 100644 --- a/middleware/stats.go +++ b/middleware/stats.go @@ -6,8 +6,8 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" + "github.com/hyp3rd/hypercache/types" ) // StatsCollectorMiddleware is a middleware that collects stats. It can and should re-use the same stats collector as the hypercache. @@ -53,7 +53,7 @@ func (mw StatsCollectorMiddleware) GetOrSet(key string, value any, expiration ti } // GetWithInfo collects stats for the GetWithInfo method. -func (mw StatsCollectorMiddleware) GetWithInfo(key string) (*models.Item, bool) { +func (mw StatsCollectorMiddleware) GetWithInfo(key string) (*types.Item, bool) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_get_with_info_duration", time.Since(start).Nanoseconds()) @@ -73,7 +73,7 @@ func (mw StatsCollectorMiddleware) GetMultiple(keys ...string) (result map[strin } // List collects stats for the List method. 
-func (mw StatsCollectorMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { +func (mw StatsCollectorMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_list_duration", time.Since(start).Nanoseconds()) diff --git a/service.go b/service.go index b1286c1..d13391b 100644 --- a/service.go +++ b/service.go @@ -5,8 +5,8 @@ import ( "time" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" + "github.com/hyp3rd/hypercache/types" ) // Service is the service interface for the HyperCache. @@ -18,12 +18,12 @@ type Service interface { Set(key string, value any, expiration time.Duration) error // GetOrSet retrieves a value from the cache using the key, if the key does not exist, it will set the value using the key and expiration duration GetOrSet(key string, value any, expiration time.Duration) (any, error) - // GetWithInfo fetches from the cache using the key, and returns the `models.Item` and a boolean indicating if the key exists - GetWithInfo(key string) (*models.Item, bool) + // GetWithInfo fetches from the cache using the key, and returns the `types.Item` and a boolean indicating if the key exists + GetWithInfo(key string) (*types.Item, bool) // GetMultiple retrieves a list of values from the cache using the keys GetMultiple(keys ...string) (result map[string]any, failed map[string]error) // List returns a list of all items in the cache - List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) + List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) // Remove removes a value from the cache using the key Remove(keys ...string) // Clear removes all values from the cache diff --git a/models/item.go b/types/item.go similarity index 99% rename from models/item.go rename to types/item.go index 399c930..dafaf23 100644 --- 
a/models/item.go +++ b/types/item.go @@ -1,4 +1,4 @@ -package models +package types // Item represents an item in the cache. It has a key, value, expiration duration, and a last access time field. From b74e6d699576b784a8084614c728bf89f232f6c1 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Mon, 3 Apr 2023 15:30:44 +0200 Subject: [PATCH 07/10] linting --- backend/backend.go | 45 +---------------------- backend/filters.go | 24 +++++++++---- backend/inmemory.go | 45 ----------------------- backend/options.go | 87 --------------------------------------------- hypercache.go | 56 ++--------------------------- 5 files changed, 21 insertions(+), 236 deletions(-) diff --git a/backend/backend.go b/backend/backend.go index 9939e77..381e455 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -6,49 +6,7 @@ import ( "github.com/hyp3rd/hypercache/types" ) -// IBackendConstrain is the interface that defines the constrain type that must be implemented by cache backends. -// type IBackendConstrain interface { -// InMemory | Redis -// } - -// // IInMemory is the interface that must be implemented by in-memory cache backends. -// type IInMemory[T IBackendConstrain] interface { -// // IBackend[T] is the interface that must be implemented by cache backends. -// IBackend[T] -// // List the items in the cache that meet the specified criteria. -// List(options ...FilterOption[InMemory]) ([]*types.Item, error) -// // Clear removes all items from the cache. -// Clear() -// } - -// // IRedisBackend is the interface that must be implemented by Redis cache backends. -// type IRedisBackend[T IBackendConstrain] interface { -// // IBackend[T] is the interface that must be implemented by cache backends. -// IBackend[T] -// // List the items in the cache that meet the specified criteria. -// List(ctx context.Context, options ...FilterOption[Redis]) ([]*types.Item, error) -// // Clear removes all items from the cache. 
-// Clear() error -// } - -// // IBackend is the interface that must be implemented by cache backends. -// type IBackend[T IBackendConstrain] interface { -// // Get retrieves the item with the given key from the cache. -// // If the key is not found in the cache, it returns nil. -// Get(key string) (item *types.Item, ok bool) -// // Set adds a new item to the cache. -// Set(item *types.Item) error -// // Capacity returns the maximum number of items that can be stored in the cache. -// Capacity() int -// // SetCapacity sets the maximum number of items that can be stored in the cache. -// SetCapacity(capacity int) -// // Count returns the number of items currently stored in the cache. -// Count() int -// // Remove deletes the item with the given key from the cache. -// Remove(keys ...string) error -// } - -// IBackendConstrain is the interface that defines the constrain type that must be implemented by cache backends. +// IBackendConstrain is the interface that defines the constrain type implemented by cache backends. type IBackendConstrain interface { InMemory | Redis } @@ -69,7 +27,6 @@ type IBackend[T IBackendConstrain] interface { // Remove deletes the item with the given key from the cache. Remove(keys ...string) error // List the items in the cache that meet the specified criteria. - // List(ctx context.Context, options ...FilterOption[T]) ([]*types.Item, error) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) // Clear removes all items from the cache. Clear() error diff --git a/backend/filters.go b/backend/filters.go index 925f8cb..c1f31f8 100644 --- a/backend/filters.go +++ b/backend/filters.go @@ -6,14 +6,17 @@ import ( "github.com/hyp3rd/hypercache/types" ) +// IFilter is a backend agnostic interface for a filter that can be applied to a list of items. type IFilter interface { ApplyFilter(backendType string, items []*types.Item) []*types.Item } +// sortByFilter is a filter that sorts the items by a given field. 
type sortByFilter struct { field string } +// ApplyFilter applies the sort filter to the given list of items. func (f sortByFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { var sorter sort.Interface switch f.field { @@ -32,11 +35,13 @@ func (f sortByFilter) ApplyFilter(backendType string, items []*types.Item) []*ty return items } -type sortOrderFilter struct { +// SortOrderFilter is a filter that sorts the items by a given field. +type SortOrderFilter struct { ascending bool } -func (f sortOrderFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { +// ApplyFilter applies the sort order filter to the given list of items. +func (f SortOrderFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { if !f.ascending { sort.Slice(items, func(i, j int) bool { return items[j].Key > items[i].Key @@ -49,11 +54,13 @@ func (f sortOrderFilter) ApplyFilter(backendType string, items []*types.Item) [] return items } -type filterFuncFilter struct { +// filterFunc is a filter that filters the items by a given field's value. +type filterFunc struct { fn func(item *types.Item) bool } -func (f filterFuncFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { +// ApplyFilter applies the filter function to the given list of items. +func (f filterFunc) ApplyFilter(backendType string, items []*types.Item) []*types.Item { filteredItems := make([]*types.Item, 0) for _, item := range items { if f.fn(item) { @@ -63,14 +70,17 @@ func (f filterFuncFilter) ApplyFilter(backendType string, items []*types.Item) [ return filteredItems } +// WithSortBy returns a filter that sorts the items by a given field. func WithSortBy(field string) IFilter { return sortByFilter{field: field} } -func WithSortOrderAsc(ascending bool) sortOrderFilter { - return sortOrderFilter{ascending: ascending} +// WithSortOrderAsc returns a filter that determines whether to sort ascending or not. 
+func WithSortOrderAsc(ascending bool) SortOrderFilter { + return SortOrderFilter{ascending: ascending} } +// WithFilterFunc returns a filter that filters the items by a given field's value. func WithFilterFunc(fn func(item *types.Item) bool) IFilter { - return filterFuncFilter{fn: fn} + return filterFunc{fn: fn} } diff --git a/backend/inmemory.go b/backend/inmemory.go index a083608..b600559 100644 --- a/backend/inmemory.go +++ b/backend/inmemory.go @@ -11,7 +11,6 @@ import ( // InMemory is a cache backend that stores the items in memory, leveraging a custom `ConcurrentMap`. type InMemory struct { - // items datastructure.ConcurrentMap[string, *types.Item] // map to store the items in the cache items datastructure.ConcurrentMap // map to store the items in the cache capacity int // capacity of the cache, limits the number of items that can be stored in the cache mutex sync.RWMutex // mutex to protect the cache from concurrent access @@ -76,50 +75,6 @@ func (cacheBackend *InMemory) Set(item *types.Item) error { return nil } -// ListV1 returns a list of all items in the cache filtered and ordered by the given options -// func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*types.Item, error) { -// // Apply the filter options -// ApplyFilterOptions(cacheBackend, options...) 
- -// items := make([]*types.Item, 0, cacheBackend.items.Count()) -// wg := sync.WaitGroup{} -// wg.Add(cacheBackend.items.Count()) -// for item := range cacheBackend.items.IterBuffered() { -// go func(item datastructure.Tuple) { -// defer wg.Done() -// if cacheBackend.FilterFunc == nil || cacheBackend.FilterFunc(&item.Val) { -// items = append(items, &item.Val) -// } -// }(item) -// } -// wg.Wait() - -// if cacheBackend.SortBy == "" { -// return items, nil -// } - -// var sorter sort.Interface -// switch cacheBackend.SortBy { -// case types.SortByKey.String(): -// sorter = &itemSorterByKey{items: items} -// case types.SortByLastAccess.String(): -// sorter = &itemSorterByLastAccess{items: items} -// case types.SortByAccessCount.String(): -// sorter = &itemSorterByAccessCount{items: items} -// case types.SortByExpiration.String(): -// sorter = &itemSorterByExpiration{items: items} -// default: -// return nil, fmt.Errorf("unknown sortBy field: %s", cacheBackend.SortBy) -// } - -// if !cacheBackend.SortAscending { -// sorter = sort.Reverse(sorter) -// } - -// sort.Sort(sorter) -// return items, nil -// } - // List returns a list of all items in the cache filtered and ordered by the given options // func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*types.Item, error) { func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { diff --git a/backend/options.go b/backend/options.go index 6892e53..7851ec2 100644 --- a/backend/options.go +++ b/backend/options.go @@ -2,56 +2,9 @@ package backend import ( "github.com/hyp3rd/hypercache/libs/serializer" - "github.com/hyp3rd/hypercache/types" "github.com/redis/go-redis/v9" ) -// ISortableBackend is an interface that defines the methods that a backend should implement to be sortable. -type iSortableBackend interface { - // setSortAscending indicates whether the items should be sorted in ascending order. 
- setSortAscending(ascending bool) - // setSortBy sets the field to sort the items by. - setSortBy(sortBy string) -} - -// setSortAscending sets the `SortAscending` field of the `InMemory` backend. -func (inm *InMemory) setSortAscending(ascending bool) { - inm.SortAscending = ascending -} - -// setSortAscending sets the `SortAscending` field of the `Redis` backend. -func (rb *Redis) setSortAscending(ascending bool) { - rb.SortAscending = ascending -} - -// setSortBy sets the `SortBy` field of the `InMemory` backend. -func (inm *InMemory) setSortBy(sortBy string) { - inm.SortBy = sortBy -} - -// setSortBy sets the `SortBy` field of the `Redis` backend. -func (rb *Redis) setSortBy(sortBy string) { - rb.SortBy = sortBy -} - -// FilterFunc is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. -type FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache - -// IFilterableBackend is an interface that defines the methods that a backend should implement to be filterable. -type IFilterableBackend interface { - setFilterFunc(filterFunc FilterFunc) -} - -// setFilterFunc sets the `FilterFunc` field of the `InMemory` backend. -func (inm *InMemory) setFilterFunc(filterFunc FilterFunc) { - inm.FilterFunc = filterFunc -} - -// setFilterFunc sets the `FilterFunc` field of the `Redis` backend. -func (rb *Redis) setFilterFunc(filterFunc FilterFunc) { - rb.FilterFunc = filterFunc -} - // iConfigurableBackend is an interface that defines the methods that a backend should implement to be configurable. type iConfigurableBackend interface { // setCapacity sets the capacity of the cache. @@ -110,43 +63,3 @@ func WithSerializer[T Redis](serializer serializer.ISerializer) Option[Redis] { backend.Serializer = serializer } } - -// FilterOption is a function type that can be used to configure the `Filter` struct. 
-// type FilterOption[T any] func(*T) - -// // ApplyFilterOptions applies the given options to the given filter. -// func ApplyFilterOptions[T IBackendConstrain](backend *T, options ...FilterOption[T]) { -// for _, option := range options { -// option(backend) -// } -// } - -// // WithSortBy is an option that sets the field to sort the items by. -// // The field can be any of the fields in the `Item` struct. -// func WithSortBy[T IBackendConstrain](field types.SortingField) FilterOption[T] { -// return func(a *T) { -// if sortable, ok := any(a).(iSortableBackend); ok { -// sortable.setSortBy(field.String()) -// } -// } -// } - -// // WithSortOrderAsc is an option that sets the sort order to ascending or descending. -// // When sorting the items in the cache, they will be sorted in ascending or descending order based on the field specified with the `WithSortBy` option. -// func WithSortOrderAsc[T IBackendConstrain](ascending bool) FilterOption[T] { -// return func(a *T) { -// if sortable, ok := any(a).(iSortableBackend); ok { -// sortable.setSortAscending(ascending) -// } -// } -// } - -// // WithFilterFunc is an option that sets the filter function to use. -// // The filter function is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. -// func WithFilterFunc[T any](fn func(item *types.Item) bool) FilterOption[T] { -// return func(a *T) { -// if filterable, ok := any(a).(IFilterableBackend); ok { -// filterable.setFilterFunc(fn) -// } -// } -// } diff --git a/hypercache.go b/hypercache.go index f8e0de4..be1c107 100644 --- a/hypercache.go +++ b/hypercache.go @@ -492,62 +492,12 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] return } -func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { - return hyperCache.backend.List(ctx, filters...) 
-} - // List lists the items in the cache that meet the specified criteria. // It takes in a variadic number of any type as filters, it then checks the backend type, and calls the corresponding // implementation of the List function for that backend, with the filters passed in as arguments -// func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...any) ([]*types.Item, error) { -// var listInstance listFunc - -// // checking the backend type -// if hyperCache.cacheBackendChecker.IsInMemory() { -// // if the backend is an InMemory, we set the listFunc to the ListInMemory function -// listInstance = listInMemory(hyperCache.backend.(*backend.InMemory)) -// } - -// if hyperCache.cacheBackendChecker.IsRedis() { -// // if the backend is a Redis, we set the listFunc to the ListRedis function -// listInstance = listRedis(hyperCache.backend.(*backend.Redis)) -// } - -// // calling the corresponding implementation of the list function -// return listInstance(ctx, filters...) -// } - -// // listFunc is a type that defines a function that takes in a variable number of any type as arguments, and returns -// // a slice of Item pointers, and an error -// type listFunc func(ctx context.Context, options ...any) ([]*types.Item, error) - -// // listInMemory is a function that takes in an InMemory, and returns a ListFunc -// // it takes any type as filters, and converts them to the specific FilterOption type for the InMemory, -// // and calls the InMemory's List function with these filters. 
-// func listInMemory(cacheBackend *backend.InMemory) listFunc { -// return func(ctx context.Context, options ...any) ([]*types.Item, error) { -// // here we are converting the filters of any type to the specific FilterOption type for the InMemory -// filterOptions := make([]backend.FilterOption[backend.InMemory], len(options)) -// for i, option := range options { -// filterOptions[i] = option.(backend.FilterOption[backend.InMemory]) -// } -// return cacheBackend.List(filterOptions...) -// } -// } - -// // listRedis is a function that takes in a Redis, and returns a ListFunc -// // it takes any type as filters, and converts them to the specific FilterOption type for the Redis, -// // and calls the Redis's List function with these filters. -// func listRedis(cacheBackend *backend.Redis) listFunc { -// return func(ctx context.Context, options ...any) ([]*types.Item, error) { -// // here we are converting the filters of any type to the specific FilterOption type for the Redis -// filterOptions := make([]backend.FilterOption[backend.Redis], len(options)) -// for i, option := range options { -// filterOptions[i] = option.(backend.FilterOption[backend.Redis]) -// } -// return cacheBackend.List(ctx, filterOptions...) -// } -// } +func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { + return hyperCache.backend.List(ctx, filters...) +} // Remove removes items with the given key from the cache. If an item is not found, it does nothing. 
func (hyperCache *HyperCache[T]) Remove(keys ...string) { From 2b667b7d1fb668a8229f843f06a8f3d647e89cf5 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Mon, 3 Apr 2023 18:14:36 +0200 Subject: [PATCH 08/10] added context support --- backend/backend.go | 4 +- backend/inmemory.go | 86 ++++++++++++++++----------- backend/redis.go | 67 ++++++--------------- backend/sorting.go | 2 +- errors/errors.go | 3 + examples/clear/clear.go | 5 +- examples/eviction/eviction.go | 2 +- examples/get/get.go | 7 ++- examples/list/list.go | 30 ++++------ examples/redis/redis.go | 2 +- examples/service/service.go | 11 ++-- examples/size/size.go | 3 +- examples/stats/stats.go | 2 +- hypercache.go | 58 ++++++++---------- middleware/logging.go | 20 +++---- middleware/stats.go | 20 +++---- service.go | 10 ++-- tests/hypercache_get_multiple_test.go | 19 +++--- tests/hypercache_get_or_set_test.go | 5 +- tests/hypercache_get_test.go | 3 +- tests/hypercache_set_test.go | 3 +- 21 files changed, 174 insertions(+), 188 deletions(-) diff --git a/backend/backend.go b/backend/backend.go index 381e455..4bbf618 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -25,9 +25,9 @@ type IBackend[T IBackendConstrain] interface { // Count returns the number of items currently stored in the cache. Count() int // Remove deletes the item with the given key from the cache. - Remove(keys ...string) error + Remove(ctx context.Context, keys ...string) error // List the items in the cache that meet the specified criteria. List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) // Clear removes all items from the cache. - Clear() error + Clear(ctx context.Context) error } diff --git a/backend/inmemory.go b/backend/inmemory.go index b600559..b4466c2 100644 --- a/backend/inmemory.go +++ b/backend/inmemory.go @@ -11,10 +11,10 @@ import ( // InMemory is a cache backend that stores the items in memory, leveraging a custom `ConcurrentMap`. 
type InMemory struct { - items datastructure.ConcurrentMap // map to store the items in the cache - capacity int // capacity of the cache, limits the number of items that can be stored in the cache - mutex sync.RWMutex // mutex to protect the cache from concurrent access - SortFilters // filters applied when listing the items in the cache + items datastructure.ConcurrentMap // map to store the items in the cache + capacity int // capacity of the cache, limits the number of items that can be stored in the cache + sync.RWMutex // mutex to protect the cache from concurrent access + // SortFilters // filters applied when listing the items in the cache } // NewInMemory creates a new in-memory cache with the given options. @@ -68,56 +68,74 @@ func (cacheBackend *InMemory) Set(item *types.Item) error { return err } - cacheBackend.mutex.Lock() - defer cacheBackend.mutex.Unlock() + cacheBackend.Lock() + defer cacheBackend.Unlock() cacheBackend.items.Set(item.Key, item) return nil } // List returns a list of all items in the cache filtered and ordered by the given options -// func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*types.Item, error) { func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { // Apply the filters - cacheBackend.mutex.RLock() - defer cacheBackend.mutex.RUnlock() + cacheBackend.RLock() + defer cacheBackend.RUnlock() + + items := make([]*types.Item, 0, cacheBackend.items.Count()) - items := make([]*types.Item, 0, cacheBackend.Count()) - wg := sync.WaitGroup{} - wg.Add(cacheBackend.items.Count()) for item := range cacheBackend.items.IterBuffered() { - go func(item datastructure.Tuple) { - defer wg.Done() - if cacheBackend.FilterFunc == nil || cacheBackend.FilterFunc(&item.Val) { - items = append(items, &item.Val) - } - }(item) + copy := item + items = append(items, ©.Val) } - wg.Wait() - for _, filter := range filters { - items = filter.ApplyFilter("in-memory", 
items) + // Apply the filters + if len(filters) > 0 { + wg := sync.WaitGroup{} + wg.Add(len(filters)) + for _, filter := range filters { + go func(filter IFilter) { + defer wg.Done() + items = filter.ApplyFilter("in-memory", items) + }(filter) + } + wg.Wait() } return items, nil } // Remove removes items with the given key from the cacheBackend. If an item is not found, it does nothing. -func (cacheBackend *InMemory) Remove(keys ...string) (err error) { - //TODO: determine if handling the error or not - // var ok bool - // item := types.ItemPool.Get().(*types.Item) - // defer types.ItemPool.Put(item) - for _, key := range keys { - cacheBackend.items.Remove(key) +func (cacheBackend *InMemory) Remove(ctx context.Context, keys ...string) (err error) { + done := make(chan struct{}) + go func() { + defer close(done) + for _, key := range keys { + cacheBackend.items.Remove(key) + } + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + return errors.ErrTimeoutOrCanceled } - return } // Clear removes all items from the cacheBackend. 
-func (cacheBackend *InMemory) Clear() error { - // clear the cacheBackend - cacheBackend.items.Clear() - - return nil +func (cacheBackend *InMemory) Clear(ctx context.Context) error { + + done := make(chan struct{}) + go func() { + defer close(done) + // clear the cacheBackend + cacheBackend.items.Clear() + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + return errors.ErrTimeoutOrCanceled + } } diff --git a/backend/redis.go b/backend/redis.go index 3f1be8c..4a49df8 100644 --- a/backend/redis.go +++ b/backend/redis.go @@ -2,6 +2,7 @@ package backend import ( "context" + "sync" "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/libs/serializer" @@ -15,7 +16,7 @@ type Redis struct { capacity int // capacity of the cache, limits the number of items that can be stored in the cache keysSetName string // keysSetName is the name of the set that holds the keys of the items in the cache Serializer serializer.ISerializer // Serializer is the serializer used to serialize the items before storing them in the cache - SortFilters // SortFilters holds the filters applied when listing the items in the cache + // SortFilters // SortFilters holds the filters applied when listing the items in the cache } // NewRedis creates a new redis cache with the given options. @@ -142,14 +143,7 @@ func (cacheBackend *Redis) Set(item *types.Item) error { } // List returns a list of all the items in the cacheBackend that match the given filter options. 
-// func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Redis]) ([]*types.Item, error) { func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { - // Apply the filters - // filterOptions := make([]FilterOption[Redis], len(filters)) - // for i, option := range filters { - // filterOptions[i] = option.(FilterOption[Redis]) - // } - // Get the set of keys held in the cacheBackend with the given `keysSetName` keys, err := cacheBackend.rdb.SMembers(ctx, cacheBackend.keysSetName).Result() if err != nil { @@ -179,64 +173,39 @@ func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*typ defer types.ItemPool.Put(item) err := cacheBackend.Serializer.Unmarshal([]byte(data["data"]), item) if err == nil { - if cacheBackend.FilterFunc != nil && !cacheBackend.FilterFunc(item) { - continue - } items = append(items, item) } } // Apply the filters - for _, filter := range filters { - items = filter.ApplyFilter("redis", items) + if len(filters) > 0 { + wg := sync.WaitGroup{} + wg.Add(len(filters)) + for _, filter := range filters { + go func(filter IFilter) { + defer wg.Done() + items = filter.ApplyFilter("redis", items) + }(filter) + } + wg.Wait() } return items, nil - - // Check if the items should be sorted - // if cacheBackend.SortBy == "" { - // // No sorting - // return items, nil - // } - - // // Sort the items - // var sorter sort.Interface - // switch cacheBackend.SortBy { - // case types.SortByKey.String(): // Sort by key - // sorter = &itemSorterByKey{items: items} - // case types.SortByLastAccess.String(): // Sort by last access - // sorter = &itemSorterByLastAccess{items: items} - // case types.SortByAccessCount.String(): // Sort by access count - // sorter = &itemSorterByAccessCount{items: items} - // case types.SortByExpiration.String(): // Sort by expiration - // sorter = &itemSorterByExpiration{items: items} - // default: - // return nil, fmt.Errorf("unknown sortBy field: %s", 
cacheBackend.SortBy) - // } - - // // Reverse the sort order if needed - // if !cacheBackend.SortAscending { - // sorter = sort.Reverse(sorter) - // } - // // Sort the items by the given field - // sort.Sort(sorter) - - // return items, nil } // Remove removes an item from the cache with the given key -func (cacheBackend *Redis) Remove(keys ...string) error { +func (cacheBackend *Redis) Remove(ctx context.Context, keys ...string) error { pipe := cacheBackend.rdb.TxPipeline() - pipe.SRem(context.Background(), cacheBackend.keysSetName, keys).Result() - pipe.Del(context.Background(), keys...).Result() + pipe.SRem(ctx, cacheBackend.keysSetName, keys).Result() + pipe.Del(ctx, keys...).Result() - _, err := pipe.Exec(context.Background()) + _, err := pipe.Exec(ctx) return err } // Clear removes all items from the cache -func (cacheBackend *Redis) Clear() error { - _, err := cacheBackend.rdb.FlushDB(context.Background()).Result() +func (cacheBackend *Redis) Clear(ctx context.Context) error { + _, err := cacheBackend.rdb.FlushDB(ctx).Result() return err } diff --git a/backend/sorting.go b/backend/sorting.go index 208d678..36471be 100644 --- a/backend/sorting.go +++ b/backend/sorting.go @@ -11,7 +11,7 @@ type SortFilters struct { // If set to false, the items will be sorted in descending order. SortAscending bool // FilterFunc is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. - FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache + // FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache } type itemSorterByKey struct { diff --git a/errors/errors.go b/errors/errors.go index 2fc7782..96401fd 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -51,4 +51,7 @@ var ( // ErrBackendNotFound is returned when a backend is not found. 
ErrBackendNotFound = errors.New("backend not found") + + // ErrTimeoutOrCanceled is returned when a timeout or cancellation occurs. + ErrTimeoutOrCanceled = errors.New("the operation timedout or was canceled") ) diff --git a/examples/clear/clear.go b/examples/clear/clear.go index b55c714..44a84b9 100644 --- a/examples/clear/clear.go +++ b/examples/clear/clear.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "github.com/hyp3rd/hypercache" @@ -18,7 +19,7 @@ func main() { fmt.Println("adding 100000 items to cache") for i := 0; i < 100000; i++ { - cache.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i), 0) + cache.Set(context.TODO(), fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i), 0) } item, ok := cache.Get("key100") @@ -30,7 +31,7 @@ func main() { fmt.Println("count", cache.Count()) fmt.Println("allocation", cache.Allocation()) fmt.Println("clearing cache") - cache.Clear() + cache.Clear(context.TODO()) fmt.Println("capacity", cache.Capacity()) fmt.Println("count", cache.Count()) fmt.Println("allocation", cache.Allocation()) diff --git a/examples/eviction/eviction.go b/examples/eviction/eviction.go index b9dff8e..a87e488 100644 --- a/examples/eviction/eviction.go +++ b/examples/eviction/eviction.go @@ -49,7 +49,7 @@ func executeExample(evictionInterval time.Duration) { key := fmt.Sprintf("key%d", i) val := fmt.Sprintf("val%d", i) - err = cache.Set(key, val, time.Minute) + err = cache.Set(context.TODO(), key, val, time.Minute) if err != nil { fmt.Printf("unexpected error: %v\n", err) diff --git a/examples/get/get.go b/examples/get/get.go index 53c2f8f..9e64b19 100644 --- a/examples/get/get.go +++ b/examples/get/get.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "log" "time" @@ -24,7 +25,7 @@ func main() { key := fmt.Sprintf("key%d", i) val := fmt.Sprintf("val%d", i) - err = cache.Set(key, val, time.Minute) + err = cache.Set(context.TODO(), key, val, time.Minute) if err != nil { fmt.Printf("unexpected error: %v\n", err) @@ -34,7 +35,7 @@ 
func main() { log.Println("fetching items from the cache using the `GetMultiple` method, key11 does not exist") // Retrieve the specific of items from the cache - items, errs := cache.GetMultiple("key1", "key7", "key9", "key11") + items, errs := cache.GetMultiple(context.TODO(), "key1", "key7", "key9", "key11") // Print the errors if any for k, e := range errs { @@ -49,7 +50,7 @@ func main() { log.Println("fetching items from the cache using the `GetOrSet` method") // Retrieve a specific of item from the cache // If the item is not found, set it and return the value - val, err := cache.GetOrSet("key11", "val11", time.Minute) + val, err := cache.GetOrSet(context.TODO(), "key11", "val11", time.Minute) if err != nil { fmt.Println(err) return diff --git a/examples/list/list.go b/examples/list/list.go index f759b9c..99c75a4 100644 --- a/examples/list/list.go +++ b/examples/list/list.go @@ -27,41 +27,35 @@ func main() { key := fmt.Sprintf("%d", i) val := fmt.Sprintf("val%d", i) - err = hyperCache.Set(key, val, time.Minute) - + err = hyperCache.Set(context.TODO(), key, val, time.Minute) + time.Sleep(time.Millisecond * 350) if err != nil { fmt.Printf("unexpected error: %v\n", err) return } } - // Retrieve the list of items from the cache - items, err := hyperCache.List(context.TODO()) - if err != nil { - fmt.Println(err) - return - } - // Apply filters // Define a filter function itemsFilterFunc := func(item *types.Item) bool { // return time.Since(item.LastAccess) > 1*time.Microsecond - return item.Value != "val84" + return item.Value != "val8" } - sortByFilter := backend.WithSortBy(types.SortByKey.String()) - // sortOrderFilter := backend.WithSortOrderAsc(true) + sortByFilter := backend.WithSortBy(types.SortByExpiration.String()) + sortOrderFilter := backend.WithSortOrderAsc(true) // Create a filterFuncFilter with the defined filter function filter := backend.WithFilterFunc(itemsFilterFunc) - // Apply the filter to the items - filteredItems := 
filter.ApplyFilter("in-memory", items) - - filteredItems = sortByFilter.ApplyFilter("in-memory", filteredItems) - // sortedItems := sortOrderFilter.ApplyFilter("in-memory", filteredItems) + // Retrieve the list of items from the cache + items, err := hyperCache.List(context.TODO(), sortByFilter, sortOrderFilter, filter) + if err != nil { + fmt.Println(err) + return + } - for _, item := range filteredItems { + for _, item := range items { fmt.Println(item.Key, item.Value) } } diff --git a/examples/redis/redis.go b/examples/redis/redis.go index e3d61fa..8330e86 100644 --- a/examples/redis/redis.go +++ b/examples/redis/redis.go @@ -40,7 +40,7 @@ func main() { fmt.Println("setting 50 items to the cache") for i := 0; i < 50; i++ { - err = hyperCache.Set(fmt.Sprintf("key-%d", i), fmt.Sprintf("value-%d", i), time.Hour) + err = hyperCache.Set(context.TODO(), fmt.Sprintf("key-%d", i), fmt.Sprintf("value-%d", i), time.Hour) if err != nil { panic(err) } diff --git a/examples/service/service.go b/examples/service/service.go index a9d647f..465d418 100644 --- a/examples/service/service.go +++ b/examples/service/service.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "log" @@ -41,7 +42,7 @@ func main() { ) defer svc.Stop() - err = svc.Set("key string", "value any", 0) + err = svc.Set(context.TODO(), "key string", "value any", 0) if err != nil { fmt.Println(err) return @@ -54,10 +55,10 @@ func main() { fmt.Println(key) for i := 0; i < 10; i++ { - svc.Set(fmt.Sprintf("key%v", i), fmt.Sprintf("val%v", i), 0) + svc.Set(context.TODO(), fmt.Sprintf("key%v", i), fmt.Sprintf("val%v", i), 0) } - items, errs := svc.GetMultiple("key1", "key7", "key9", "key9999") + items, errs := svc.GetMultiple(context.TODO(), "key1", "key7", "key9", "key9999") for k, e := range errs { fmt.Printf("error fetching item %s: %s\n", k, e) } @@ -66,7 +67,7 @@ func main() { fmt.Println(k, v) } - val, err := svc.GetOrSet("key9999", "val9999", 0) + val, err := svc.GetOrSet(context.TODO(), "key9999", 
"val9999", 0) if err != nil { fmt.Println(err) return @@ -74,5 +75,5 @@ func main() { fmt.Println(val) - svc.Remove("key9999", "key1") + svc.Remove(context.TODO(), "key9999", "key1") } diff --git a/examples/size/size.go b/examples/size/size.go index 35f13cb..8d52ecb 100644 --- a/examples/size/size.go +++ b/examples/size/size.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "time" @@ -583,7 +584,7 @@ func main() { } for i := 0; i < 3; i++ { - err = cache.Set(fmt.Sprintf("key-%d", i), users, 0) + err = cache.Set(context.TODO(), fmt.Sprintf("key-%d", i), users, 0) if err != nil { fmt.Println(err, "set", i) } diff --git a/examples/stats/stats.go b/examples/stats/stats.go index 3abbf6c..1299c3a 100644 --- a/examples/stats/stats.go +++ b/examples/stats/stats.go @@ -37,7 +37,7 @@ func main() { key := fmt.Sprintf("key%d", i) val := fmt.Sprintf("val%d", i) - err = hyperCache.Set(key, val, time.Minute) + err = hyperCache.Set(context.TODO(), key, val, time.Minute) if err != nil { fmt.Printf("unexpected error: %v\n", err) diff --git a/hypercache.go b/hypercache.go index be1c107..623fdd1 100644 --- a/hypercache.go +++ b/hypercache.go @@ -173,6 +173,10 @@ func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hy // Initialize the expiration trigger channel with the buffer size set to half the capacity hyperCache.expirationTriggerCh = make(chan bool, hyperCache.backend.Capacity()/2) + // Initialize the eviction channel with the buffer size set to half the capacity + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + // Start expiration and eviction loops if capacity is greater than zero hyperCache.once.Do(func() { tick := time.NewTicker(hyperCache.expirationInterval) @@ -181,13 +185,13 @@ func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hy select { case <-tick.C: // trigger expiration - hyperCache.expirationLoop() + hyperCache.expirationLoop(ctx) case 
<-hyperCache.expirationTriggerCh: // trigger expiration - hyperCache.expirationLoop() + hyperCache.expirationLoop(ctx) case <-hyperCache.evictCh: // trigger eviction - hyperCache.evictionLoop() + hyperCache.evictionLoop(ctx) case <-hyperCache.stop: // stop the loops return @@ -204,7 +208,7 @@ func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hy for { select { case <-tick.C: - hyperCache.evictionLoop() + hyperCache.evictionLoop(ctx) case <-hyperCache.stop: return } @@ -217,7 +221,7 @@ func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hy } // expirationLoop is a function that runs in a separate goroutine and expires items in the cache based on their expiration duration. -func (hyperCache *HyperCache[T]) expirationLoop() { +func (hyperCache *HyperCache[T]) expirationLoop(ctx context.Context) { hyperCache.workerPool.Enqueue(func() error { hyperCache.StatsCollector.Incr("expiration_loop_count", 1) defer hyperCache.StatsCollector.Timing("expiration_loop_duration", time.Now().UnixNano()) @@ -245,7 +249,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { // iterate all expired items and remove them for _, item := range sortedItems { expiredCount++ - hyperCache.Remove(item.Key) + hyperCache.Remove(ctx, item.Key) types.ItemPool.Put(item) hyperCache.StatsCollector.Incr("item_expired_count", 1) } @@ -259,7 +263,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { // evictionLoop is a function that runs in a separate goroutine and evicts items from the cache based on the cache's capacity and the max eviction count. // The eviction is determined by the eviction algorithm. 
-func (hyperCache *HyperCache[T]) evictionLoop() { +func (hyperCache *HyperCache[T]) evictionLoop(ctx context.Context) { // Enqueue the eviction loop in the worker pool to avoid blocking the main goroutine if the eviction loop is slow hyperCache.workerPool.Enqueue(func() error { hyperCache.StatsCollector.Incr("eviction_loop_count", 1) @@ -282,7 +286,7 @@ func (hyperCache *HyperCache[T]) evictionLoop() { } // remove the item from the cache - hyperCache.Remove(key) + hyperCache.Remove(ctx, key) evictedCount++ hyperCache.StatsCollector.Incr("item_evicted_count", 1) } @@ -295,21 +299,21 @@ func (hyperCache *HyperCache[T]) evictionLoop() { // evictItem is a helper function that removes an item from the cache and returns the key of the evicted item. // If no item can be evicted, it returns a false. -func (hyperCache *HyperCache[T]) evictItem() (string, bool) { +func (hyperCache *HyperCache[T]) evictItem(ctx context.Context) (string, bool) { key, ok := hyperCache.evictionAlgorithm.Evict() if !ok { // no more items to evict return "", false } - hyperCache.Remove(key) + hyperCache.Remove(ctx, key) return key, true } // Set adds an item to the cache with the given key and value. If an item with the same key already exists, it updates the value of the existing item. // If the expiration duration is greater than zero, the item will expire after the specified duration. // If the capacity of the cache is reached, the cache will leverage the eviction algorithm proactively if the evictionInterval is zero. If not, the background process will take care of the eviction. 
-func (hyperCache *HyperCache[T]) Set(key string, value any, expiration time.Duration) error { +func (hyperCache *HyperCache[T]) Set(ctx context.Context, key string, value any, expiration time.Duration) error { // Create a new cache item and set its properties item := types.ItemPool.Get().(*types.Item) item.Key = key @@ -346,7 +350,7 @@ func (hyperCache *HyperCache[T]) Set(key string, value any, expiration time.Dura // If the cache is at capacity, evict an item when the eviction interval is zero if hyperCache.shouldEvict.Load() && hyperCache.backend.Count() > hyperCache.backend.Capacity() { - hyperCache.evictItem() + hyperCache.evictItem(ctx) } return nil @@ -397,7 +401,7 @@ func (hyperCache *HyperCache[T]) GetWithInfo(key string) (*types.Item, bool) { // GetOrSet retrieves the item with the given key. If the item is not found, it adds the item to the cache with the given value and expiration duration. // If the capacity of the cache is reached, leverage the eviction algorithm. -func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time.Duration) (any, error) { +func (hyperCache *HyperCache[T]) GetOrSet(ctx context.Context, key string, value any, expiration time.Duration) (any, error) { // if the item is found, return the value if item, ok := hyperCache.backend.Get(key); ok { @@ -454,14 +458,14 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time // If the cache is at capacity, evict an item when the eviction interval is zero if hyperCache.shouldEvict.Load() && hyperCache.backend.Count() > hyperCache.backend.Capacity() { types.ItemPool.Put(item) - hyperCache.evictItem() + hyperCache.evictItem(ctx) } }() return value, nil } // GetMultiple retrieves the items with the given keys from the cache. 
-func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string]any, failed map[string]error) { +func (hyperCache *HyperCache[T]) GetMultiple(ctx context.Context, keys ...string) (result map[string]any, failed map[string]error) { result = make(map[string]any, len(keys)) // Preallocate the result map failed = make(map[string]error, len(keys)) // Preallocate the errors map @@ -480,7 +484,7 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] // Add the key to the errors map failed[key] = errors.ErrKeyExpired // Trigger the expiration loop - go hyperCache.expirationLoop() + go hyperCache.expirationLoop(ctx) } else { item.Touch() // Update the last access time and access count // Add the item to the result map @@ -500,7 +504,7 @@ func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IF } // Remove removes items with the given key from the cache. If an item is not found, it does nothing. -func (hyperCache *HyperCache[T]) Remove(keys ...string) { +func (hyperCache *HyperCache[T]) Remove(ctx context.Context, keys ...string) { // Remove the item from the eviction algorithm // and update the memory allocation for _, key := range keys { @@ -511,11 +515,11 @@ func (hyperCache *HyperCache[T]) Remove(keys ...string) { hyperCache.evictionAlgorithm.Delete(key) } } - hyperCache.backend.Remove(keys...) + hyperCache.backend.Remove(ctx, keys...) } // Clear removes all items from the cache. 
-func (hyperCache *HyperCache[T]) Clear() error { +func (hyperCache *HyperCache[T]) Clear(ctx context.Context) error { var ( items []*types.Item err error @@ -528,20 +532,10 @@ func (hyperCache *HyperCache[T]) Clear() error { } // clear the cacheBackend - err = hyperCache.backend.Clear() + err = hyperCache.backend.Clear(ctx) if err != nil { return err } - // if cb, ok := hyperCache.backend.(*backend.InMemory); ok { - // items, err = cb.List() - // cb.Clear() - // } else if cb, ok := hyperCache.backend.(*backend.Redis); ok { - // items, err = cb.List(context.TODO()) - // if err != nil { - // return err - // } - // err = cb.Clear() - // } for _, item := range items { hyperCache.evictionAlgorithm.Delete(item.Key) @@ -559,14 +553,14 @@ func (hyperCache *HyperCache[T]) Capacity() int { // SetCapacity sets the capacity of the cache. If the new capacity is smaller than the current number of items in the cache, // it evicts the excess items from the cache. -func (hyperCache *HyperCache[T]) SetCapacity(capacity int) { +func (hyperCache *HyperCache[T]) SetCapacity(ctx context.Context, capacity int) { // set capacity of the backend hyperCache.backend.SetCapacity(capacity) // evaluate again if the cache should evict items proactively hyperCache.shouldEvict.Swap(hyperCache.evictionInterval == 0 && hyperCache.backend.Capacity() > 0) // if the cache size is greater than the new capacity, evict items if hyperCache.backend.Count() > hyperCache.Capacity() { - hyperCache.evictionLoop() + hyperCache.evictionLoop(ctx) } } diff --git a/middleware/logging.go b/middleware/logging.go index 938432d..622258e 100644 --- a/middleware/logging.go +++ b/middleware/logging.go @@ -40,23 +40,23 @@ func (mw LoggingMiddleware) Get(key string) (value interface{}, ok bool) { } // Set logs the time it takes to execute the next middleware. 
-func (mw LoggingMiddleware) Set(key string, value any, expiration time.Duration) error { +func (mw LoggingMiddleware) Set(ctx context.Context, key string, value any, expiration time.Duration) error { defer func(begin time.Time) { mw.logger.Printf("method Set took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("Set method called with key: %s value: %s", key, value) - return mw.next.Set(key, value, expiration) + return mw.next.Set(ctx, key, value, expiration) } // GetOrSet logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) GetOrSet(key string, value any, expiration time.Duration) (any, error) { +func (mw LoggingMiddleware) GetOrSet(ctx context.Context, key string, value any, expiration time.Duration) (any, error) { defer func(begin time.Time) { mw.logger.Printf("method GetOrSet took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("GetOrSet method invoked with key: %s value: %s", key, value) - return mw.next.GetOrSet(key, value, expiration) + return mw.next.GetOrSet(ctx, key, value, expiration) } // GetWithInfo logs the time it takes to execute the next middleware. @@ -70,13 +70,13 @@ func (mw LoggingMiddleware) GetWithInfo(key string) (item *types.Item, ok bool) } // GetMultiple logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) GetMultiple(keys ...string) (result map[string]any, failed map[string]error) { +func (mw LoggingMiddleware) GetMultiple(ctx context.Context, keys ...string) (result map[string]any, failed map[string]error) { defer func(begin time.Time) { mw.logger.Printf("method GetMultiple took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("GetMultiple method invoked with keys: %s", keys) - return mw.next.GetMultiple(keys...) + return mw.next.GetMultiple(ctx, keys...) } // List logs the time it takes to execute the next middleware. 
@@ -90,23 +90,23 @@ func (mw LoggingMiddleware) List(ctx context.Context, filters ...backend.IFilter } // Remove logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) Remove(keys ...string) { +func (mw LoggingMiddleware) Remove(ctx context.Context, keys ...string) { defer func(begin time.Time) { mw.logger.Printf("method Remove took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("Remove method invoked with keys: %s", keys) - mw.next.Remove(keys...) + mw.next.Remove(ctx, keys...) } // Clear logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) Clear() error { +func (mw LoggingMiddleware) Clear(ctx context.Context) error { defer func(begin time.Time) { mw.logger.Printf("method Clear took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("Clear method invoked") - return mw.next.Clear() + return mw.next.Clear(ctx) } // Capacity takes to execute the next middleware. diff --git a/middleware/stats.go b/middleware/stats.go index cbfdd21..a51c5a3 100644 --- a/middleware/stats.go +++ b/middleware/stats.go @@ -33,23 +33,23 @@ func (mw StatsCollectorMiddleware) Get(key string) (interface{}, bool) { } // Set collects stats for the Set method. -func (mw StatsCollectorMiddleware) Set(key string, value any, expiration time.Duration) error { +func (mw StatsCollectorMiddleware) Set(ctx context.Context, key string, value any, expiration time.Duration) error { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_set_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_set_count", 1) }() - return mw.next.Set(key, value, expiration) + return mw.next.Set(ctx, key, value, expiration) } // GetOrSet collects stats for the GetOrSet method. 
-func (mw StatsCollectorMiddleware) GetOrSet(key string, value any, expiration time.Duration) (any, error) { +func (mw StatsCollectorMiddleware) GetOrSet(ctx context.Context, key string, value any, expiration time.Duration) (any, error) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_get_or_set_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_get_or_set_count", 1) }() - return mw.next.GetOrSet(key, value, expiration) + return mw.next.GetOrSet(ctx, key, value, expiration) } // GetWithInfo collects stats for the GetWithInfo method. @@ -63,13 +63,13 @@ func (mw StatsCollectorMiddleware) GetWithInfo(key string) (*types.Item, bool) { } // GetMultiple collects stats for the GetMultiple method. -func (mw StatsCollectorMiddleware) GetMultiple(keys ...string) (result map[string]any, failed map[string]error) { +func (mw StatsCollectorMiddleware) GetMultiple(ctx context.Context, keys ...string) (result map[string]any, failed map[string]error) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_get_multiple_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_get_multiple_count", 1) }() - return mw.next.GetMultiple(keys...) + return mw.next.GetMultiple(ctx, keys...) } // List collects stats for the List method. @@ -83,23 +83,23 @@ func (mw StatsCollectorMiddleware) List(ctx context.Context, filters ...backend. } // Remove collects stats for the Remove method. -func (mw StatsCollectorMiddleware) Remove(keys ...string) { +func (mw StatsCollectorMiddleware) Remove(ctx context.Context, keys ...string) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_remove_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_remove_count", 1) }() - mw.next.Remove(keys...) + mw.next.Remove(ctx, keys...) } // Clear collects stats for the Clear method. 
-func (mw StatsCollectorMiddleware) Clear() error { +func (mw StatsCollectorMiddleware) Clear(ctx context.Context) error { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_clear_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_clear_count", 1) }() - return mw.next.Clear() + return mw.next.Clear(ctx) } // Capacity returns the capacity of the cache diff --git a/service.go b/service.go index d13391b..f47e6db 100644 --- a/service.go +++ b/service.go @@ -15,19 +15,19 @@ type Service interface { // Get retrieves a value from the cache using the key Get(key string) (value interface{}, ok bool) // Set stores a value in the cache using the key and expiration duration - Set(key string, value any, expiration time.Duration) error + Set(ctx context.Context, key string, value any, expiration time.Duration) error // GetOrSet retrieves a value from the cache using the key, if the key does not exist, it will set the value using the key and expiration duration - GetOrSet(key string, value any, expiration time.Duration) (any, error) + GetOrSet(ctx context.Context, key string, value any, expiration time.Duration) (any, error) // GetWithInfo fetches from the cache using the key, and returns the `types.Item` and a boolean indicating if the key exists GetWithInfo(key string) (*types.Item, bool) // GetMultiple retrieves a list of values from the cache using the keys - GetMultiple(keys ...string) (result map[string]any, failed map[string]error) + GetMultiple(ctx context.Context, keys ...string) (result map[string]any, failed map[string]error) // List returns a list of all items in the cache List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) // Remove removes a value from the cache using the key - Remove(keys ...string) + Remove(ctx context.Context, keys ...string) // Clear removes all values from the cache - Clear() error + Clear(ctx context.Context) error // Capacity returns the capacity of the cache Capacity() 
int // Allocation returns the allocation in bytes of the current cache diff --git a/tests/hypercache_get_multiple_test.go b/tests/hypercache_get_multiple_test.go index ae62d83..e297f1f 100644 --- a/tests/hypercache_get_multiple_test.go +++ b/tests/hypercache_get_multiple_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "time" @@ -24,9 +25,9 @@ func TestGetMultiple(t *testing.T) { wantValues: map[string]interface{}{"key1": 1, "key2": 2, "key3": 3}, wantErrs: map[string]error(map[string]error{}), setup: func(cache *hypercache.HyperCache[backend.InMemory]) { - cache.Set("key1", 1, 0) - cache.Set("key2", 2, 0) - cache.Set("key3", 3, 0) + cache.Set(context.TODO(), "key1", 1, 0) + cache.Set(context.TODO(), "key2", 2, 0) + cache.Set(context.TODO(), "key3", 3, 0) }, }, { @@ -35,8 +36,8 @@ func TestGetMultiple(t *testing.T) { wantValues: map[string]interface{}{"key1": 1, "key3": 3}, wantErrs: map[string]error{"key2": errors.ErrKeyNotFound}, setup: func(cache *hypercache.HyperCache[backend.InMemory]) { - cache.Set("key1", 1, 0) - cache.Set("key3", 3, 0) + cache.Set(context.TODO(), "key1", 1, 0) + cache.Set(context.TODO(), "key3", 3, 0) }, }, { @@ -45,10 +46,10 @@ func TestGetMultiple(t *testing.T) { wantValues: map[string]interface{}{"key2": 2, "key3": 3}, wantErrs: map[string]error{"key1": errors.ErrKeyNotFound}, setup: func(cache *hypercache.HyperCache[backend.InMemory]) { - cache.Set("key1", 1, time.Millisecond) + cache.Set(context.TODO(), "key1", 1, time.Millisecond) time.Sleep(2 * time.Millisecond) - cache.Set("key2", 2, 0) - cache.Set("key3", 3, 0) + cache.Set(context.TODO(), "key2", 2, 0) + cache.Set(context.TODO(), "key3", 3, 0) }, }, } @@ -69,7 +70,7 @@ func TestGetMultiple(t *testing.T) { assert.Nil(t, err) test.setup(cache) - gotValues, gotErrs := cache.GetMultiple(test.keys...) + gotValues, gotErrs := cache.GetMultiple(context.TODO(), test.keys...) 
assert.Equal(t, test.wantValues, gotValues) assert.Equal(t, test.wantErrs, gotErrs) }) diff --git a/tests/hypercache_get_or_set_test.go b/tests/hypercache_get_or_set_test.go index f02d40e..88d24f6 100644 --- a/tests/hypercache_get_or_set_test.go +++ b/tests/hypercache_get_or_set_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "time" @@ -78,7 +79,7 @@ func TestHyperCache_GetOrSet(t *testing.T) { shouldExpire := test.expectedErr == errors.ErrKeyExpired - val, err = cache.GetOrSet(test.key, test.value, test.expiry) + val, err = cache.GetOrSet(context.TODO(), test.key, test.value, test.expiry) if !shouldExpire { assert.Equal(t, test.expectedErr, err) } @@ -90,7 +91,7 @@ func TestHyperCache_GetOrSet(t *testing.T) { if shouldExpire { t.Log("sleeping for 2 Millisecond to allow the key to expire") time.Sleep(2 * time.Millisecond) - _, err = cache.GetOrSet(test.key, test.value, test.expiry) + _, err = cache.GetOrSet(context.TODO(), test.key, test.value, test.expiry) assert.Equal(t, test.expectedErr, err) } diff --git a/tests/hypercache_get_test.go b/tests/hypercache_get_test.go index 875da07..6b1b8b8 100644 --- a/tests/hypercache_get_test.go +++ b/tests/hypercache_get_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "time" @@ -68,7 +69,7 @@ func TestHyperCache_Get(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { if test.shouldSet { - err = cache.Set(test.key, test.value, test.expiry) + err = cache.Set(context.TODO(), test.key, test.value, test.expiry) if err != nil { assert.Equal(t, test.expectedErr, err) } diff --git a/tests/hypercache_set_test.go b/tests/hypercache_set_test.go index f544b48..c59b8e2 100644 --- a/tests/hypercache_set_test.go +++ b/tests/hypercache_set_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "time" @@ -74,7 +75,7 @@ func TestHyperCache_Set(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - err = cache.Set(test.key, 
test.value, test.expiry)
+			err = cache.Set(context.TODO(), test.key, test.value, test.expiry)
 			assert.Equal(t, test.expectedErr, err)
 			if err == nil {
 				val, ok := cache.Get(test.key)

From 52fe1a5d17c360f1d629ed79f999548c8f7f8222 Mon Sep 17 00:00:00 2001
From: Francesco Cosentino
Date: Mon, 3 Apr 2023 18:48:32 +0200
Subject: [PATCH 09/10] updated README

---
 README.md | 180 +++---------------------------------------------------
 1 file changed, 9 insertions(+), 171 deletions(-)

diff --git a/README.md b/README.md
index 2299180..1f96c81 100644
--- a/README.md
+++ b/README.md
@@ -4,9 +4,9 @@

 ## Synopsis

-HyperCache is a **thread-safe** **high-performance** cache implementation in `Go` that supports multiple backends with an optional size limit, expiration, and eviction of items supporting custom algorithms alongside the defaults. It can be used as a standalone cache or as a cache middleware for a service. It can implement a [service interface](./service.go) to intercept and decorate the cache methods with middleware (default or custom).
-It is optimized for performance and flexibility, allowing to specify the expiration and eviction intervals, and providing and registering new eviction algorithms, stats collectors, and middleware(s).
-It ships with a default [historigram stats collector](./stats/statscollector.go) and several eviction algorithms, but you can develop and register your own as long as it implements the [Eviction Algorithm interface](./eviction/eviction.go).:
+HyperCache is a **thread-safe** **high-performance** cache implementation in `Go` that supports multiple backends with an optional size limit, expiration, and eviction of items with custom algorithms alongside the defaults. It can be used as a standalone cache, in distributed environments, or as a cache middleware for a service. It can implement a [service interface](./service.go) to intercept and decorate the cache methods with middleware (default or custom).
+It is optimized for performance and flexibility, allowing you to specify the expiration and eviction intervals and providing and registering new eviction algorithms, stats collectors, and middleware(s).
+It ships with a default [histogram stats collector](./stats/stats.go) and several eviction algorithms. However, you can develop and register your own if it implements the [Eviction Algorithm interface](./eviction/eviction.go).:

 - [Recently Used (LRU) eviction algorithm](./eviction/lru.go)
 - [The Least Frequently Used (LFU) algorithm](./eviction/lfu.go)
@@ -18,7 +18,7 @@ It ships with a default [historigram stats collector](./stats/statscollector.go)

 - Thread-safe
 - High-performance
-- Supports multiple backends, default backends are:
+- Supports multiple, custom backends. Default backends are:
   1. [In-memory](./backend/inmemory.go)
   2. [Redis](./backend/redis.go)
 - Store items in the cache with a key and expiration duration
@@ -27,8 +27,8 @@ It ships with a default [historigram stats collector](./stats/statscollector.go)
 - Clear the cache of all items
 - Evitc items in the background based on the cache capacity and items access leveraging several custom eviction algorithms
 - Expire items in the background based on their duration
-- [Eviction Algorithm interface](./eviction.go) to implement custom eviction algorithms.
-- Stats collection with a default [stats collector](./stats/statscollector.go) or a custom one that implements the StatsCollector interface.
+- [Eviction Algorithm interface](./eviction/eviction.go) to implement custom eviction algorithms.
+- Stats collection with a default [stats collector](./stats/stats.go) or a custom one that implements the StatsCollector interface.
 - [Service interface implementation](./service.go) to allow intercepting cache methods and decorate them with custom or default middleware(s).
## Installation @@ -87,7 +87,7 @@ if err != nil { } ``` -For a fine-grained control over the cache configuration, use the `New` function, for instance: +For fine-grained control over the cache configuration, use the `New` function, for instance: ```golang config := hypercache.NewConfig[backend.InMemory]() @@ -108,174 +108,12 @@ if err != nil { } ``` -**For the full configuration options, refer to the [config.go](./config.go) file.** - -### Set - -Set adds an item to the cache with the given key and value. - -```golang -err := cache.Set("key", "value", time.Hour) -if err != nil { - // handle error -} -``` - -The `Set` function takes a key, a value, and a duration as arguments. The key must be a non-empty string, the value can be of any type, and the duration specifies how long the item should stay in the cache before it expires. - -### Get - -`Get` retrieves the item with the given key from the cache. - -```golang -value, ok := cache.Get("key") -if !ok { - // handle item not found -} -``` - -The `Get` function returns the value associated with the given key or an error if the key is not found or has expired. - -### Remove - -`Remove` deletes items with the given key from the cache. If an item is not found, it does nothing. - -```golang -err := cache.Remove("key", "key2", "key3") -if err != nil { - // handle error -} -``` - -The `Remove` function takes a variadic number of keys as arguments and returns an error if any keys are not found. - +**Refer to the [config.go](./config.go) file for the full configuration options.** **For a comprehensive API overview, see the [documentation](https://pkg.go.dev/github.com/hyp3rd/hypercache).** -## Service interface for microservices implementation - -The `Service` interface allows intercepting cache methods and decorating them with custom or default middleware(s). 
- -```golang -var svc hypercache.Service -hyperCache, err := hypercache.NewInMemoryWithDefaults(10) - -if err != nil { - fmt.Println(err) - return -} -// assign statsCollector of the backend to use it in middleware -statsCollector := hyperCache.StatsCollector -svc = hyperCache - -if err != nil { - fmt.Println(err) - return -} - -// Example of using zap logger from uber -logger, _ := zap.NewProduction() - -sugar := logger.Sugar() -defer sugar.Sync() -defer logger.Sync() - -// apply middleware in the same order as you want to execute them -svc = hypercache.ApplyMiddleware(svc, - // middleware.YourMiddleware, - func(next hypercache.Service) hypercache.Service { - return middleware.NewLoggingMiddleware(next, sugar) - }, - func(next hypercache.Service) hypercache.Service { - return middleware.NewStatsCollectorMiddleware(next, statsCollector) - }, -) - -err = svc.Set("key string", "value any", 0) -if err != nil { - fmt.Println(err) - return -} -key, ok := svc.Get("key string") -if !ok { - fmt.Println("key not found") - return -} -fmt.Println(key) -``` - ## Usage -Here is an example of using the HyperCache package. See the [examples](./examples/README.md) directory for a more comprehensive overview. 
- -```golang -package main - -import ( - "fmt" - "log" - "time" - - "github.com/hyp3rd/hypercache" -) - -func main() { - // Create a new HyperCache with a capacity of 10 - cache, err := hypercache.NewInMemoryWithDefaults(10) - if err != nil { - fmt.Println(err) - return - } - // Stop the cache when the program exits - defer cache.Stop() - - log.Println("adding items to the cache") - // Add 10 items to the cache - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%d", i) - val := fmt.Sprintf("val%d", i) - - err = cache.Set(key, val, time.Minute) - - if err != nil { - fmt.Printf("unexpected error: %v\n", err) - return - } - } - - log.Println("fetching items from the cache using the `GetMultiple` method, key11 does not exist") - // Retrieve the specific of items from the cache - items, errs := cache.GetMultiple("key1", "key7", "key9", "key11") - - // Print the errors if any - for k, e := range errs { - log.Printf("error fetching item %s: %s\n", k, e) - } - - // Print the items - for k, v := range items { - fmt.Println(k, v) - } - - log.Println("fetching items from the cache using the `GetOrSet` method") - // Retrieve a specific of item from the cache - // If the item is not found, set it and return the value - val, err := cache.GetOrSet("key11", "val11", time.Minute) - if err != nil { - fmt.Println(err) - return - } - fmt.Println(val) - - log.Println("fetching items from the cache using the simple `Get` method") - item, ok := cache.Get("key7") - if !ok { - fmt.Println("item not found") - return - } - fmt.Println(item) -} - -``` +Examples can be too broad for a readme, refer to the [examples](./examples/README.md) directory for a more comprehensive overview. 
## License

From 39d56927a2bc5ca247dba85b6d6973f69b918ae0 Mon Sep 17 00:00:00 2001
From: Francesco Cosentino
Date: Mon, 3 Apr 2023 18:53:28 +0200
Subject: [PATCH 10/10] updated benchmarks to meet the new signatures

---
 tests/benchmark/hypercache_get_benchmark_test.go  | 5 +++--
 tests/benchmark/hypercache_list_benchmark_test.go | 2 +-
 tests/benchmark/hypercache_set_benchmark_test.go  | 5 +++--
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/tests/benchmark/hypercache_get_benchmark_test.go b/tests/benchmark/hypercache_get_benchmark_test.go
index 273fbd4..6357658 100644
--- a/tests/benchmark/hypercache_get_benchmark_test.go
+++ b/tests/benchmark/hypercache_get_benchmark_test.go
@@ -1,6 +1,7 @@
 package tests

 import (
+	"context"
 	"testing"
 	"time"

@@ -13,7 +14,7 @@ func BenchmarkHyperCache_Get(b *testing.B) {
 	cache, _ := hypercache.NewInMemoryWithDefaults(1000)

 	// Store a value in the cache with a key and expiration duration
-	cache.Set("key", "value", time.Hour)
+	cache.Set(context.TODO(), "key", "value", time.Hour)

 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -38,7 +39,7 @@ func BenchmarkHyperCache_Get_ProactiveEviction(b *testing.B) {
 	cache, _ := hypercache.New(hypercache.GetDefaultManager(), config)

 	// Store a value in the cache with a key and expiration duration
-	cache.Set("key", "value", time.Hour)
+	cache.Set(context.TODO(), "key", "value", time.Hour)

 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
diff --git a/tests/benchmark/hypercache_list_benchmark_test.go b/tests/benchmark/hypercache_list_benchmark_test.go
index 0453bef..d008dbf 100644
--- a/tests/benchmark/hypercache_list_benchmark_test.go
+++ b/tests/benchmark/hypercache_list_benchmark_test.go
@@ -15,7 +15,7 @@ func BenchmarkHyperCache_List(b *testing.B) {
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		// Store a value in the cache with a key and expiration duration
-		cache.Set("key", "value", time.Hour)
+		cache.Set(context.TODO(), "key", "value", time.Hour)
 	}

 	list, _ := cache.List(context.TODO())

diff
--git a/tests/benchmark/hypercache_set_benchmark_test.go b/tests/benchmark/hypercache_set_benchmark_test.go index 718823c..40153f1 100644 --- a/tests/benchmark/hypercache_set_benchmark_test.go +++ b/tests/benchmark/hypercache_set_benchmark_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "fmt" "testing" "time" @@ -16,7 +17,7 @@ func BenchmarkHyperCache_Set(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { // Store a value in the cache with a key and expiration duration - cache.Set(fmt.Sprintf("key-%d", i), "value", time.Hour) + cache.Set(context.TODO(), fmt.Sprintf("key-%d", i), "value", time.Hour) } } @@ -38,6 +39,6 @@ func BenchmarkHyperCache_Set_Proactive_Eviction(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { // Store a value in the cache with a key and expiration duration - cache.Set(fmt.Sprintf("key-%d", i), "value", time.Hour) + cache.Set(context.TODO(), fmt.Sprintf("key-%d", i), "value", time.Hour) } }