From b74c06c99732f8b26863bbc3c842918329669ff0 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Mon, 3 Apr 2023 12:35:50 +0200 Subject: [PATCH 1/4] refactored models to types --- backend/backend.go | 18 ++++++++--------- backend/filters.go | 15 +++++++------- backend/inmemory.go | 24 +++++++++++------------ backend/options.go | 6 +++--- backend/redis.go | 20 +++++++++---------- backend/sorting.go | 14 ++++++------- datastructure/v3/cmap.go | 18 ++++++++--------- datastructure/v4/cmap.go | 18 ++++++++--------- eviction/arc.go | 32 +++++++++++++++--------------- eviction/clock.go | 12 ++++++------ examples/list/list.go | 3 +-- examples/redis/redis.go | 3 +-- hypercache.go | 41 +++++++++++++++++++-------------------- middleware/logging.go | 6 +++--- middleware/stats.go | 6 +++--- service.go | 8 ++++---- {models => types}/item.go | 2 +- 17 files changed, 120 insertions(+), 126 deletions(-) rename {models => types}/item.go (99%) diff --git a/backend/backend.go b/backend/backend.go index c4ef247..9939e77 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -3,7 +3,7 @@ package backend import ( "context" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) // IBackendConstrain is the interface that defines the constrain type that must be implemented by cache backends. @@ -16,7 +16,7 @@ import ( // // IBackend[T] is the interface that must be implemented by cache backends. // IBackend[T] // // List the items in the cache that meet the specified criteria. -// List(options ...FilterOption[InMemory]) ([]*models.Item, error) +// List(options ...FilterOption[InMemory]) ([]*types.Item, error) // // Clear removes all items from the cache. // Clear() // } @@ -26,7 +26,7 @@ import ( // // IBackend[T] is the interface that must be implemented by cache backends. // IBackend[T] // // List the items in the cache that meet the specified criteria. -// List(ctx context.Context, options ...FilterOption[Redis]) ([]*models.Item, error) +// List(ctx context.Context, options ...FilterOption[Redis]) ([]*types.Item, error) // // Clear removes all items from the cache. // Clear() error // } @@ -35,9 +35,9 @@ import ( // type IBackend[T IBackendConstrain] interface { // // Get retrieves the item with the given key from the cache. // // If the key is not found in the cache, it returns nil. -// Get(key string) (item *models.Item, ok bool) +// Get(key string) (item *types.Item, ok bool) // // Set adds a new item to the cache. -// Set(item *models.Item) error +// Set(item *types.Item) error // // Capacity returns the maximum number of items that can be stored in the cache. // Capacity() int // // SetCapacity sets the maximum number of items that can be stored in the cache. @@ -57,9 +57,9 @@ type IBackendConstrain interface { type IBackend[T IBackendConstrain] interface { // Get retrieves the item with the given key from the cache. // If the key is not found in the cache, it returns nil. - Get(key string) (item *models.Item, ok bool) + Get(key string) (item *types.Item, ok bool) // Set adds a new item to the cache. - Set(item *models.Item) error + Set(item *types.Item) error // Capacity returns the maximum number of items that can be stored in the cache. Capacity() int // SetCapacity sets the maximum number of items that can be stored in the cache. @@ -69,8 +69,8 @@ type IBackend[T IBackendConstrain] interface { // Remove deletes the item with the given key from the cache. Remove(keys ...string) error // List the items in the cache that meet the specified criteria. 
- // List(ctx context.Context, options ...FilterOption[T]) ([]*models.Item, error) - List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) + // List(ctx context.Context, options ...FilterOption[T]) ([]*types.Item, error) + List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) // Clear removes all items from the cache. Clear() error } diff --git a/backend/filters.go b/backend/filters.go index 83d5134..925f8cb 100644 --- a/backend/filters.go +++ b/backend/filters.go @@ -3,19 +3,18 @@ package backend import ( "sort" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/types" ) type IFilter interface { - ApplyFilter(backendType string, items []*models.Item) []*models.Item + ApplyFilter(backendType string, items []*types.Item) []*types.Item } type sortByFilter struct { field string } -func (f sortByFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { +func (f sortByFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { var sorter sort.Interface switch f.field { case types.SortByKey.String(): @@ -37,7 +36,7 @@ type sortOrderFilter struct { ascending bool } -func (f sortOrderFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { +func (f sortOrderFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { if !f.ascending { sort.Slice(items, func(i, j int) bool { return items[j].Key > items[i].Key @@ -51,11 +50,11 @@ func (f sortOrderFilter) ApplyFilter(backendType string, items []*models.Item) [ } type filterFuncFilter struct { - fn func(item *models.Item) bool + fn func(item *types.Item) bool } -func (f filterFuncFilter) ApplyFilter(backendType string, items []*models.Item) []*models.Item { - filteredItems := make([]*models.Item, 0) +func (f filterFuncFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { + filteredItems := make([]*types.Item, 0) for _, item := range items { if f.fn(item) { filteredItems = append(filteredItems, item) @@ -72,6 +71,6 @@ func WithSortOrderAsc(ascending bool) sortOrderFilter { return sortOrderFilter{ascending: ascending} } -func WithFilterFunc(fn func(item *models.Item) bool) IFilter { +func WithFilterFunc(fn func(item *types.Item) bool) IFilter { return filterFuncFilter{fn: fn} } diff --git a/backend/inmemory.go b/backend/inmemory.go index 72b1e57..a083608 100644 --- a/backend/inmemory.go +++ b/backend/inmemory.go @@ -6,12 +6,12 @@ import ( datastructure "github.com/hyp3rd/hypercache/datastructure/v4" "github.com/hyp3rd/hypercache/errors" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) // InMemory is a cache backend that stores the items in memory, leveraging a custom `ConcurrentMap`. type InMemory struct { - // items datastructure.ConcurrentMap[string, *models.Item] // map to store the items in the cache + // items datastructure.ConcurrentMap[string, *types.Item] // map to store the items in the cache items datastructure.ConcurrentMap // map to store the items in the cache capacity int // capacity of the cache, limits the number of items that can be stored in the cache mutex sync.RWMutex // mutex to protect the cache from concurrent access @@ -52,7 +52,7 @@ func (cacheBackend *InMemory) Count() int { } // Get retrieves the item with the given key from the cacheBackend. If the item is not found, it returns nil. 
-func (cacheBackend *InMemory) Get(key string) (item *models.Item, ok bool) { +func (cacheBackend *InMemory) Get(key string) (item *types.Item, ok bool) { item, ok = cacheBackend.items.Get(key) if !ok { return nil, false @@ -62,10 +62,10 @@ func (cacheBackend *InMemory) Get(key string) (item *models.Item, ok bool) { } // Set adds a Item to the cache. -func (cacheBackend *InMemory) Set(item *models.Item) error { +func (cacheBackend *InMemory) Set(item *types.Item) error { // Check for invalid key, value, or duration if err := item.Valid(); err != nil { - models.ItemPool.Put(item) + types.ItemPool.Put(item) return err } @@ -77,11 +77,11 @@ func (cacheBackend *InMemory) Set(item *models.Item) error { } // ListV1 returns a list of all items in the cache filtered and ordered by the given options -// func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*models.Item, error) { +// func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*types.Item, error) { // // Apply the filter options // ApplyFilterOptions(cacheBackend, options...) -// items := make([]*models.Item, 0, cacheBackend.items.Count()) +// items := make([]*types.Item, 0, cacheBackend.items.Count()) // wg := sync.WaitGroup{} // wg.Add(cacheBackend.items.Count()) // for item := range cacheBackend.items.IterBuffered() { @@ -121,13 +121,13 @@ func (cacheBackend *InMemory) Set(item *models.Item) error { // } // List returns a list of all items in the cache filtered and ordered by the given options -// func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*models.Item, error) { -func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) { +// func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*types.Item, error) { +func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { // Apply the filters cacheBackend.mutex.RLock() defer cacheBackend.mutex.RUnlock() - items := make([]*models.Item, 0, cacheBackend.Count()) + items := make([]*types.Item, 0, cacheBackend.Count()) wg := sync.WaitGroup{} wg.Add(cacheBackend.items.Count()) for item := range cacheBackend.items.IterBuffered() { @@ -151,8 +151,8 @@ func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]* func (cacheBackend *InMemory) Remove(keys ...string) (err error) { //TODO: determine if handling the error or not // var ok bool - // item := models.ItemPool.Get().(*models.Item) - // defer models.ItemPool.Put(item) + // item := types.ItemPool.Get().(*types.Item) + // defer types.ItemPool.Put(item) for _, key := range keys { cacheBackend.items.Remove(key) } diff --git a/backend/options.go b/backend/options.go index 4311c7e..6892e53 100644 --- a/backend/options.go +++ b/backend/options.go @@ -2,7 +2,7 @@ package backend import ( "github.com/hyp3rd/hypercache/libs/serializer" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" "github.com/redis/go-redis/v9" ) @@ -35,7 +35,7 @@ func (rb *Redis) setSortBy(sortBy string) { } // FilterFunc is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. 
-type FilterFunc func(item *models.Item) bool // filters applied when listing the items in the cache +type FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache // IFilterableBackend is an interface that defines the methods that a backend should implement to be filterable. type IFilterableBackend interface { @@ -143,7 +143,7 @@ func WithSerializer[T Redis](serializer serializer.ISerializer) Option[Redis] { // // WithFilterFunc is an option that sets the filter function to use. // // The filter function is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. -// func WithFilterFunc[T any](fn func(item *models.Item) bool) FilterOption[T] { +// func WithFilterFunc[T any](fn func(item *types.Item) bool) FilterOption[T] { // return func(a *T) { // if filterable, ok := any(a).(IFilterableBackend); ok { // filterable.setFilterFunc(fn) diff --git a/backend/redis.go b/backend/redis.go index 3f7a393..3f1be8c 100644 --- a/backend/redis.go +++ b/backend/redis.go @@ -5,7 +5,7 @@ import ( "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/libs/serializer" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" "github.com/redis/go-redis/v9" ) @@ -70,7 +70,7 @@ func (cacheBackend *Redis) Count() int { } // Get retrieves the Item with the given key from the cacheBackend. If the item is not found, it returns nil. -func (cacheBackend *Redis) Get(key string) (item *models.Item, ok bool) { +func (cacheBackend *Redis) Get(key string) (item *types.Item, ok bool) { // Check if the key is in the set of keys isMember, err := cacheBackend.rdb.SIsMember(context.Background(), cacheBackend.keysSetName, key).Result() if err != nil { @@ -81,9 +81,9 @@ func (cacheBackend *Redis) Get(key string) (item *models.Item, ok bool) { } // Get the item from the cacheBackend - item = models.ItemPool.Get().(*models.Item) + item = types.ItemPool.Get().(*types.Item) // Return the item to the pool - defer models.ItemPool.Put(item) + defer types.ItemPool.Put(item) data, err := cacheBackend.rdb.HGet(context.Background(), key, "data").Bytes() if err != nil { @@ -102,7 +102,7 @@ func (cacheBackend *Redis) Get(key string) (item *models.Item, ok bool) { } // Set stores the Item in the cacheBackend. -func (cacheBackend *Redis) Set(item *models.Item) error { +func (cacheBackend *Redis) Set(item *types.Item) error { pipe := cacheBackend.rdb.TxPipeline() // Check if the item is valid @@ -142,8 +142,8 @@ func (cacheBackend *Redis) Set(item *models.Item) error { } // List returns a list of all the items in the cacheBackend that match the given filter options. 
-// func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Redis]) ([]*models.Item, error) { -func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*models.Item, error) { +// func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Redis]) ([]*types.Item, error) { +func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { // Apply the filters // filterOptions := make([]FilterOption[Redis], len(filters)) // for i, option := range filters { @@ -169,14 +169,14 @@ func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*mod } // Create a slice to hold the items - items := make([]*models.Item, 0, len(keys)) + items := make([]*types.Item, 0, len(keys)) // Deserialize the items and add them to the slice of items to return for _, cmd := range cmds { data, _ := cmd.(*redis.MapStringStringCmd).Result() // Change the type assertion to match HGetAll - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) // Return the item to the pool - defer models.ItemPool.Put(item) + defer types.ItemPool.Put(item) err := cacheBackend.Serializer.Unmarshal([]byte(data["data"]), item) if err == nil { if cacheBackend.FilterFunc != nil && !cacheBackend.FilterFunc(item) { diff --git a/backend/sorting.go b/backend/sorting.go index 9d8b944..208d678 100644 --- a/backend/sorting.go +++ b/backend/sorting.go @@ -1,8 +1,6 @@ package backend -import ( - "github.com/hyp3rd/hypercache/models" -) +import "github.com/hyp3rd/hypercache/types" // SortFilters holds the filters applied when listing the items in the cache type SortFilters struct { @@ -13,11 +11,11 @@ type SortFilters struct { // If set to false, the items will be sorted in descending order. SortAscending bool // FilterFunc is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. - FilterFunc func(item *models.Item) bool // filters applied when listing the items in the cache + FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache } type itemSorterByKey struct { - items []*models.Item + items []*types.Item } func (s *itemSorterByKey) Len() int { return len(s.items) } @@ -25,7 +23,7 @@ func (s *itemSorterByKey) Swap(i, j int) { s.items[i], s.items[j] = s.items func (s *itemSorterByKey) Less(i, j int) bool { return s.items[i].Key < s.items[j].Key } type itemSorterByExpiration struct { - items []*models.Item + items []*types.Item } func (s *itemSorterByExpiration) Len() int { return len(s.items) } @@ -35,7 +33,7 @@ func (s *itemSorterByExpiration) Less(i, j int) bool { } type itemSorterByLastAccess struct { - items []*models.Item + items []*types.Item } func (s *itemSorterByLastAccess) Len() int { return len(s.items) } @@ -45,7 +43,7 @@ func (s *itemSorterByLastAccess) Less(i, j int) bool { } type itemSorterByAccessCount struct { - items []*models.Item + items []*types.Item } func (s *itemSorterByAccessCount) Len() int { return len(s.items) } diff --git a/datastructure/v3/cmap.go b/datastructure/v3/cmap.go index 4e41ac1..4a249d0 100644 --- a/datastructure/v3/cmap.go +++ b/datastructure/v3/cmap.go @@ -5,7 +5,7 @@ import ( "hash/fnv" "sync" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) const ( @@ -15,16 +15,16 @@ const ( ShardCount32 uint32 = uint32(ShardCount) ) -// ConcurrentMap is a "thread" safe map of type string:*models.Item. 
+// ConcurrentMap is a "thread" safe map of type string:*types.Item. // To avoid lock bottlenecks this map is dived to several (ShardCount) map shards. type ConcurrentMap struct { shards []*ConcurrentMapShard hasher hash.Hash32 } -// ConcurrentMapShard is a "thread" safe string to `*models.Item`. +// ConcurrentMapShard is a "thread" safe string to `*types.Item`. type ConcurrentMapShard struct { - items map[string]*models.Item + items map[string]*types.Item sync.RWMutex } @@ -42,7 +42,7 @@ func New() ConcurrentMap { func create() []*ConcurrentMapShard { shards := make([]*ConcurrentMapShard, ShardCount) for i := 0; i < ShardCount; i++ { - shards[i] = &ConcurrentMapShard{items: make(map[string]*models.Item)} + shards[i] = &ConcurrentMapShard{items: make(map[string]*types.Item)} } return shards } @@ -55,7 +55,7 @@ func (m *ConcurrentMap) GetShard(key string) *ConcurrentMapShard { } // Set sets the given value under the specified key. -func (m *ConcurrentMap) Set(key string, value *models.Item) { +func (m *ConcurrentMap) Set(key string, value *types.Item) { shard := m.GetShard(key) shard.Lock() shard.items[key] = value @@ -63,7 +63,7 @@ func (m *ConcurrentMap) Set(key string, value *models.Item) { } // Get retrieves an element from map under given key. -func (m *ConcurrentMap) Get(key string) (*models.Item, bool) { +func (m *ConcurrentMap) Get(key string) (*types.Item, bool) { // Get shard shard := m.GetShard(key) shard.RLock() @@ -85,7 +85,7 @@ func (m *ConcurrentMap) Has(key string) bool { } // Pop removes an element from the map and returns it. -func (m *ConcurrentMap) Pop(key string) (*models.Item, bool) { +func (m *ConcurrentMap) Pop(key string) (*types.Item, bool) { shard := m.GetShard(key) shard.Lock() item, ok := shard.items[key] @@ -101,7 +101,7 @@ func (m *ConcurrentMap) Pop(key string) (*models.Item, bool) { // Tuple is used by the IterBuffered functions to wrap two variables together over a channel, type Tuple struct { Key string - Val models.Item + Val types.Item } // IterBuffered returns a buffered iterator which could be used in a for range loop. diff --git a/datastructure/v4/cmap.go b/datastructure/v4/cmap.go index eb79798..bb12ee4 100644 --- a/datastructure/v4/cmap.go +++ b/datastructure/v4/cmap.go @@ -5,7 +5,7 @@ import ( "hash/fnv" "sync" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) const ( @@ -15,15 +15,15 @@ const ( ShardCount32 uint32 = uint32(ShardCount) ) -// ConcurrentMap is a "thread" safe map of type string:*models.Item. +// ConcurrentMap is a "thread" safe map of type string:*types.Item. // To avoid lock bottlenecks this map is divided into several (ShardCount) map shards. type ConcurrentMap struct { shards []*ConcurrentMapShard } -// ConcurrentMapShard is a "thread" safe string to `*models.Item` map shard. +// ConcurrentMapShard is a "thread" safe string to `*types.Item` map shard. type ConcurrentMapShard struct { - items map[string]*models.Item + items map[string]*types.Item hasher hash.Hash32 sync.RWMutex } @@ -40,7 +40,7 @@ func create() []*ConcurrentMapShard { shards := make([]*ConcurrentMapShard, ShardCount) for i := 0; i < ShardCount; i++ { shards[i] = &ConcurrentMapShard{ - items: make(map[string]*models.Item), + items: make(map[string]*types.Item), hasher: fnv.New32a(), } } @@ -62,7 +62,7 @@ func getShardIndex(m *ConcurrentMap, key string) uint32 { } // Set sets the given value under the specified key. 
-func (m *ConcurrentMap) Set(key string, value *models.Item) { +func (m *ConcurrentMap) Set(key string, value *types.Item) { shard := m.GetShard(key) shard.Lock() shard.items[key] = value @@ -70,7 +70,7 @@ func (m *ConcurrentMap) Set(key string, value *models.Item) { } // Get retrieves an element from map under given key. -func (m *ConcurrentMap) Get(key string) (*models.Item, bool) { +func (m *ConcurrentMap) Get(key string) (*types.Item, bool) { // Get shard shard := m.GetShard(key) shard.RLock() @@ -92,7 +92,7 @@ func (m *ConcurrentMap) Has(key string) bool { } // Pop removes an element from the map and returns it. -func (m *ConcurrentMap) Pop(key string) (*models.Item, bool) { +func (m *ConcurrentMap) Pop(key string) (*types.Item, bool) { shard := m.GetShard(key) shard.Lock() item, ok := shard.items[key] @@ -108,7 +108,7 @@ func (m *ConcurrentMap) Pop(key string) (*models.Item, bool) { // Tuple is used by the IterBuffered functions to wrap two variables together over a channel, type Tuple struct { Key string - Val models.Item + Val types.Item } // IterBuffered returns a buffered iterator which could be used in a for range loop. diff --git a/eviction/arc.go b/eviction/arc.go index fc17d64..1e1c65b 100644 --- a/eviction/arc.go +++ b/eviction/arc.go @@ -10,19 +10,19 @@ import ( "sync" "github.com/hyp3rd/hypercache/errors" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) // ARC is an in-memory cache that uses the Adaptive Replacement Cache (ARC) algorithm to manage its items. type ARC struct { - capacity int // capacity is the maximum number of items that can be stored in the cache - t1 map[string]*models.Item // t1 is a list of items that have been accessed recently - t2 map[string]*models.Item // t2 is a list of items that have been accessed less recently - b1 map[string]bool // b1 is a list of items that have been evicted from t1 - b2 map[string]bool // b2 is a list of items that have been evicted from t2 - p int // p is the promotion threshold - c int // c is the current number of items in the cache - mutex sync.RWMutex // mutex is a read-write mutex that protects the cache + capacity int // capacity is the maximum number of items that can be stored in the cache + t1 map[string]*types.Item // t1 is a list of items that have been accessed recently + t2 map[string]*types.Item // t2 is a list of items that have been accessed less recently + b1 map[string]bool // b1 is a list of items that have been evicted from t1 + b2 map[string]bool // b2 is a list of items that have been evicted from t2 + p int // p is the promotion threshold + c int // c is the current number of items in the cache + mutex sync.RWMutex // mutex is a read-write mutex that protects the cache } // NewARCAlgorithm creates a new in-memory cache with the given capacity and the Adaptive Replacement Cache (ARC) algorithm. 
@@ -33,8 +33,8 @@ func NewARCAlgorithm(capacity int) (*ARC, error) { } return &ARC{ capacity: capacity, - t1: make(map[string]*models.Item, capacity), - t2: make(map[string]*models.Item, capacity), + t1: make(map[string]*types.Item, capacity), + t2: make(map[string]*types.Item, capacity), b1: make(map[string]bool, capacity), b2: make(map[string]bool, capacity), p: 0, @@ -117,7 +117,7 @@ func (arc *ARC) Set(key string, value any) { arc.Delete(evictedKey) } // Add new item to cache - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) item.Value = value arc.t1[key] = item @@ -139,7 +139,7 @@ func (arc *ARC) Delete(key string) { if arc.p < 0 { arc.p = 0 } - models.ItemPool.Put(item) + types.ItemPool.Put(item) return } // Check t2 @@ -147,7 +147,7 @@ func (arc *ARC) Delete(key string) { if ok { delete(arc.t2, key) arc.c-- - models.ItemPool.Put(item) + types.ItemPool.Put(item) } } @@ -158,14 +158,14 @@ func (arc *ARC) Evict() (string, bool) { for key, val := range arc.t1 { delete(arc.t1, key) arc.c-- - models.ItemPool.Put(val) + types.ItemPool.Put(val) return key, true } // Check t2 for key, val := range arc.t2 { delete(arc.t2, key) arc.c-- - models.ItemPool.Put(val) + types.ItemPool.Put(val) return key, true } return "", false diff --git a/eviction/clock.go b/eviction/clock.go index 51e4619..4739e9c 100644 --- a/eviction/clock.go +++ b/eviction/clock.go @@ -10,12 +10,12 @@ import ( "sync" "github.com/hyp3rd/hypercache/errors" - "github.com/hyp3rd/hypercache/models" + "github.com/hyp3rd/hypercache/types" ) // ClockAlgorithm is an in-memory cache with the Clock algorithm. type ClockAlgorithm struct { - items []*models.Item + items []*types.Item keys map[string]int mutex sync.RWMutex evictMutex sync.Mutex @@ -30,7 +30,7 @@ func NewClockAlgorithm(capacity int) (*ClockAlgorithm, error) { } return &ClockAlgorithm{ - items: make([]*models.Item, capacity), + items: make([]*types.Item, capacity), keys: make(map[string]int, capacity), capacity: capacity, hand: 0, @@ -52,7 +52,7 @@ func (c *ClockAlgorithm) Evict() (string, bool) { item.AccessCount-- } else { delete(c.keys, item.Key) - models.ItemPool.Put(item) + types.ItemPool.Put(item) c.items[c.hand] = nil return item.Key, true } @@ -71,7 +71,7 @@ func (c *ClockAlgorithm) Set(key string, value any) { c.Delete(evictedKey) } - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) item.Key = key item.Value = value item.AccessCount = 1 @@ -107,5 +107,5 @@ func (c *ClockAlgorithm) Delete(key string) { item := c.items[index] delete(c.keys, key) c.items[index] = nil - models.ItemPool.Put(item) + types.ItemPool.Put(item) } diff --git a/examples/list/list.go b/examples/list/list.go index 739d463..f759b9c 100644 --- a/examples/list/list.go +++ b/examples/list/list.go @@ -7,7 +7,6 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/types" ) @@ -45,7 +44,7 @@ func main() { // Apply filters // Define a filter function - itemsFilterFunc := func(item *models.Item) bool { + itemsFilterFunc := func(item *types.Item) bool { // return time.Since(item.LastAccess) > 1*time.Microsecond return item.Value != "val84" } diff --git a/examples/redis/redis.go b/examples/redis/redis.go index 3f192ab..e3d61fa 100644 --- a/examples/redis/redis.go +++ b/examples/redis/redis.go @@ -8,7 +8,6 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" "github.com/hyp3rd/hypercache/backend/redis" 
- "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/types" ) @@ -61,7 +60,7 @@ func main() { // Apply filters // Define a filter function - itemsFilterFunc := func(item *models.Item) bool { + itemsFilterFunc := func(item *types.Item) bool { // return time.Since(item.LastAccess) > 1*time.Microsecond return item.Value != "value-16" } diff --git a/hypercache.go b/hypercache.go index 8139d8d..f8e0de4 100644 --- a/hypercache.go +++ b/hypercache.go @@ -18,7 +18,6 @@ import ( "github.com/hyp3rd/hypercache/backend" "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/eviction" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" "github.com/hyp3rd/hypercache/types" "github.com/hyp3rd/hypercache/utils" @@ -225,7 +224,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { var ( expiredCount int64 - items []*models.Item + items []*types.Item err error ) @@ -236,7 +235,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { } sortByFilter := backend.WithSortBy(types.SortByExpiration.String()) - filterFuncFilter := backend.WithFilterFunc(func(item *models.Item) bool { + filterFuncFilter := backend.WithFilterFunc(func(item *types.Item) bool { return item.Expiration > 0 && time.Since(item.LastAccess) > item.Expiration }) @@ -247,7 +246,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { for _, item := range sortedItems { expiredCount++ hyperCache.Remove(item.Key) - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.StatsCollector.Incr("item_expired_count", 1) } @@ -312,7 +311,7 @@ func (hyperCache *HyperCache[T]) evictItem() (string, bool) { // If the capacity of the cache is reached, the cache will leverage the eviction algorithm proactively if the evictionInterval is zero. If not, the background process will take care of the eviction. func (hyperCache *HyperCache[T]) Set(key string, value any, expiration time.Duration) error { // Create a new cache item and set its properties - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) item.Key = key item.Value = value item.Expiration = expiration @@ -338,7 +337,7 @@ func (hyperCache *HyperCache[T]) Set(key string, value any, expiration time.Dura err = hyperCache.backend.Set(item) if err != nil { hyperCache.memoryAllocation.Add(-item.Size) - models.ItemPool.Put(item) + types.ItemPool.Put(item) return err } @@ -363,7 +362,7 @@ func (hyperCache *HyperCache[T]) Get(key string) (value any, ok bool) { // Check if the item has expired, if so, trigger the expiration loop if item.Expired() { go func() { - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.expirationTriggerCh <- true }() return nil, false @@ -375,7 +374,7 @@ func (hyperCache *HyperCache[T]) Get(key string) (value any, ok bool) { } // GetWithInfo retrieves the item with the given key from the cache returning the `Item` object and a boolean indicating if the item was found. 
-func (hyperCache *HyperCache[T]) GetWithInfo(key string) (*models.Item, bool) { +func (hyperCache *HyperCache[T]) GetWithInfo(key string) (*types.Item, bool) { item, ok := hyperCache.backend.Get(key) // Check if the item has expired if it exists, if so, trigger the expiration loop if !ok { @@ -385,7 +384,7 @@ func (hyperCache *HyperCache[T]) GetWithInfo(key string) (*models.Item, bool) { // Check if the item has expired, if so, trigger the expiration loop if item.Expired() { go func() { - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.expirationTriggerCh <- true }() return nil, false @@ -405,7 +404,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time // Check if the item has expired if item.Expired() { go func() { - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.expirationTriggerCh <- true }() return nil, errors.ErrKeyExpired @@ -418,7 +417,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time } // if the item is not found, add it to the cache - item := models.ItemPool.Get().(*models.Item) + item := types.ItemPool.Get().(*types.Item) item.Key = key item.Value = value item.Expiration = expiration @@ -437,7 +436,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time hyperCache.memoryAllocation.Add(item.Size) if hyperCache.maxCacheSize > 0 && hyperCache.memoryAllocation.Load() > hyperCache.maxCacheSize { hyperCache.memoryAllocation.Add(-item.Size) - models.ItemPool.Put(item) + types.ItemPool.Put(item) return nil, errors.ErrCacheFull } @@ -445,7 +444,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time err = hyperCache.backend.Set(item) if err != nil { hyperCache.memoryAllocation.Add(-item.Size) - models.ItemPool.Put(item) + types.ItemPool.Put(item) return nil, err } @@ -454,7 +453,7 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time hyperCache.evictionAlgorithm.Set(key, item.Value) // If the cache is at capacity, evict an item when the eviction interval is zero if hyperCache.shouldEvict.Load() && hyperCache.backend.Count() > hyperCache.backend.Capacity() { - models.ItemPool.Put(item) + types.ItemPool.Put(item) hyperCache.evictItem() } }() @@ -477,7 +476,7 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] // Check if the item has expired if item.Expired() { // Put the item back in the pool - models.ItemPool.Put(item) + types.ItemPool.Put(item) // Add the key to the errors map failed[key] = errors.ErrKeyExpired // Trigger the expiration loop @@ -493,14 +492,14 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] return } -func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { +func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { return hyperCache.backend.List(ctx, filters...) } // List lists the items in the cache that meet the specified criteria. 
// It takes in a variadic number of any type as filters, it then checks the backend type, and calls the corresponding // implementation of the List function for that backend, with the filters passed in as arguments -// func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...any) ([]*models.Item, error) { +// func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...any) ([]*types.Item, error) { // var listInstance listFunc // // checking the backend type @@ -520,13 +519,13 @@ func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IF // // listFunc is a type that defines a function that takes in a variable number of any type as arguments, and returns // // a slice of Item pointers, and an error -// type listFunc func(ctx context.Context, options ...any) ([]*models.Item, error) +// type listFunc func(ctx context.Context, options ...any) ([]*types.Item, error) // // listInMemory is a function that takes in an InMemory, and returns a ListFunc // // it takes any type as filters, and converts them to the specific FilterOption type for the InMemory, // // and calls the InMemory's List function with these filters. // func listInMemory(cacheBackend *backend.InMemory) listFunc { -// return func(ctx context.Context, options ...any) ([]*models.Item, error) { +// return func(ctx context.Context, options ...any) ([]*types.Item, error) { // // here we are converting the filters of any type to the specific FilterOption type for the InMemory // filterOptions := make([]backend.FilterOption[backend.InMemory], len(options)) // for i, option := range options { @@ -540,7 +539,7 @@ func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IF // // it takes any type as filters, and converts them to the specific FilterOption type for the Redis, // // and calls the Redis's List function with these filters. // func listRedis(cacheBackend *backend.Redis) listFunc { -// return func(ctx context.Context, options ...any) ([]*models.Item, error) { +// return func(ctx context.Context, options ...any) ([]*types.Item, error) { // // here we are converting the filters of any type to the specific FilterOption type for the Redis // filterOptions := make([]backend.FilterOption[backend.Redis], len(options)) // for i, option := range options { @@ -568,7 +567,7 @@ func (hyperCache *HyperCache[T]) Remove(keys ...string) { // Clear removes all items from the cache. func (hyperCache *HyperCache[T]) Clear() error { var ( - items []*models.Item + items []*types.Item err error ) diff --git a/middleware/logging.go b/middleware/logging.go index 50ddff3..938432d 100644 --- a/middleware/logging.go +++ b/middleware/logging.go @@ -6,8 +6,8 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" + "github.com/hyp3rd/hypercache/types" ) // Logger describes a logging interface allowing to implement different external, or custom logger. @@ -60,7 +60,7 @@ func (mw LoggingMiddleware) GetOrSet(key string, value any, expiration time.Dura } // GetWithInfo logs the time it takes to execute the next middleware. 
-func (mw LoggingMiddleware) GetWithInfo(key string) (item *models.Item, ok bool) { +func (mw LoggingMiddleware) GetWithInfo(key string) (item *types.Item, ok bool) { defer func(begin time.Time) { mw.logger.Printf("method GetWithInfo took: %s", time.Since(begin)) }(time.Now()) @@ -80,7 +80,7 @@ func (mw LoggingMiddleware) GetMultiple(keys ...string) (result map[string]any, } // List logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { +func (mw LoggingMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { defer func(begin time.Time) { mw.logger.Printf("method List took: %s", time.Since(begin)) }(time.Now()) diff --git a/middleware/stats.go b/middleware/stats.go index 9f8d213..cbfdd21 100644 --- a/middleware/stats.go +++ b/middleware/stats.go @@ -6,8 +6,8 @@ import ( "github.com/hyp3rd/hypercache" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" + "github.com/hyp3rd/hypercache/types" ) // StatsCollectorMiddleware is a middleware that collects stats. It can and should re-use the same stats collector as the hypercache. @@ -53,7 +53,7 @@ func (mw StatsCollectorMiddleware) GetOrSet(key string, value any, expiration ti } // GetWithInfo collects stats for the GetWithInfo method. -func (mw StatsCollectorMiddleware) GetWithInfo(key string) (*models.Item, bool) { +func (mw StatsCollectorMiddleware) GetWithInfo(key string) (*types.Item, bool) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_get_with_info_duration", time.Since(start).Nanoseconds()) @@ -73,7 +73,7 @@ func (mw StatsCollectorMiddleware) GetMultiple(keys ...string) (result map[strin } // List collects stats for the List method. -func (mw StatsCollectorMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) { +func (mw StatsCollectorMiddleware) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_list_duration", time.Since(start).Nanoseconds()) diff --git a/service.go b/service.go index b1286c1..d13391b 100644 --- a/service.go +++ b/service.go @@ -5,8 +5,8 @@ import ( "time" "github.com/hyp3rd/hypercache/backend" - "github.com/hyp3rd/hypercache/models" "github.com/hyp3rd/hypercache/stats" + "github.com/hyp3rd/hypercache/types" ) // Service is the service interface for the HyperCache. 
@@ -18,12 +18,12 @@ type Service interface { Set(key string, value any, expiration time.Duration) error // GetOrSet retrieves a value from the cache using the key, if the key does not exist, it will set the value using the key and expiration duration GetOrSet(key string, value any, expiration time.Duration) (any, error) - // GetWithInfo fetches from the cache using the key, and returns the `models.Item` and a boolean indicating if the key exists - GetWithInfo(key string) (*models.Item, bool) + // GetWithInfo fetches from the cache using the key, and returns the `types.Item` and a boolean indicating if the key exists + GetWithInfo(key string) (*types.Item, bool) // GetMultiple retrieves a list of values from the cache using the keys GetMultiple(keys ...string) (result map[string]any, failed map[string]error) // List returns a list of all items in the cache - List(ctx context.Context, filters ...backend.IFilter) ([]*models.Item, error) + List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) // Remove removes a value from the cache using the key Remove(keys ...string) // Clear removes all values from the cache diff --git a/models/item.go b/types/item.go similarity index 99% rename from models/item.go rename to types/item.go index 399c930..dafaf23 100644 --- a/models/item.go +++ b/types/item.go @@ -1,4 +1,4 @@ -package models +package types // Item represents an item in the cache. It has a key, value, expiration duration, and a last access time field. From b74e6d699576b784a8084614c728bf89f232f6c1 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Mon, 3 Apr 2023 15:30:44 +0200 Subject: [PATCH 2/4] linting --- backend/backend.go | 45 +---------------------- backend/filters.go | 24 +++++++++---- backend/inmemory.go | 45 ----------------------- backend/options.go | 87 --------------------------------------------- hypercache.go | 56 ++--------------------------- 5 files changed, 21 insertions(+), 236 deletions(-) diff --git a/backend/backend.go b/backend/backend.go index 9939e77..381e455 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -6,49 +6,7 @@ import ( "github.com/hyp3rd/hypercache/types" ) -// IBackendConstrain is the interface that defines the constrain type that must be implemented by cache backends. -// type IBackendConstrain interface { -// InMemory | Redis -// } - -// // IInMemory is the interface that must be implemented by in-memory cache backends. -// type IInMemory[T IBackendConstrain] interface { -// // IBackend[T] is the interface that must be implemented by cache backends. -// IBackend[T] -// // List the items in the cache that meet the specified criteria. -// List(options ...FilterOption[InMemory]) ([]*types.Item, error) -// // Clear removes all items from the cache. -// Clear() -// } - -// // IRedisBackend is the interface that must be implemented by Redis cache backends. -// type IRedisBackend[T IBackendConstrain] interface { -// // IBackend[T] is the interface that must be implemented by cache backends. -// IBackend[T] -// // List the items in the cache that meet the specified criteria. -// List(ctx context.Context, options ...FilterOption[Redis]) ([]*types.Item, error) -// // Clear removes all items from the cache. -// Clear() error -// } - -// // IBackend is the interface that must be implemented by cache backends. -// type IBackend[T IBackendConstrain] interface { -// // Get retrieves the item with the given key from the cache. -// // If the key is not found in the cache, it returns nil. 
-// Get(key string) (item *types.Item, ok bool) -// // Set adds a new item to the cache. -// Set(item *types.Item) error -// // Capacity returns the maximum number of items that can be stored in the cache. -// Capacity() int -// // SetCapacity sets the maximum number of items that can be stored in the cache. -// SetCapacity(capacity int) -// // Count returns the number of items currently stored in the cache. -// Count() int -// // Remove deletes the item with the given key from the cache. -// Remove(keys ...string) error -// } - -// IBackendConstrain is the interface that defines the constrain type that must be implemented by cache backends. +// IBackendConstrain is the interface that defines the constrain type implemented by cache backends. type IBackendConstrain interface { InMemory | Redis } @@ -69,7 +27,6 @@ type IBackend[T IBackendConstrain] interface { // Remove deletes the item with the given key from the cache. Remove(keys ...string) error // List the items in the cache that meet the specified criteria. - // List(ctx context.Context, options ...FilterOption[T]) ([]*types.Item, error) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) // Clear removes all items from the cache. Clear() error diff --git a/backend/filters.go b/backend/filters.go index 925f8cb..c1f31f8 100644 --- a/backend/filters.go +++ b/backend/filters.go @@ -6,14 +6,17 @@ import ( "github.com/hyp3rd/hypercache/types" ) +// IFilter is a backend agnostic interface for a filter that can be applied to a list of items. type IFilter interface { ApplyFilter(backendType string, items []*types.Item) []*types.Item } +// sortByFilter is a filter that sorts the items by a given field. type sortByFilter struct { field string } +// ApplyFilter applies the sort filter to the given list of items. func (f sortByFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { var sorter sort.Interface switch f.field { @@ -32,11 +35,13 @@ func (f sortByFilter) ApplyFilter(backendType string, items []*types.Item) []*ty return items } -type sortOrderFilter struct { +// SortOrderFilter is a filter that sorts the items by a given field. +type SortOrderFilter struct { ascending bool } -func (f sortOrderFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { +// ApplyFilter applies the sort order filter to the given list of items. +func (f SortOrderFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { if !f.ascending { sort.Slice(items, func(i, j int) bool { return items[j].Key > items[i].Key @@ -49,11 +54,13 @@ func (f sortOrderFilter) ApplyFilter(backendType string, items []*types.Item) [] return items } -type filterFuncFilter struct { +// filterFunc is a filter that filters the items by a given field's value. +type filterFunc struct { fn func(item *types.Item) bool } -func (f filterFuncFilter) ApplyFilter(backendType string, items []*types.Item) []*types.Item { +// ApplyFilter applies the filter function to the given list of items. +func (f filterFunc) ApplyFilter(backendType string, items []*types.Item) []*types.Item { filteredItems := make([]*types.Item, 0) for _, item := range items { if f.fn(item) { @@ -63,14 +70,17 @@ func (f filterFuncFilter) ApplyFilter(backendType string, items []*types.Item) [ return filteredItems } +// WithSortBy returns a filter that sorts the items by a given field. 
func WithSortBy(field string) IFilter { return sortByFilter{field: field} } -func WithSortOrderAsc(ascending bool) sortOrderFilter { - return sortOrderFilter{ascending: ascending} +// WithSortOrderAsc returns a filter that determins whether to sort ascending or not. +func WithSortOrderAsc(ascending bool) SortOrderFilter { + return SortOrderFilter{ascending: ascending} } +// WithFilterFunc returns a filter that filters the items by a given field's value. func WithFilterFunc(fn func(item *types.Item) bool) IFilter { - return filterFuncFilter{fn: fn} + return filterFunc{fn: fn} } diff --git a/backend/inmemory.go b/backend/inmemory.go index a083608..b600559 100644 --- a/backend/inmemory.go +++ b/backend/inmemory.go @@ -11,7 +11,6 @@ import ( // InMemory is a cache backend that stores the items in memory, leveraging a custom `ConcurrentMap`. type InMemory struct { - // items datastructure.ConcurrentMap[string, *types.Item] // map to store the items in the cache items datastructure.ConcurrentMap // map to store the items in the cache capacity int // capacity of the cache, limits the number of items that can be stored in the cache mutex sync.RWMutex // mutex to protect the cache from concurrent access @@ -76,50 +75,6 @@ func (cacheBackend *InMemory) Set(item *types.Item) error { return nil } -// ListV1 returns a list of all items in the cache filtered and ordered by the given options -// func (cacheBackend *InMemory) ListV1(options ...FilterOption[InMemory]) ([]*types.Item, error) { -// // Apply the filter options -// ApplyFilterOptions(cacheBackend, options...) - -// items := make([]*types.Item, 0, cacheBackend.items.Count()) -// wg := sync.WaitGroup{} -// wg.Add(cacheBackend.items.Count()) -// for item := range cacheBackend.items.IterBuffered() { -// go func(item datastructure.Tuple) { -// defer wg.Done() -// if cacheBackend.FilterFunc == nil || cacheBackend.FilterFunc(&item.Val) { -// items = append(items, &item.Val) -// } -// }(item) -// } -// wg.Wait() - -// if cacheBackend.SortBy == "" { -// return items, nil -// } - -// var sorter sort.Interface -// switch cacheBackend.SortBy { -// case types.SortByKey.String(): -// sorter = &itemSorterByKey{items: items} -// case types.SortByLastAccess.String(): -// sorter = &itemSorterByLastAccess{items: items} -// case types.SortByAccessCount.String(): -// sorter = &itemSorterByAccessCount{items: items} -// case types.SortByExpiration.String(): -// sorter = &itemSorterByExpiration{items: items} -// default: -// return nil, fmt.Errorf("unknown sortBy field: %s", cacheBackend.SortBy) -// } - -// if !cacheBackend.SortAscending { -// sorter = sort.Reverse(sorter) -// } - -// sort.Sort(sorter) -// return items, nil -// } - // List returns a list of all items in the cache filtered and ordered by the given options // func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*types.Item, error) { func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { diff --git a/backend/options.go b/backend/options.go index 6892e53..7851ec2 100644 --- a/backend/options.go +++ b/backend/options.go @@ -2,56 +2,9 @@ package backend import ( "github.com/hyp3rd/hypercache/libs/serializer" - "github.com/hyp3rd/hypercache/types" "github.com/redis/go-redis/v9" ) -// ISortableBackend is an interface that defines the methods that a backend should implement to be sortable. -type iSortableBackend interface { - // setSortAscending indicates whether the items should be sorted in ascending order. 
- setSortAscending(ascending bool) - // setSortBy sets the field to sort the items by. - setSortBy(sortBy string) -} - -// setSortAscending sets the `SortAscending` field of the `InMemory` backend. -func (inm *InMemory) setSortAscending(ascending bool) { - inm.SortAscending = ascending -} - -// setSortAscending sets the `SortAscending` field of the `Redis` backend. -func (rb *Redis) setSortAscending(ascending bool) { - rb.SortAscending = ascending -} - -// setSortBy sets the `SortBy` field of the `InMemory` backend. -func (inm *InMemory) setSortBy(sortBy string) { - inm.SortBy = sortBy -} - -// setSortBy sets the `SortBy` field of the `Redis` backend. -func (rb *Redis) setSortBy(sortBy string) { - rb.SortBy = sortBy -} - -// FilterFunc is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. -type FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache - -// IFilterableBackend is an interface that defines the methods that a backend should implement to be filterable. -type IFilterableBackend interface { - setFilterFunc(filterFunc FilterFunc) -} - -// setFilterFunc sets the `FilterFunc` field of the `InMemory` backend. -func (inm *InMemory) setFilterFunc(filterFunc FilterFunc) { - inm.FilterFunc = filterFunc -} - -// setFilterFunc sets the `FilterFunc` field of the `Redis` backend. -func (rb *Redis) setFilterFunc(filterFunc FilterFunc) { - rb.FilterFunc = filterFunc -} - // iConfigurableBackend is an interface that defines the methods that a backend should implement to be configurable. type iConfigurableBackend interface { // setCapacity sets the capacity of the cache. @@ -110,43 +63,3 @@ func WithSerializer[T Redis](serializer serializer.ISerializer) Option[Redis] { backend.Serializer = serializer } } - -// FilterOption is a function type that can be used to configure the `Filter` struct. -// type FilterOption[T any] func(*T) - -// // ApplyFilterOptions applies the given options to the given filter. -// func ApplyFilterOptions[T IBackendConstrain](backend *T, options ...FilterOption[T]) { -// for _, option := range options { -// option(backend) -// } -// } - -// // WithSortBy is an option that sets the field to sort the items by. -// // The field can be any of the fields in the `Item` struct. -// func WithSortBy[T IBackendConstrain](field types.SortingField) FilterOption[T] { -// return func(a *T) { -// if sortable, ok := any(a).(iSortableBackend); ok { -// sortable.setSortBy(field.String()) -// } -// } -// } - -// // WithSortOrderAsc is an option that sets the sort order to ascending or descending. -// // When sorting the items in the cache, they will be sorted in ascending or descending order based on the field specified with the `WithSortBy` option. -// func WithSortOrderAsc[T IBackendConstrain](ascending bool) FilterOption[T] { -// return func(a *T) { -// if sortable, ok := any(a).(iSortableBackend); ok { -// sortable.setSortAscending(ascending) -// } -// } -// } - -// // WithFilterFunc is an option that sets the filter function to use. -// // The filter function is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. 
-// func WithFilterFunc[T any](fn func(item *types.Item) bool) FilterOption[T] { -// return func(a *T) { -// if filterable, ok := any(a).(IFilterableBackend); ok { -// filterable.setFilterFunc(fn) -// } -// } -// } diff --git a/hypercache.go b/hypercache.go index f8e0de4..be1c107 100644 --- a/hypercache.go +++ b/hypercache.go @@ -492,62 +492,12 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] return } -func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { - return hyperCache.backend.List(ctx, filters...) -} - // List lists the items in the cache that meet the specified criteria. // It takes in a variadic number of any type as filters, it then checks the backend type, and calls the corresponding // implementation of the List function for that backend, with the filters passed in as arguments -// func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...any) ([]*types.Item, error) { -// var listInstance listFunc - -// // checking the backend type -// if hyperCache.cacheBackendChecker.IsInMemory() { -// // if the backend is an InMemory, we set the listFunc to the ListInMemory function -// listInstance = listInMemory(hyperCache.backend.(*backend.InMemory)) -// } - -// if hyperCache.cacheBackendChecker.IsRedis() { -// // if the backend is a Redis, we set the listFunc to the ListRedis function -// listInstance = listRedis(hyperCache.backend.(*backend.Redis)) -// } - -// // calling the corresponding implementation of the list function -// return listInstance(ctx, filters...) -// } - -// // listFunc is a type that defines a function that takes in a variable number of any type as arguments, and returns -// // a slice of Item pointers, and an error -// type listFunc func(ctx context.Context, options ...any) ([]*types.Item, error) - -// // listInMemory is a function that takes in an InMemory, and returns a ListFunc -// // it takes any type as filters, and converts them to the specific FilterOption type for the InMemory, -// // and calls the InMemory's List function with these filters. -// func listInMemory(cacheBackend *backend.InMemory) listFunc { -// return func(ctx context.Context, options ...any) ([]*types.Item, error) { -// // here we are converting the filters of any type to the specific FilterOption type for the InMemory -// filterOptions := make([]backend.FilterOption[backend.InMemory], len(options)) -// for i, option := range options { -// filterOptions[i] = option.(backend.FilterOption[backend.InMemory]) -// } -// return cacheBackend.List(filterOptions...) -// } -// } - -// // listRedis is a function that takes in a Redis, and returns a ListFunc -// // it takes any type as filters, and converts them to the specific FilterOption type for the Redis, -// // and calls the Redis's List function with these filters. -// func listRedis(cacheBackend *backend.Redis) listFunc { -// return func(ctx context.Context, options ...any) ([]*types.Item, error) { -// // here we are converting the filters of any type to the specific FilterOption type for the Redis -// filterOptions := make([]backend.FilterOption[backend.Redis], len(options)) -// for i, option := range options { -// filterOptions[i] = option.(backend.FilterOption[backend.Redis]) -// } -// return cacheBackend.List(ctx, filterOptions...) -// } -// } +func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) { + return hyperCache.backend.List(ctx, filters...) 
+} // Remove removes items with the given key from the cache. If an item is not found, it does nothing. func (hyperCache *HyperCache[T]) Remove(keys ...string) { From 2b667b7d1fb668a8229f843f06a8f3d647e89cf5 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Mon, 3 Apr 2023 18:14:36 +0200 Subject: [PATCH 3/4] added context support --- backend/backend.go | 4 +- backend/inmemory.go | 86 ++++++++++++++++----------- backend/redis.go | 67 ++++++--------------- backend/sorting.go | 2 +- errors/errors.go | 3 + examples/clear/clear.go | 5 +- examples/eviction/eviction.go | 2 +- examples/get/get.go | 7 ++- examples/list/list.go | 30 ++++------ examples/redis/redis.go | 2 +- examples/service/service.go | 11 ++-- examples/size/size.go | 3 +- examples/stats/stats.go | 2 +- hypercache.go | 58 ++++++++---------- middleware/logging.go | 20 +++---- middleware/stats.go | 20 +++---- service.go | 10 ++-- tests/hypercache_get_multiple_test.go | 19 +++--- tests/hypercache_get_or_set_test.go | 5 +- tests/hypercache_get_test.go | 3 +- tests/hypercache_set_test.go | 3 +- 21 files changed, 174 insertions(+), 188 deletions(-) diff --git a/backend/backend.go b/backend/backend.go index 381e455..4bbf618 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -25,9 +25,9 @@ type IBackend[T IBackendConstrain] interface { // Count returns the number of items currently stored in the cache. Count() int // Remove deletes the item with the given key from the cache. - Remove(keys ...string) error + Remove(ctx context.Context, keys ...string) error // List the items in the cache that meet the specified criteria. List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) // Clear removes all items from the cache. - Clear() error + Clear(ctx context.Context) error } diff --git a/backend/inmemory.go b/backend/inmemory.go index b600559..b4466c2 100644 --- a/backend/inmemory.go +++ b/backend/inmemory.go @@ -11,10 +11,10 @@ import ( // InMemory is a cache backend that stores the items in memory, leveraging a custom `ConcurrentMap`. type InMemory struct { - items datastructure.ConcurrentMap // map to store the items in the cache - capacity int // capacity of the cache, limits the number of items that can be stored in the cache - mutex sync.RWMutex // mutex to protect the cache from concurrent access - SortFilters // filters applied when listing the items in the cache + items datastructure.ConcurrentMap // map to store the items in the cache + capacity int // capacity of the cache, limits the number of items that can be stored in the cache + sync.RWMutex // mutex to protect the cache from concurrent access + // SortFilters // filters applied when listing the items in the cache } // NewInMemory creates a new in-memory cache with the given options. 
@@ -68,56 +68,74 @@ func (cacheBackend *InMemory) Set(item *types.Item) error { return err } - cacheBackend.mutex.Lock() - defer cacheBackend.mutex.Unlock() + cacheBackend.Lock() + defer cacheBackend.Unlock() cacheBackend.items.Set(item.Key, item) return nil } // List returns a list of all items in the cache filtered and ordered by the given options -// func (cacheBackend *InMemory) List(ctx context.Context, options ...FilterOption[InMemory]) ([]*types.Item, error) { func (cacheBackend *InMemory) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { // Apply the filters - cacheBackend.mutex.RLock() - defer cacheBackend.mutex.RUnlock() + cacheBackend.RLock() + defer cacheBackend.RUnlock() + + items := make([]*types.Item, 0, cacheBackend.items.Count()) - items := make([]*types.Item, 0, cacheBackend.Count()) - wg := sync.WaitGroup{} - wg.Add(cacheBackend.items.Count()) for item := range cacheBackend.items.IterBuffered() { - go func(item datastructure.Tuple) { - defer wg.Done() - if cacheBackend.FilterFunc == nil || cacheBackend.FilterFunc(&item.Val) { - items = append(items, &item.Val) - } - }(item) + copy := item + items = append(items, ©.Val) } - wg.Wait() - for _, filter := range filters { - items = filter.ApplyFilter("in-memory", items) + // Apply the filters + if len(filters) > 0 { + wg := sync.WaitGroup{} + wg.Add(len(filters)) + for _, filter := range filters { + go func(filter IFilter) { + defer wg.Done() + items = filter.ApplyFilter("in-memory", items) + }(filter) + } + wg.Wait() } return items, nil } // Remove removes items with the given key from the cacheBackend. If an item is not found, it does nothing. -func (cacheBackend *InMemory) Remove(keys ...string) (err error) { - //TODO: determine if handling the error or not - // var ok bool - // item := types.ItemPool.Get().(*types.Item) - // defer types.ItemPool.Put(item) - for _, key := range keys { - cacheBackend.items.Remove(key) +func (cacheBackend *InMemory) Remove(ctx context.Context, keys ...string) (err error) { + done := make(chan struct{}) + go func() { + defer close(done) + for _, key := range keys { + cacheBackend.items.Remove(key) + } + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + return errors.ErrTimeoutOrCanceled } - return } // Clear removes all items from the cacheBackend. 
-func (cacheBackend *InMemory) Clear() error { - // clear the cacheBackend - cacheBackend.items.Clear() - - return nil +func (cacheBackend *InMemory) Clear(ctx context.Context) error { + + done := make(chan struct{}) + go func() { + defer close(done) + // clear the cacheBackend + cacheBackend.items.Clear() + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + return errors.ErrTimeoutOrCanceled + } } diff --git a/backend/redis.go b/backend/redis.go index 3f1be8c..4a49df8 100644 --- a/backend/redis.go +++ b/backend/redis.go @@ -2,6 +2,7 @@ package backend import ( "context" + "sync" "github.com/hyp3rd/hypercache/errors" "github.com/hyp3rd/hypercache/libs/serializer" @@ -15,7 +16,7 @@ type Redis struct { capacity int // capacity of the cache, limits the number of items that can be stored in the cache keysSetName string // keysSetName is the name of the set that holds the keys of the items in the cache Serializer serializer.ISerializer // Serializer is the serializer used to serialize the items before storing them in the cache - SortFilters // SortFilters holds the filters applied when listing the items in the cache + // SortFilters // SortFilters holds the filters applied when listing the items in the cache } // NewRedis creates a new redis cache with the given options. @@ -142,14 +143,7 @@ func (cacheBackend *Redis) Set(item *types.Item) error { } // List returns a list of all the items in the cacheBackend that match the given filter options. -// func (cacheBackend *Redis) List(ctx context.Context, options ...FilterOption[Redis]) ([]*types.Item, error) { func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*types.Item, error) { - // Apply the filters - // filterOptions := make([]FilterOption[Redis], len(filters)) - // for i, option := range filters { - // filterOptions[i] = option.(FilterOption[Redis]) - // } - // Get the set of keys held in the cacheBackend with the given `keysSetName` keys, err := cacheBackend.rdb.SMembers(ctx, cacheBackend.keysSetName).Result() if err != nil { @@ -179,64 +173,39 @@ func (cacheBackend *Redis) List(ctx context.Context, filters ...IFilter) ([]*typ defer types.ItemPool.Put(item) err := cacheBackend.Serializer.Unmarshal([]byte(data["data"]), item) if err == nil { - if cacheBackend.FilterFunc != nil && !cacheBackend.FilterFunc(item) { - continue - } items = append(items, item) } } // Apply the filters - for _, filter := range filters { - items = filter.ApplyFilter("redis", items) + if len(filters) > 0 { + wg := sync.WaitGroup{} + wg.Add(len(filters)) + for _, filter := range filters { + go func(filter IFilter) { + defer wg.Done() + items = filter.ApplyFilter("redis", items) + }(filter) + } + wg.Wait() } return items, nil - - // Check if the items should be sorted - // if cacheBackend.SortBy == "" { - // // No sorting - // return items, nil - // } - - // // Sort the items - // var sorter sort.Interface - // switch cacheBackend.SortBy { - // case types.SortByKey.String(): // Sort by key - // sorter = &itemSorterByKey{items: items} - // case types.SortByLastAccess.String(): // Sort by last access - // sorter = &itemSorterByLastAccess{items: items} - // case types.SortByAccessCount.String(): // Sort by access count - // sorter = &itemSorterByAccessCount{items: items} - // case types.SortByExpiration.String(): // Sort by expiration - // sorter = &itemSorterByExpiration{items: items} - // default: - // return nil, fmt.Errorf("unknown sortBy field: %s", cacheBackend.SortBy) - // } - - // // Reverse the sort order if needed - // 
if !cacheBackend.SortAscending { - // sorter = sort.Reverse(sorter) - // } - // // Sort the items by the given field - // sort.Sort(sorter) - - // return items, nil } // Remove removes an item from the cache with the given key -func (cacheBackend *Redis) Remove(keys ...string) error { +func (cacheBackend *Redis) Remove(ctx context.Context, keys ...string) error { pipe := cacheBackend.rdb.TxPipeline() - pipe.SRem(context.Background(), cacheBackend.keysSetName, keys).Result() - pipe.Del(context.Background(), keys...).Result() + pipe.SRem(ctx, cacheBackend.keysSetName, keys).Result() + pipe.Del(ctx, keys...).Result() - _, err := pipe.Exec(context.Background()) + _, err := pipe.Exec(ctx) return err } // Clear removes all items from the cache -func (cacheBackend *Redis) Clear() error { - _, err := cacheBackend.rdb.FlushDB(context.Background()).Result() +func (cacheBackend *Redis) Clear(ctx context.Context) error { + _, err := cacheBackend.rdb.FlushDB(ctx).Result() return err } diff --git a/backend/sorting.go b/backend/sorting.go index 208d678..36471be 100644 --- a/backend/sorting.go +++ b/backend/sorting.go @@ -11,7 +11,7 @@ type SortFilters struct { // If set to false, the items will be sorted in descending order. SortAscending bool // FilterFunc is a predicate that takes a `Item` as an argument and returns a boolean indicating whether the item should be included in the cache. - FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache + // FilterFunc func(item *types.Item) bool // filters applied when listing the items in the cache } type itemSorterByKey struct { diff --git a/errors/errors.go b/errors/errors.go index 2fc7782..96401fd 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -51,4 +51,7 @@ var ( // ErrBackendNotFound is returned when a backend is not found. ErrBackendNotFound = errors.New("backend not found") + + // ErrTimeoutOrCanceled is returned when a timeout or cancellation occurs. 
+ ErrTimeoutOrCanceled = errors.New("the operation timedout or was canceled") ) diff --git a/examples/clear/clear.go b/examples/clear/clear.go index b55c714..44a84b9 100644 --- a/examples/clear/clear.go +++ b/examples/clear/clear.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "github.com/hyp3rd/hypercache" @@ -18,7 +19,7 @@ func main() { fmt.Println("adding 100000 items to cache") for i := 0; i < 100000; i++ { - cache.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i), 0) + cache.Set(context.TODO(), fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i), 0) } item, ok := cache.Get("key100") @@ -30,7 +31,7 @@ func main() { fmt.Println("count", cache.Count()) fmt.Println("allocation", cache.Allocation()) fmt.Println("clearing cache") - cache.Clear() + cache.Clear(context.TODO()) fmt.Println("capacity", cache.Capacity()) fmt.Println("count", cache.Count()) fmt.Println("allocation", cache.Allocation()) diff --git a/examples/eviction/eviction.go b/examples/eviction/eviction.go index b9dff8e..a87e488 100644 --- a/examples/eviction/eviction.go +++ b/examples/eviction/eviction.go @@ -49,7 +49,7 @@ func executeExample(evictionInterval time.Duration) { key := fmt.Sprintf("key%d", i) val := fmt.Sprintf("val%d", i) - err = cache.Set(key, val, time.Minute) + err = cache.Set(context.TODO(), key, val, time.Minute) if err != nil { fmt.Printf("unexpected error: %v\n", err) diff --git a/examples/get/get.go b/examples/get/get.go index 53c2f8f..9e64b19 100644 --- a/examples/get/get.go +++ b/examples/get/get.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "log" "time" @@ -24,7 +25,7 @@ func main() { key := fmt.Sprintf("key%d", i) val := fmt.Sprintf("val%d", i) - err = cache.Set(key, val, time.Minute) + err = cache.Set(context.TODO(), key, val, time.Minute) if err != nil { fmt.Printf("unexpected error: %v\n", err) @@ -34,7 +35,7 @@ func main() { log.Println("fetching items from the cache using the `GetMultiple` method, key11 does not exist") // Retrieve the specific of items from the cache - items, errs := cache.GetMultiple("key1", "key7", "key9", "key11") + items, errs := cache.GetMultiple(context.TODO(), "key1", "key7", "key9", "key11") // Print the errors if any for k, e := range errs { @@ -49,7 +50,7 @@ func main() { log.Println("fetching items from the cache using the `GetOrSet` method") // Retrieve a specific of item from the cache // If the item is not found, set it and return the value - val, err := cache.GetOrSet("key11", "val11", time.Minute) + val, err := cache.GetOrSet(context.TODO(), "key11", "val11", time.Minute) if err != nil { fmt.Println(err) return diff --git a/examples/list/list.go b/examples/list/list.go index f759b9c..99c75a4 100644 --- a/examples/list/list.go +++ b/examples/list/list.go @@ -27,41 +27,35 @@ func main() { key := fmt.Sprintf("%d", i) val := fmt.Sprintf("val%d", i) - err = hyperCache.Set(key, val, time.Minute) - + err = hyperCache.Set(context.TODO(), key, val, time.Minute) + time.Sleep(time.Millisecond * 350) if err != nil { fmt.Printf("unexpected error: %v\n", err) return } } - // Retrieve the list of items from the cache - items, err := hyperCache.List(context.TODO()) - if err != nil { - fmt.Println(err) - return - } - // Apply filters // Define a filter function itemsFilterFunc := func(item *types.Item) bool { // return time.Since(item.LastAccess) > 1*time.Microsecond - return item.Value != "val84" + return item.Value != "val8" } - sortByFilter := backend.WithSortBy(types.SortByKey.String()) - // sortOrderFilter := backend.WithSortOrderAsc(true) + 
sortByFilter := backend.WithSortBy(types.SortByExpiration.String()) + sortOrderFilter := backend.WithSortOrderAsc(true) // Create a filterFuncFilter with the defined filter function filter := backend.WithFilterFunc(itemsFilterFunc) - // Apply the filter to the items - filteredItems := filter.ApplyFilter("in-memory", items) - - filteredItems = sortByFilter.ApplyFilter("in-memory", filteredItems) - // sortedItems := sortOrderFilter.ApplyFilter("in-memory", filteredItems) + // Retrieve the list of items from the cache + items, err := hyperCache.List(context.TODO(), sortByFilter, sortOrderFilter, filter) + if err != nil { + fmt.Println(err) + return + } - for _, item := range filteredItems { + for _, item := range items { fmt.Println(item.Key, item.Value) } } diff --git a/examples/redis/redis.go b/examples/redis/redis.go index e3d61fa..8330e86 100644 --- a/examples/redis/redis.go +++ b/examples/redis/redis.go @@ -40,7 +40,7 @@ func main() { fmt.Println("setting 50 items to the cache") for i := 0; i < 50; i++ { - err = hyperCache.Set(fmt.Sprintf("key-%d", i), fmt.Sprintf("value-%d", i), time.Hour) + err = hyperCache.Set(context.TODO(), fmt.Sprintf("key-%d", i), fmt.Sprintf("value-%d", i), time.Hour) if err != nil { panic(err) } diff --git a/examples/service/service.go b/examples/service/service.go index a9d647f..465d418 100644 --- a/examples/service/service.go +++ b/examples/service/service.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "log" @@ -41,7 +42,7 @@ func main() { ) defer svc.Stop() - err = svc.Set("key string", "value any", 0) + err = svc.Set(context.TODO(), "key string", "value any", 0) if err != nil { fmt.Println(err) return @@ -54,10 +55,10 @@ func main() { fmt.Println(key) for i := 0; i < 10; i++ { - svc.Set(fmt.Sprintf("key%v", i), fmt.Sprintf("val%v", i), 0) + svc.Set(context.TODO(), fmt.Sprintf("key%v", i), fmt.Sprintf("val%v", i), 0) } - items, errs := svc.GetMultiple("key1", "key7", "key9", "key9999") + items, errs := svc.GetMultiple(context.TODO(), "key1", "key7", "key9", "key9999") for k, e := range errs { fmt.Printf("error fetching item %s: %s\n", k, e) } @@ -66,7 +67,7 @@ func main() { fmt.Println(k, v) } - val, err := svc.GetOrSet("key9999", "val9999", 0) + val, err := svc.GetOrSet(context.TODO(), "key9999", "val9999", 0) if err != nil { fmt.Println(err) return @@ -74,5 +75,5 @@ func main() { fmt.Println(val) - svc.Remove("key9999", "key1") + svc.Remove(context.TODO(), "key9999", "key1") } diff --git a/examples/size/size.go b/examples/size/size.go index 35f13cb..8d52ecb 100644 --- a/examples/size/size.go +++ b/examples/size/size.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "time" @@ -583,7 +584,7 @@ func main() { } for i := 0; i < 3; i++ { - err = cache.Set(fmt.Sprintf("key-%d", i), users, 0) + err = cache.Set(context.TODO(), fmt.Sprintf("key-%d", i), users, 0) if err != nil { fmt.Println(err, "set", i) } diff --git a/examples/stats/stats.go b/examples/stats/stats.go index 3abbf6c..1299c3a 100644 --- a/examples/stats/stats.go +++ b/examples/stats/stats.go @@ -37,7 +37,7 @@ func main() { key := fmt.Sprintf("key%d", i) val := fmt.Sprintf("val%d", i) - err = hyperCache.Set(key, val, time.Minute) + err = hyperCache.Set(context.TODO(), key, val, time.Minute) if err != nil { fmt.Printf("unexpected error: %v\n", err) diff --git a/hypercache.go b/hypercache.go index be1c107..623fdd1 100644 --- a/hypercache.go +++ b/hypercache.go @@ -173,6 +173,10 @@ func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hy // Initialize the 
expiration trigger channel with the buffer size set to half the capacity hyperCache.expirationTriggerCh = make(chan bool, hyperCache.backend.Capacity()/2) + // Initialize the eviction channel with the buffer size set to half the capacity + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + // Start expiration and eviction loops if capacity is greater than zero hyperCache.once.Do(func() { tick := time.NewTicker(hyperCache.expirationInterval) @@ -181,13 +185,13 @@ func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hy select { case <-tick.C: // trigger expiration - hyperCache.expirationLoop() + hyperCache.expirationLoop(ctx) case <-hyperCache.expirationTriggerCh: // trigger expiration - hyperCache.expirationLoop() + hyperCache.expirationLoop(ctx) case <-hyperCache.evictCh: // trigger eviction - hyperCache.evictionLoop() + hyperCache.evictionLoop(ctx) case <-hyperCache.stop: // stop the loops return @@ -204,7 +208,7 @@ func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hy for { select { case <-tick.C: - hyperCache.evictionLoop() + hyperCache.evictionLoop(ctx) case <-hyperCache.stop: return } @@ -217,7 +221,7 @@ func New[T backend.IBackendConstrain](bm *BackendManager, config *Config[T]) (hy } // expirationLoop is a function that runs in a separate goroutine and expires items in the cache based on their expiration duration. -func (hyperCache *HyperCache[T]) expirationLoop() { +func (hyperCache *HyperCache[T]) expirationLoop(ctx context.Context) { hyperCache.workerPool.Enqueue(func() error { hyperCache.StatsCollector.Incr("expiration_loop_count", 1) defer hyperCache.StatsCollector.Timing("expiration_loop_duration", time.Now().UnixNano()) @@ -245,7 +249,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { // iterate all expired items and remove them for _, item := range sortedItems { expiredCount++ - hyperCache.Remove(item.Key) + hyperCache.Remove(ctx, item.Key) types.ItemPool.Put(item) hyperCache.StatsCollector.Incr("item_expired_count", 1) } @@ -259,7 +263,7 @@ func (hyperCache *HyperCache[T]) expirationLoop() { // evictionLoop is a function that runs in a separate goroutine and evicts items from the cache based on the cache's capacity and the max eviction count. // The eviction is determined by the eviction algorithm. -func (hyperCache *HyperCache[T]) evictionLoop() { +func (hyperCache *HyperCache[T]) evictionLoop(ctx context.Context) { // Enqueue the eviction loop in the worker pool to avoid blocking the main goroutine if the eviction loop is slow hyperCache.workerPool.Enqueue(func() error { hyperCache.StatsCollector.Incr("eviction_loop_count", 1) @@ -282,7 +286,7 @@ func (hyperCache *HyperCache[T]) evictionLoop() { } // remove the item from the cache - hyperCache.Remove(key) + hyperCache.Remove(ctx, key) evictedCount++ hyperCache.StatsCollector.Incr("item_evicted_count", 1) } @@ -295,21 +299,21 @@ func (hyperCache *HyperCache[T]) evictionLoop() { // evictItem is a helper function that removes an item from the cache and returns the key of the evicted item. // If no item can be evicted, it returns a false. -func (hyperCache *HyperCache[T]) evictItem() (string, bool) { +func (hyperCache *HyperCache[T]) evictItem(ctx context.Context) (string, bool) { key, ok := hyperCache.evictionAlgorithm.Evict() if !ok { // no more items to evict return "", false } - hyperCache.Remove(key) + hyperCache.Remove(ctx, key) return key, true } // Set adds an item to the cache with the given key and value. 
If an item with the same key already exists, it updates the value of the existing item. // If the expiration duration is greater than zero, the item will expire after the specified duration. // If the capacity of the cache is reached, the cache will leverage the eviction algorithm proactively if the evictionInterval is zero. If not, the background process will take care of the eviction. -func (hyperCache *HyperCache[T]) Set(key string, value any, expiration time.Duration) error { +func (hyperCache *HyperCache[T]) Set(ctx context.Context, key string, value any, expiration time.Duration) error { // Create a new cache item and set its properties item := types.ItemPool.Get().(*types.Item) item.Key = key @@ -346,7 +350,7 @@ func (hyperCache *HyperCache[T]) Set(key string, value any, expiration time.Dura // If the cache is at capacity, evict an item when the eviction interval is zero if hyperCache.shouldEvict.Load() && hyperCache.backend.Count() > hyperCache.backend.Capacity() { - hyperCache.evictItem() + hyperCache.evictItem(ctx) } return nil @@ -397,7 +401,7 @@ func (hyperCache *HyperCache[T]) GetWithInfo(key string) (*types.Item, bool) { // GetOrSet retrieves the item with the given key. If the item is not found, it adds the item to the cache with the given value and expiration duration. // If the capacity of the cache is reached, leverage the eviction algorithm. -func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time.Duration) (any, error) { +func (hyperCache *HyperCache[T]) GetOrSet(ctx context.Context, key string, value any, expiration time.Duration) (any, error) { // if the item is found, return the value if item, ok := hyperCache.backend.Get(key); ok { @@ -454,14 +458,14 @@ func (hyperCache *HyperCache[T]) GetOrSet(key string, value any, expiration time // If the cache is at capacity, evict an item when the eviction interval is zero if hyperCache.shouldEvict.Load() && hyperCache.backend.Count() > hyperCache.backend.Capacity() { types.ItemPool.Put(item) - hyperCache.evictItem() + hyperCache.evictItem(ctx) } }() return value, nil } // GetMultiple retrieves the items with the given keys from the cache. -func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string]any, failed map[string]error) { +func (hyperCache *HyperCache[T]) GetMultiple(ctx context.Context, keys ...string) (result map[string]any, failed map[string]error) { result = make(map[string]any, len(keys)) // Preallocate the result map failed = make(map[string]error, len(keys)) // Preallocate the errors map @@ -480,7 +484,7 @@ func (hyperCache *HyperCache[T]) GetMultiple(keys ...string) (result map[string] // Add the key to the errors map failed[key] = errors.ErrKeyExpired // Trigger the expiration loop - go hyperCache.expirationLoop() + go hyperCache.expirationLoop(ctx) } else { item.Touch() // Update the last access time and access count // Add the item to the result map @@ -500,7 +504,7 @@ func (hyperCache *HyperCache[T]) List(ctx context.Context, filters ...backend.IF } // Remove removes items with the given key from the cache. If an item is not found, it does nothing. -func (hyperCache *HyperCache[T]) Remove(keys ...string) { +func (hyperCache *HyperCache[T]) Remove(ctx context.Context, keys ...string) { // Remove the item from the eviction algorithm // and update the memory allocation for _, key := range keys { @@ -511,11 +515,11 @@ func (hyperCache *HyperCache[T]) Remove(keys ...string) { hyperCache.evictionAlgorithm.Delete(key) } } - hyperCache.backend.Remove(keys...) 
+ hyperCache.backend.Remove(ctx, keys...) } // Clear removes all items from the cache. -func (hyperCache *HyperCache[T]) Clear() error { +func (hyperCache *HyperCache[T]) Clear(ctx context.Context) error { var ( items []*types.Item err error @@ -528,20 +532,10 @@ func (hyperCache *HyperCache[T]) Clear() error { } // clear the cacheBackend - err = hyperCache.backend.Clear() + err = hyperCache.backend.Clear(ctx) if err != nil { return err } - // if cb, ok := hyperCache.backend.(*backend.InMemory); ok { - // items, err = cb.List() - // cb.Clear() - // } else if cb, ok := hyperCache.backend.(*backend.Redis); ok { - // items, err = cb.List(context.TODO()) - // if err != nil { - // return err - // } - // err = cb.Clear() - // } for _, item := range items { hyperCache.evictionAlgorithm.Delete(item.Key) @@ -559,14 +553,14 @@ func (hyperCache *HyperCache[T]) Capacity() int { // SetCapacity sets the capacity of the cache. If the new capacity is smaller than the current number of items in the cache, // it evicts the excess items from the cache. -func (hyperCache *HyperCache[T]) SetCapacity(capacity int) { +func (hyperCache *HyperCache[T]) SetCapacity(ctx context.Context, capacity int) { // set capacity of the backend hyperCache.backend.SetCapacity(capacity) // evaluate again if the cache should evict items proactively hyperCache.shouldEvict.Swap(hyperCache.evictionInterval == 0 && hyperCache.backend.Capacity() > 0) // if the cache size is greater than the new capacity, evict items if hyperCache.backend.Count() > hyperCache.Capacity() { - hyperCache.evictionLoop() + hyperCache.evictionLoop(ctx) } } diff --git a/middleware/logging.go b/middleware/logging.go index 938432d..622258e 100644 --- a/middleware/logging.go +++ b/middleware/logging.go @@ -40,23 +40,23 @@ func (mw LoggingMiddleware) Get(key string) (value interface{}, ok bool) { } // Set logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) Set(key string, value any, expiration time.Duration) error { +func (mw LoggingMiddleware) Set(ctx context.Context, key string, value any, expiration time.Duration) error { defer func(begin time.Time) { mw.logger.Printf("method Set took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("Set method called with key: %s value: %s", key, value) - return mw.next.Set(key, value, expiration) + return mw.next.Set(ctx, key, value, expiration) } // GetOrSet logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) GetOrSet(key string, value any, expiration time.Duration) (any, error) { +func (mw LoggingMiddleware) GetOrSet(ctx context.Context, key string, value any, expiration time.Duration) (any, error) { defer func(begin time.Time) { mw.logger.Printf("method GetOrSet took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("GetOrSet method invoked with key: %s value: %s", key, value) - return mw.next.GetOrSet(key, value, expiration) + return mw.next.GetOrSet(ctx, key, value, expiration) } // GetWithInfo logs the time it takes to execute the next middleware. @@ -70,13 +70,13 @@ func (mw LoggingMiddleware) GetWithInfo(key string) (item *types.Item, ok bool) } // GetMultiple logs the time it takes to execute the next middleware. 
-func (mw LoggingMiddleware) GetMultiple(keys ...string) (result map[string]any, failed map[string]error) { +func (mw LoggingMiddleware) GetMultiple(ctx context.Context, keys ...string) (result map[string]any, failed map[string]error) { defer func(begin time.Time) { mw.logger.Printf("method GetMultiple took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("GetMultiple method invoked with keys: %s", keys) - return mw.next.GetMultiple(keys...) + return mw.next.GetMultiple(ctx, keys...) } // List logs the time it takes to execute the next middleware. @@ -90,23 +90,23 @@ func (mw LoggingMiddleware) List(ctx context.Context, filters ...backend.IFilter } // Remove logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) Remove(keys ...string) { +func (mw LoggingMiddleware) Remove(ctx context.Context, keys ...string) { defer func(begin time.Time) { mw.logger.Printf("method Remove took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("Remove method invoked with keys: %s", keys) - mw.next.Remove(keys...) + mw.next.Remove(ctx, keys...) } // Clear logs the time it takes to execute the next middleware. -func (mw LoggingMiddleware) Clear() error { +func (mw LoggingMiddleware) Clear(ctx context.Context) error { defer func(begin time.Time) { mw.logger.Printf("method Clear took: %s", time.Since(begin)) }(time.Now()) mw.logger.Printf("Clear method invoked") - return mw.next.Clear() + return mw.next.Clear(ctx) } // Capacity takes to execute the next middleware. diff --git a/middleware/stats.go b/middleware/stats.go index cbfdd21..a51c5a3 100644 --- a/middleware/stats.go +++ b/middleware/stats.go @@ -33,23 +33,23 @@ func (mw StatsCollectorMiddleware) Get(key string) (interface{}, bool) { } // Set collects stats for the Set method. -func (mw StatsCollectorMiddleware) Set(key string, value any, expiration time.Duration) error { +func (mw StatsCollectorMiddleware) Set(ctx context.Context, key string, value any, expiration time.Duration) error { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_set_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_set_count", 1) }() - return mw.next.Set(key, value, expiration) + return mw.next.Set(ctx, key, value, expiration) } // GetOrSet collects stats for the GetOrSet method. -func (mw StatsCollectorMiddleware) GetOrSet(key string, value any, expiration time.Duration) (any, error) { +func (mw StatsCollectorMiddleware) GetOrSet(ctx context.Context, key string, value any, expiration time.Duration) (any, error) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_get_or_set_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_get_or_set_count", 1) }() - return mw.next.GetOrSet(key, value, expiration) + return mw.next.GetOrSet(ctx, key, value, expiration) } // GetWithInfo collects stats for the GetWithInfo method. @@ -63,13 +63,13 @@ func (mw StatsCollectorMiddleware) GetWithInfo(key string) (*types.Item, bool) { } // GetMultiple collects stats for the GetMultiple method. -func (mw StatsCollectorMiddleware) GetMultiple(keys ...string) (result map[string]any, failed map[string]error) { +func (mw StatsCollectorMiddleware) GetMultiple(ctx context.Context, keys ...string) (result map[string]any, failed map[string]error) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_get_multiple_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_get_multiple_count", 1) }() - return mw.next.GetMultiple(keys...) 
+ return mw.next.GetMultiple(ctx, keys...) } // List collects stats for the List method. @@ -83,23 +83,23 @@ func (mw StatsCollectorMiddleware) List(ctx context.Context, filters ...backend. } // Remove collects stats for the Remove method. -func (mw StatsCollectorMiddleware) Remove(keys ...string) { +func (mw StatsCollectorMiddleware) Remove(ctx context.Context, keys ...string) { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_remove_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_remove_count", 1) }() - mw.next.Remove(keys...) + mw.next.Remove(ctx, keys...) } // Clear collects stats for the Clear method. -func (mw StatsCollectorMiddleware) Clear() error { +func (mw StatsCollectorMiddleware) Clear(ctx context.Context) error { start := time.Now() defer func() { mw.statsCollector.Timing("hypercache_clear_duration", time.Since(start).Nanoseconds()) mw.statsCollector.Incr("hypercache_clear_count", 1) }() - return mw.next.Clear() + return mw.next.Clear(ctx) } // Capacity returns the capacity of the cache diff --git a/service.go b/service.go index d13391b..f47e6db 100644 --- a/service.go +++ b/service.go @@ -15,19 +15,19 @@ type Service interface { // Get retrieves a value from the cache using the key Get(key string) (value interface{}, ok bool) // Set stores a value in the cache using the key and expiration duration - Set(key string, value any, expiration time.Duration) error + Set(ctx context.Context, key string, value any, expiration time.Duration) error // GetOrSet retrieves a value from the cache using the key, if the key does not exist, it will set the value using the key and expiration duration - GetOrSet(key string, value any, expiration time.Duration) (any, error) + GetOrSet(ctx context.Context, key string, value any, expiration time.Duration) (any, error) // GetWithInfo fetches from the cache using the key, and returns the `types.Item` and a boolean indicating if the key exists GetWithInfo(key string) (*types.Item, bool) // GetMultiple retrieves a list of values from the cache using the keys - GetMultiple(keys ...string) (result map[string]any, failed map[string]error) + GetMultiple(ctx context.Context, keys ...string) (result map[string]any, failed map[string]error) // List returns a list of all items in the cache List(ctx context.Context, filters ...backend.IFilter) ([]*types.Item, error) // Remove removes a value from the cache using the key - Remove(keys ...string) + Remove(ctx context.Context, keys ...string) // Clear removes all values from the cache - Clear() error + Clear(ctx context.Context) error // Capacity returns the capacity of the cache Capacity() int // Allocation returns the allocation in bytes of the current cache diff --git a/tests/hypercache_get_multiple_test.go b/tests/hypercache_get_multiple_test.go index ae62d83..e297f1f 100644 --- a/tests/hypercache_get_multiple_test.go +++ b/tests/hypercache_get_multiple_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "time" @@ -24,9 +25,9 @@ func TestGetMultiple(t *testing.T) { wantValues: map[string]interface{}{"key1": 1, "key2": 2, "key3": 3}, wantErrs: map[string]error(map[string]error{}), setup: func(cache *hypercache.HyperCache[backend.InMemory]) { - cache.Set("key1", 1, 0) - cache.Set("key2", 2, 0) - cache.Set("key3", 3, 0) + cache.Set(context.TODO(), "key1", 1, 0) + cache.Set(context.TODO(), "key2", 2, 0) + cache.Set(context.TODO(), "key3", 3, 0) }, }, { @@ -35,8 +36,8 @@ func TestGetMultiple(t *testing.T) { wantValues: 
map[string]interface{}{"key1": 1, "key3": 3}, wantErrs: map[string]error{"key2": errors.ErrKeyNotFound}, setup: func(cache *hypercache.HyperCache[backend.InMemory]) { - cache.Set("key1", 1, 0) - cache.Set("key3", 3, 0) + cache.Set(context.TODO(), "key1", 1, 0) + cache.Set(context.TODO(), "key3", 3, 0) }, }, { @@ -45,10 +46,10 @@ func TestGetMultiple(t *testing.T) { wantValues: map[string]interface{}{"key2": 2, "key3": 3}, wantErrs: map[string]error{"key1": errors.ErrKeyNotFound}, setup: func(cache *hypercache.HyperCache[backend.InMemory]) { - cache.Set("key1", 1, time.Millisecond) + cache.Set(context.TODO(), "key1", 1, time.Millisecond) time.Sleep(2 * time.Millisecond) - cache.Set("key2", 2, 0) - cache.Set("key3", 3, 0) + cache.Set(context.TODO(), "key2", 2, 0) + cache.Set(context.TODO(), "key3", 3, 0) }, }, } @@ -69,7 +70,7 @@ func TestGetMultiple(t *testing.T) { assert.Nil(t, err) test.setup(cache) - gotValues, gotErrs := cache.GetMultiple(test.keys...) + gotValues, gotErrs := cache.GetMultiple(context.TODO(), test.keys...) assert.Equal(t, test.wantValues, gotValues) assert.Equal(t, test.wantErrs, gotErrs) }) diff --git a/tests/hypercache_get_or_set_test.go b/tests/hypercache_get_or_set_test.go index f02d40e..88d24f6 100644 --- a/tests/hypercache_get_or_set_test.go +++ b/tests/hypercache_get_or_set_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "time" @@ -78,7 +79,7 @@ func TestHyperCache_GetOrSet(t *testing.T) { shouldExpire := test.expectedErr == errors.ErrKeyExpired - val, err = cache.GetOrSet(test.key, test.value, test.expiry) + val, err = cache.GetOrSet(context.TODO(), test.key, test.value, test.expiry) if !shouldExpire { assert.Equal(t, test.expectedErr, err) } @@ -90,7 +91,7 @@ func TestHyperCache_GetOrSet(t *testing.T) { if shouldExpire { t.Log("sleeping for 2 Millisecond to allow the key to expire") time.Sleep(2 * time.Millisecond) - _, err = cache.GetOrSet(test.key, test.value, test.expiry) + _, err = cache.GetOrSet(context.TODO(), test.key, test.value, test.expiry) assert.Equal(t, test.expectedErr, err) } diff --git a/tests/hypercache_get_test.go b/tests/hypercache_get_test.go index 875da07..6b1b8b8 100644 --- a/tests/hypercache_get_test.go +++ b/tests/hypercache_get_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "time" @@ -68,7 +69,7 @@ func TestHyperCache_Get(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { if test.shouldSet { - err = cache.Set(test.key, test.value, test.expiry) + err = cache.Set(context.TODO(), test.key, test.value, test.expiry) if err != nil { assert.Equal(t, test.expectedErr, err) } diff --git a/tests/hypercache_set_test.go b/tests/hypercache_set_test.go index f544b48..c59b8e2 100644 --- a/tests/hypercache_set_test.go +++ b/tests/hypercache_set_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "testing" "time" @@ -74,7 +75,7 @@ func TestHyperCache_Set(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - err = cache.Set(test.key, test.value, test.expiry) + err = cache.Set(context.TODO(), test.key, test.value, test.expiry) assert.Equal(t, test.expectedErr, err) if err == nil { val, ok := cache.Get(test.key) From 52fe1a5d17c360f1d629ed79f999548c8f7f8222 Mon Sep 17 00:00:00 2001 From: Francesco Cosentino Date: Mon, 3 Apr 2023 18:48:32 +0200 Subject: [PATCH 4/4] updated README --- README.md | 180 +++--------------------------------------------------- 1 file changed, 9 insertions(+), 171 deletions(-) diff --git a/README.md b/README.md index 
2299180..1f96c81 100644
--- a/README.md
+++ b/README.md
@@ -4,9 +4,9 @@
## Synopsis
-HyperCache is a **thread-safe** **high-performance** cache implementation in `Go` that supports multiple backends with an optional size limit, expiration, and eviction of items supporting custom algorithms alongside the defaults. It can be used as a standalone cache or as a cache middleware for a service. It can implement a [service interface](./service.go) to intercept and decorate the cache methods with middleware (default or custom).
-It is optimized for performance and flexibility, allowing to specify the expiration and eviction intervals, and providing and registering new eviction algorithms, stats collectors, and middleware(s).
-It ships with a default [historigram stats collector](./stats/statscollector.go) and several eviction algorithms, but you can develop and register your own as long as it implements the [Eviction Algorithm interface](./eviction/eviction.go).:
+HyperCache is a **thread-safe** **high-performance** cache implementation in `Go` that supports multiple backends with an optional size limit, expiration, and eviction of items with custom algorithms alongside the defaults. It can be used as a standalone cache, in distributed environments, or as a cache middleware for a service. It can implement a [service interface](./service.go) to intercept and decorate the cache methods with middleware (default or custom).
+It is optimized for performance and flexibility, allowing you to specify the expiration and eviction intervals and to provide and register new eviction algorithms, stats collectors, and middleware(s).
+It ships with a default [histogram stats collector](./stats/stats.go) and several eviction algorithms. However, you can develop and register your own if it implements the [Eviction Algorithm interface](./eviction/eviction.go):
- [Recently Used (LRU) eviction algorithm](./eviction/lru.go)
- [The Least Frequently Used (LFU) algorithm](./eviction/lfu.go)
@@ -18,7 +18,7 @@ It ships with a default [historigram stats collector](./stats/statscollector.go)
- Thread-safe
- High-performance
-- Supports multiple backends, default backends are:
+- Supports multiple, custom backends. Default backends are:
  1. [In-memory](./backend/inmemory.go)
  2. [Redis](./backend/redis.go)
- Store items in the cache with a key and expiration duration
@@ -27,8 +27,8 @@ It ships with a default [historigram stats collector](./stats/statscollector.go)
- Clear the cache of all items
- Evitc items in the background based on the cache capacity and items access leveraging several custom eviction algorithms
- Expire items in the background based on their duration
-- [Eviction Algorithm interface](./eviction.go) to implement custom eviction algorithms.
-- Stats collection with a default [stats collector](./stats/statscollector.go) or a custom one that implements the StatsCollector interface.
+- [Eviction Algorithm interface](./eviction/eviction.go) to implement custom eviction algorithms.
+- Stats collection with a default [stats collector](./stats/stats.go) or a custom one that implements the StatsCollector interface.
- [Service interface implementation](./service.go) to allow intercepting cache methods and decorate them with custom or default middleware(s).
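With the refactored `IFilter` options, listing items works roughly as in the sketch below, adapted from the updated `examples/list` program in this series; the package name, the `hyperCache` parameter, and the `"val8"` predicate are illustrative placeholders rather than part of the patch:

```golang
package hypercachedemo

import (
	"context"
	"fmt"

	"github.com/hyp3rd/hypercache"
	"github.com/hyp3rd/hypercache/backend"
	"github.com/hyp3rd/hypercache/types"
)

// listExpiringFirst prints the cached items sorted by expiration (ascending),
// skipping one placeholder value, using the filter options from this series.
func listExpiringFirst(ctx context.Context, hyperCache *hypercache.HyperCache[backend.InMemory]) error {
	sortByFilter := backend.WithSortBy(types.SortByExpiration.String())
	sortOrderFilter := backend.WithSortOrderAsc(true)
	filter := backend.WithFilterFunc(func(item *types.Item) bool {
		return item.Value != "val8" // placeholder predicate
	})

	items, err := hyperCache.List(ctx, sortByFilter, sortOrderFilter, filter)
	if err != nil {
		return err
	}
	for _, item := range items {
		fmt.Println(item.Key, item.Value)
	}
	return nil
}
```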
## Installation @@ -87,7 +87,7 @@ if err != nil { } ``` -For a fine-grained control over the cache configuration, use the `New` function, for instance: +For fine-grained control over the cache configuration, use the `New` function, for instance: ```golang config := hypercache.NewConfig[backend.InMemory]() @@ -108,174 +108,12 @@ if err != nil { } ``` -**For the full configuration options, refer to the [config.go](./config.go) file.** - -### Set - -Set adds an item to the cache with the given key and value. - -```golang -err := cache.Set("key", "value", time.Hour) -if err != nil { - // handle error -} -``` - -The `Set` function takes a key, a value, and a duration as arguments. The key must be a non-empty string, the value can be of any type, and the duration specifies how long the item should stay in the cache before it expires. - -### Get - -`Get` retrieves the item with the given key from the cache. - -```golang -value, ok := cache.Get("key") -if !ok { - // handle item not found -} -``` - -The `Get` function returns the value associated with the given key or an error if the key is not found or has expired. - -### Remove - -`Remove` deletes items with the given key from the cache. If an item is not found, it does nothing. - -```golang -err := cache.Remove("key", "key2", "key3") -if err != nil { - // handle error -} -``` - -The `Remove` function takes a variadic number of keys as arguments and returns an error if any keys are not found. - +**Refer to the [config.go](./config.go) file for the full configuration options.** **For a comprehensive API overview, see the [documentation](https://pkg.go.dev/github.com/hyp3rd/hypercache).** -## Service interface for microservices implementation - -The `Service` interface allows intercepting cache methods and decorating them with custom or default middleware(s). - -```golang -var svc hypercache.Service -hyperCache, err := hypercache.NewInMemoryWithDefaults(10) - -if err != nil { - fmt.Println(err) - return -} -// assign statsCollector of the backend to use it in middleware -statsCollector := hyperCache.StatsCollector -svc = hyperCache - -if err != nil { - fmt.Println(err) - return -} - -// Example of using zap logger from uber -logger, _ := zap.NewProduction() - -sugar := logger.Sugar() -defer sugar.Sync() -defer logger.Sync() - -// apply middleware in the same order as you want to execute them -svc = hypercache.ApplyMiddleware(svc, - // middleware.YourMiddleware, - func(next hypercache.Service) hypercache.Service { - return middleware.NewLoggingMiddleware(next, sugar) - }, - func(next hypercache.Service) hypercache.Service { - return middleware.NewStatsCollectorMiddleware(next, statsCollector) - }, -) - -err = svc.Set("key string", "value any", 0) -if err != nil { - fmt.Println(err) - return -} -key, ok := svc.Get("key string") -if !ok { - fmt.Println("key not found") - return -} -fmt.Println(key) -``` - ## Usage -Here is an example of using the HyperCache package. See the [examples](./examples/README.md) directory for a more comprehensive overview. 
- -```golang -package main - -import ( - "fmt" - "log" - "time" - - "github.com/hyp3rd/hypercache" -) - -func main() { - // Create a new HyperCache with a capacity of 10 - cache, err := hypercache.NewInMemoryWithDefaults(10) - if err != nil { - fmt.Println(err) - return - } - // Stop the cache when the program exits - defer cache.Stop() - - log.Println("adding items to the cache") - // Add 10 items to the cache - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%d", i) - val := fmt.Sprintf("val%d", i) - - err = cache.Set(key, val, time.Minute) - - if err != nil { - fmt.Printf("unexpected error: %v\n", err) - return - } - } - - log.Println("fetching items from the cache using the `GetMultiple` method, key11 does not exist") - // Retrieve the specific of items from the cache - items, errs := cache.GetMultiple("key1", "key7", "key9", "key11") - - // Print the errors if any - for k, e := range errs { - log.Printf("error fetching item %s: %s\n", k, e) - } - - // Print the items - for k, v := range items { - fmt.Println(k, v) - } - - log.Println("fetching items from the cache using the `GetOrSet` method") - // Retrieve a specific of item from the cache - // If the item is not found, set it and return the value - val, err := cache.GetOrSet("key11", "val11", time.Minute) - if err != nil { - fmt.Println(err) - return - } - fmt.Println(val) - - log.Println("fetching items from the cache using the simple `Get` method") - item, ok := cache.Get("key7") - if !ok { - fmt.Println("item not found") - return - } - fmt.Println(item) -} - -``` +Examples can be too broad for a readme, refer to the [examples](./examples/README.md) directory for a more comprehensive overview. ## License
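Putting the series together, the context-aware API from PATCH 3/4 is used roughly as in the following minimal sketch, assembled from the updated `examples/get` and `examples/clear` programs; the keys and values are placeholders:

```golang
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hyp3rd/hypercache"
)

func main() {
	// create an in-memory cache with a capacity of 10, using the defaults otherwise
	cache, err := hypercache.NewInMemoryWithDefaults(10)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer cache.Stop()

	ctx := context.TODO()

	// Set, GetMultiple, Remove, and Clear now take a context as their first argument
	if err := cache.Set(ctx, "key1", "val1", time.Minute); err != nil {
		fmt.Println(err)
		return
	}

	items, errs := cache.GetMultiple(ctx, "key1", "key2")
	for k, e := range errs {
		fmt.Printf("error fetching item %s: %s\n", k, e)
	}
	for k, v := range items {
		fmt.Println(k, v)
	}

	cache.Remove(ctx, "key1")

	if err := cache.Clear(ctx); err != nil {
		fmt.Println(err)
	}
}
```

Callers that need deadlines can pass a `context.WithTimeout` context instead of `context.TODO()`; the in-memory `Remove` and `Clear` added in this series return `ErrTimeoutOrCanceled` when the context is done.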