From d12869fae1b7ece05cb4747b95cdec667fd9d61e Mon Sep 17 00:00:00 2001 From: darkweak Date: Fri, 31 Mar 2023 15:16:07 +0200 Subject: [PATCH] fix(plugins): handle stringified array from traefik configuration loader (#333) --- plugins/traefik/Makefile | 1 - plugins/traefik/go.mod | 1 - plugins/traefik/go.sum | 2 - plugins/traefik/main.go | 14 + .../traefik/override/middleware/middleware.go | 4 - .../override/providers/abstractProvider.go | 58 - .../providers/abstractProvider_test.go | 17 - .../override/providers/cacheProvider.go | 110 -- .../traefik/override/storage/cacheProvider.go | 1 + plugins/traefik/override/ykeys/ykey.go | 5 +- plugins/traefik/souin-configuration.yaml | 1 + .../souin/cache/providers/abstractProvider.go | 24 +- .../cache/providers/abstractProvider_test.go | 17 - .../souin/cache/providers/badgerProvider.go | 203 +++ .../souin/cache/providers/cacheProvider.go | 110 -- .../cache/providers/embeddedOlricProvider.go | 218 ++++ .../souin/cache/providers/etcdProvider.go | 217 +++ .../souin/cache/providers/nutsProvider.go | 219 ++++ .../souin/cache/providers/olricProvider.go | 223 ++++ .../souin/cache/providers/redisProvider.go | 227 ++++ .../darkweak/souin/cache/ykeys/ykey.go | 5 +- .../souin/pkg/middleware/middleware.go | 4 - .../souin/pkg/storage/cacheProvider.go | 1 + .../patrickmn/go-cache/CONTRIBUTORS | 9 - .../github.com/patrickmn/go-cache/LICENSE | 19 - .../github.com/patrickmn/go-cache/README.md | 83 -- .../github.com/patrickmn/go-cache/cache.go | 1161 ----------------- .../github.com/patrickmn/go-cache/sharded.go | 192 --- plugins/traefik/vendor/modules.txt | 3 - 29 files changed, 1351 insertions(+), 1798 deletions(-) delete mode 100644 plugins/traefik/override/providers/abstractProvider.go delete mode 100644 plugins/traefik/override/providers/abstractProvider_test.go delete mode 100644 plugins/traefik/override/providers/cacheProvider.go delete mode 100644 plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/abstractProvider_test.go create mode 100644 plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/badgerProvider.go delete mode 100644 plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/cacheProvider.go create mode 100644 plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/embeddedOlricProvider.go create mode 100644 plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/etcdProvider.go create mode 100644 plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/nutsProvider.go create mode 100644 plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/olricProvider.go create mode 100644 plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/redisProvider.go delete mode 100644 plugins/traefik/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS delete mode 100644 plugins/traefik/vendor/github.com/patrickmn/go-cache/LICENSE delete mode 100644 plugins/traefik/vendor/github.com/patrickmn/go-cache/README.md delete mode 100644 plugins/traefik/vendor/github.com/patrickmn/go-cache/cache.go delete mode 100644 plugins/traefik/vendor/github.com/patrickmn/go-cache/sharded.go diff --git a/plugins/traefik/Makefile b/plugins/traefik/Makefile index 6d7ab713f..820a27909 100644 --- a/plugins/traefik/Makefile +++ b/plugins/traefik/Makefile @@ -25,7 +25,6 @@ prepare: vendor ## Prepare traefik plugin # TODO find another way to do that replace: ## Replace sources in the vendor folder deeper than the go mod replace $(MAKE) copy-to base=$(CACHE) target=coalescing - $(MAKE) copy-to base=$(CACHE) 
target=providers $(MAKE) copy-to base=$(SOUIN) target=context $(MAKE) copy-file-to base=$(PKG) target=surrogate/providers/common.go $(MAKE) copy-file-to base=$(CACHE) target=types/layerStorage.go diff --git a/plugins/traefik/go.mod b/plugins/traefik/go.mod index bf5c3b89e..1ca9477e3 100644 --- a/plugins/traefik/go.mod +++ b/plugins/traefik/go.mod @@ -5,7 +5,6 @@ go 1.19 require ( github.com/akyoto/cache v1.0.6 github.com/darkweak/souin v1.6.36 - github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pquerna/cachecontrol v0.1.0 go.uber.org/zap v1.21.0 ) diff --git a/plugins/traefik/go.sum b/plugins/traefik/go.sum index 8fd6b5d27..ae144ac3f 100644 --- a/plugins/traefik/go.sum +++ b/plugins/traefik/go.sum @@ -898,8 +898,6 @@ github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5h github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= diff --git a/plugins/traefik/main.go b/plugins/traefik/main.go index 9610855f3..77633670d 100644 --- a/plugins/traefik/main.go +++ b/plugins/traefik/main.go @@ -201,7 +201,21 @@ func parseConfiguration(c map[string]interface{}) Configuration { // parseStringSlice returns the string slice corresponding to the given interface. // The interface can be of type string which contains a comma separated list of values (e.g. foo,bar) or of type []string. 
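Editor's note, not part of the patch: when Traefik's plugin configuration loader hands a list option to the middleware (for example the allowed_http_verbs: [GET, HEAD, POST] entry added to souin-configuration.yaml further down), it can arrive as a single string whose elements are joined with "║" and prefixed with a "║24║" marker instead of as a []string. The branches added below cover that stringified shape alongside []string, []interface{} and plain comma-separated strings. A minimal, self-contained sketch of the decoding step, with the input value assumed for illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Hypothetical value as the Traefik configuration loader may deliver it:
        // a stringified slice prefixed with "║24║" and separated by "║".
        encoded := "║24║GET║HEAD║POST"

        var verbs []string
        if strings.HasPrefix(encoded, "║24║") {
            // Same decoding as the branch added to parseStringSlice below.
            verbs = strings.Split(strings.TrimPrefix(encoded, "║24║"), "║")
        } else {
            // Existing behaviour is kept as the fallback: a comma-separated list.
            verbs = strings.Split(encoded, ",")
        }

        fmt.Println(verbs) // [GET HEAD POST]
    }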
func parseStringSlice(i interface{}) []string { + if value, ok := i.([]string); ok { + return value + } + if value, ok := i.([]interface{}); ok { + var arr []string + for _, v := range value { + arr = append(arr, v.(string)) + } + return arr + } + if value, ok := i.(string); ok { + if strings.HasPrefix(value, "║24║") { + return strings.Split(strings.TrimPrefix(value, "║24║"), "║") + } return strings.Split(value, ",") } diff --git a/plugins/traefik/override/middleware/middleware.go b/plugins/traefik/override/middleware/middleware.go index dbea70345..656cfca85 100644 --- a/plugins/traefik/override/middleware/middleware.go +++ b/plugins/traefik/override/middleware/middleware.go @@ -179,8 +179,6 @@ type handlerFunc = func(http.ResponseWriter, *http.Request) error func (s *SouinBaseHandler) ServeHTTP(rw http.ResponseWriter, rq *http.Request, next handlerFunc) error { b, handler := s.HandleInternally(rq) - fmt.Println("AFTER") - fmt.Println("AFTER b", b) if b { handler(rw, rq) return nil @@ -193,13 +191,11 @@ func (s *SouinBaseHandler) ServeHTTP(rw http.ResponseWriter, rq *http.Request, n return next(rw, rq) } - fmt.Println("RESPONSE 1") if !rq.Context().Value(context.SupportedMethod).(bool) { rw.Header().Set("Cache-Status", cacheName+"; fwd=uri-miss; detail=UNSUPPORTED-METHOD") return next(rw, rq) } - fmt.Println("RESPONSE 2") requestCc, coErr := cacheobject.ParseRequestCacheControl(rq.Header.Get("Cache-Control")) diff --git a/plugins/traefik/override/providers/abstractProvider.go b/plugins/traefik/override/providers/abstractProvider.go deleted file mode 100644 index d18a21eca..000000000 --- a/plugins/traefik/override/providers/abstractProvider.go +++ /dev/null @@ -1,58 +0,0 @@ -package providers - -import ( - "net/http" - "net/url" - "strings" - - "github.com/darkweak/souin/configurationtypes" -) - -// VarySeparator will separate vary headers from the plain URL -const VarySeparator = "{-VARY-}" -const DecodedHeaderSeparator = ";" -const encodedHeaderSemiColonSeparator = "%3B" -const encodedHeaderColonSeparator = "%3A" -const StalePrefix = "STALE_" - -// InitializeProvider allow to generate the providers array according to the configuration -func InitializeProvider(configuration configurationtypes.AbstractConfigurationInterface) *Cache { - r, _ := CacheConnectionFactory(configuration) - e := r.Init() - if e != nil { - panic(e) - } - return r -} - -func varyVoter(baseKey string, req *http.Request, currentKey string) bool { - if currentKey == baseKey { - return true - } - - if strings.Contains(currentKey, VarySeparator) && strings.HasPrefix(currentKey, baseKey+VarySeparator) { - list := currentKey[(strings.LastIndex(currentKey, VarySeparator) + len(VarySeparator)):] - if len(list) == 0 { - return false - } - - for _, item := range strings.Split(list, ";") { - index := strings.LastIndex(item, ":") - if len(item) < index+1 { - return false - } - - hVal := item[index+1:] - if strings.Contains(hVal, encodedHeaderSemiColonSeparator) || strings.Contains(hVal, encodedHeaderColonSeparator) { - hVal, _ = url.QueryUnescape(hVal) - } - if req.Header.Get(item[:index]) != hVal { - return false - } - } - - return true - } - - return false -} diff --git a/plugins/traefik/override/providers/abstractProvider_test.go b/plugins/traefik/override/providers/abstractProvider_test.go deleted file mode 100644 index f44303172..000000000 --- a/plugins/traefik/override/providers/abstractProvider_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package providers - -import ( - "testing" - - "github.com/darkweak/souin/errors" - 
"github.com/darkweak/souin/tests" -) - -func TestInitializeProvider(t *testing.T) { - c := tests.MockConfiguration(tests.BaseConfiguration) - p := InitializeProvider(c) - err := p.Init() - if nil != err { - errors.GenerateError(t, "Init shouldn't crash") - } -} diff --git a/plugins/traefik/override/providers/cacheProvider.go b/plugins/traefik/override/providers/cacheProvider.go deleted file mode 100644 index 50ff494b9..000000000 --- a/plugins/traefik/override/providers/cacheProvider.go +++ /dev/null @@ -1,110 +0,0 @@ -package providers - -import ( - "net/http" - "regexp" - "strings" - "time" - - t "github.com/darkweak/souin/configurationtypes" - "github.com/patrickmn/go-cache" -) - -// Cache provider type -type Cache struct { - *cache.Cache - stale time.Duration -} - -// CacheConnectionFactory function create new Cache instance -func CacheConnectionFactory(c t.AbstractConfigurationInterface) (*Cache, error) { - provider := cache.New(1*time.Second, 1*time.Second) - return &Cache{Cache: provider, stale: c.GetDefaultCache().GetStale()}, nil -} - -// ListKeys method returns the list of existing keys -func (provider *Cache) ListKeys() []string { - items := provider.Items() - keys := make([]string, 0, len(items)) - for k := range items { - keys = append(keys, k) - } - - return keys -} - -// Get method returns the populated response if exists, empty response then -func (provider *Cache) Get(key string) []byte { - result, found := provider.Cache.Get(key) - - if !found { - return []byte{} - } - - return result.([]byte) -} - -// Prefix method returns the populated response if exists, empty response then -func (provider *Cache) Prefix(key string, req *http.Request) []byte { - var result []byte - - for k, v := range provider.Items() { - if k == key { - return v.Object.([]byte) - } - - if !strings.HasPrefix(k, key) { - continue - } - - if varyVoter(key, req, k) { - result = v.Object.([]byte) - } - } - - return result -} - -// Set method will store the response in Cache provider -func (provider *Cache) Set(key string, value []byte, url t.URL, duration time.Duration) error { - if duration == 0 { - duration = url.TTL.Duration - } - - provider.Cache.Set(key, value, duration) - provider.Cache.Set(StalePrefix+key, value, provider.stale+duration) - - return nil -} - -// Delete method will delete the response in Cache provider if exists corresponding to key param -func (provider *Cache) Delete(key string) { - provider.Cache.Delete(key) -} - -// DeleteMany method will delete the responses in Cache provider if exists corresponding to the regex key param -func (provider *Cache) DeleteMany(key string) { - re, e := regexp.Compile(key) - - if e != nil { - return - } - - for k := range provider.Items() { - if re.MatchString(k) { - provider.Delete(k) - } - } -} - -// Init method will -func (provider *Cache) Init() error { - return nil -} - -// Reset method will reset or close provider -func (provider *Cache) Reset() error { - provider.Cache.Flush() - - return nil -} diff --git a/plugins/traefik/override/storage/cacheProvider.go b/plugins/traefik/override/storage/cacheProvider.go index b11a0b44c..c590f355a 100644 --- a/plugins/traefik/override/storage/cacheProvider.go +++ b/plugins/traefik/override/storage/cacheProvider.go @@ -47,6 +47,7 @@ func (provider *Cache) Get(key string) []byte { // Prefix method returns the populated response if exists, empty response then func (provider *Cache) Prefix(key string, req *http.Request) []byte { var result []byte + provider.Cache.Range(func(k, v interface{}) bool { if k == key { 
result = v.([]byte) diff --git a/plugins/traefik/override/ykeys/ykey.go b/plugins/traefik/override/ykeys/ykey.go index 3b5b7d05f..b710369c0 100644 --- a/plugins/traefik/override/ykeys/ykey.go +++ b/plugins/traefik/override/ykeys/ykey.go @@ -2,12 +2,13 @@ package ykeys import ( "fmt" - "github.com/patrickmn/go-cache" "net/http" "regexp" "strings" "time" + "github.com/akyoto/cache" + "github.com/darkweak/souin/configurationtypes" ) @@ -47,7 +48,7 @@ func InitializeYKeys(keys map[string]configurationtypes.SurrogateKeys) *YKeyStor return nil } - c := cache.New(1*time.Second, 2*time.Second) + c := cache.New(1 * time.Second) for key := range keys { c.Set(key, "", 1) diff --git a/plugins/traefik/souin-configuration.yaml b/plugins/traefik/souin-configuration.yaml index c595d659a..db644f1e5 100644 --- a/plugins/traefik/souin-configuration.yaml +++ b/plugins/traefik/souin-configuration.yaml @@ -29,6 +29,7 @@ http: regex: exclude: '/excluded' ttl: 5s + allowed_http_verbs: [GET, HEAD, POST] default_cache_control: public log_level: debug urls: diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/abstractProvider.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/abstractProvider.go index d18a21eca..2e86679e3 100644 --- a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/abstractProvider.go +++ b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/abstractProvider.go @@ -5,6 +5,7 @@ import ( "net/url" "strings" + "github.com/darkweak/souin/cache/types" "github.com/darkweak/souin/configurationtypes" ) @@ -13,11 +14,28 @@ const VarySeparator = "{-VARY-}" const DecodedHeaderSeparator = ";" const encodedHeaderSemiColonSeparator = "%3B" const encodedHeaderColonSeparator = "%3A" -const StalePrefix = "STALE_" +const stalePrefix = "STALE_" // InitializeProvider allow to generate the providers array according to the configuration -func InitializeProvider(configuration configurationtypes.AbstractConfigurationInterface) *Cache { - r, _ := CacheConnectionFactory(configuration) +func InitializeProvider(configuration configurationtypes.AbstractConfigurationInterface) types.AbstractProviderInterface { + var r types.AbstractProviderInterface + if configuration.GetDefaultCache().GetDistributed() { + if configuration.GetDefaultCache().GetEtcd().Configuration != nil { + r, _ = EtcdConnectionFactory(configuration) + } else if configuration.GetDefaultCache().GetRedis().Configuration != nil || configuration.GetDefaultCache().GetRedis().URL != "" { + r, _ = RedisConnectionFactory(configuration) + } else { + if configuration.GetDefaultCache().GetOlric().URL != "" { + r, _ = OlricConnectionFactory(configuration) + } else { + r, _ = EmbeddedOlricConnectionFactory(configuration) + } + } + } else if configuration.GetDefaultCache().GetNuts().Configuration != nil || configuration.GetDefaultCache().GetNuts().Path != "" { + r, _ = NutsConnectionFactory(configuration) + } else { + r, _ = BadgerConnectionFactory(configuration) + } e := r.Init() if e != nil { panic(e) diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/abstractProvider_test.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/abstractProvider_test.go deleted file mode 100644 index f44303172..000000000 --- a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/abstractProvider_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package providers - -import ( - "testing" - - "github.com/darkweak/souin/errors" - "github.com/darkweak/souin/tests" -) - -func 
TestInitializeProvider(t *testing.T) { - c := tests.MockConfiguration(tests.BaseConfiguration) - p := InitializeProvider(c) - err := p.Init() - if nil != err { - errors.GenerateError(t, "Init shouldn't crash") - } -} diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/badgerProvider.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/badgerProvider.go new file mode 100644 index 000000000..da5ec22e4 --- /dev/null +++ b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/badgerProvider.go @@ -0,0 +1,203 @@ +package providers + +import ( + "encoding/json" + "net/http" + "regexp" + "time" + + "github.com/darkweak/souin/cache/types" + t "github.com/darkweak/souin/configurationtypes" + badger "github.com/dgraph-io/badger/v3" + "github.com/imdario/mergo" + "go.uber.org/zap" +) + +// Badger provider type +type Badger struct { + *badger.DB + stale time.Duration + logger *zap.Logger +} + +var ( + enabledBadgerInstances = make(map[string]*Badger) + _ badger.Logger = (*badgerLogger)(nil) +) + +type badgerLogger struct { + *zap.SugaredLogger +} + +func (b *badgerLogger) Warningf(msg string, params ...interface{}) { + b.SugaredLogger.Warnf(msg, params...) +} + +// BadgerConnectionFactory function create new Badger instance +func BadgerConnectionFactory(c t.AbstractConfigurationInterface) (types.AbstractProviderInterface, error) { + dc := c.GetDefaultCache() + badgerConfiguration := dc.GetBadger() + badgerOptions := badger.DefaultOptions(badgerConfiguration.Path) + if badgerConfiguration.Configuration != nil { + var parsedBadger badger.Options + if b, e := json.Marshal(badgerConfiguration.Configuration); e == nil { + if e = json.Unmarshal(b, &parsedBadger); e != nil { + c.GetLogger().Sugar().Error("Impossible to parse the configuration for the default provider (Badger)", e) + } + } + + if err := mergo.Merge(&badgerOptions, parsedBadger, mergo.WithOverride); err != nil { + c.GetLogger().Sugar().Error("An error occurred during the badgerOptions merge from the default options with your configuration.") + } + } else if badgerConfiguration.Path == "" { + badgerOptions = badgerOptions.WithInMemory(true) + } + + badgerOptions.Logger = &badgerLogger{SugaredLogger: c.GetLogger().Sugar()} + uid := badgerOptions.Dir + badgerOptions.ValueDir + dc.GetStale().String() + if i, ok := enabledBadgerInstances[uid]; ok { + return i, nil + } + + db, e := badger.Open(badgerOptions) + + if e != nil { + c.GetLogger().Sugar().Error("Impossible to open the Badger DB.", e) + } + + i := &Badger{DB: db, logger: c.GetLogger(), stale: dc.GetStale()} + enabledBadgerInstances[uid] = i + + return i, nil +} + +// ListKeys method returns the list of existing keys +func (provider *Badger) ListKeys() []string { + keys := []string{} + + e := provider.DB.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + opts.PrefetchValues = false + it := txn.NewIterator(opts) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + keys = append(keys, string(it.Item().Key())) + } + return nil + }) + + if e != nil { + return []string{} + } + + return keys +} + +// Get method returns the populated response if exists, empty response then +func (provider *Badger) Get(key string) []byte { + var item *badger.Item + var result []byte + + e := provider.DB.View(func(txn *badger.Txn) error { + i, err := txn.Get([]byte(key)) + item = i + return err + }) + + if e == badger.ErrKeyNotFound { + return result + } + + _ = item.Value(func(val []byte) error { + result = val + return nil + 
}) + + return result +} + +// Prefix method returns the populated response if exists, empty response then +func (provider *Badger) Prefix(key string, req *http.Request) []byte { + var result []byte + + _ = provider.DB.View(func(txn *badger.Txn) error { + prefix := []byte(key) + it := txn.NewIterator(badger.DefaultIteratorOptions) + defer it.Close() + for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { + if varyVoter(key, req, string(it.Item().Key())) { + _ = it.Item().Value(func(val []byte) error { + result = val + return nil + }) + } + } + return nil + }) + + return result +} + +// Set method will store the response in Badger provider +func (provider *Badger) Set(key string, value []byte, url t.URL, duration time.Duration) error { + if duration == 0 { + duration = url.TTL.Duration + } + + err := provider.DB.Update(func(txn *badger.Txn) error { + return txn.SetEntry(badger.NewEntry([]byte(key), value).WithTTL(duration)) + }) + + if err != nil { + provider.logger.Sugar().Errorf("Impossible to set value into Badger, %v", err) + return err + } + + err = provider.DB.Update(func(txn *badger.Txn) error { + return txn.SetEntry(badger.NewEntry([]byte(stalePrefix+key), value).WithTTL(provider.stale + duration)) + }) + + if err != nil { + provider.logger.Sugar().Errorf("Impossible to set value into Badger, %v", err) + } + + return nil +} + +// Delete method will delete the response in Badger provider if exists corresponding to key param +func (provider *Badger) Delete(key string) { + _ = provider.DB.DropPrefix([]byte(key)) +} + +// DeleteMany method will delete the responses in Badger provider if exists corresponding to the regex key param +func (provider *Badger) DeleteMany(key string) { + re, e := regexp.Compile(key) + + if e != nil { + return + } + + _ = provider.DB.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + opts.PrefetchValues = false + it := txn.NewIterator(opts) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + k := string(it.Item().Key()) + if re.MatchString(k) { + provider.Delete(k) + } + } + return nil + }) +} + +// Init method will +func (provider *Badger) Init() error { + return nil +} + +// Reset method will reset or close provider +func (provider *Badger) Reset() error { + return provider.DB.DropAll() +} diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/cacheProvider.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/cacheProvider.go deleted file mode 100644 index 50ff494b9..000000000 --- a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/cacheProvider.go +++ /dev/null @@ -1,110 +0,0 @@ -package providers - -import ( - "net/http" - "regexp" - "strings" - "time" - - t "github.com/darkweak/souin/configurationtypes" - "github.com/patrickmn/go-cache" -) - -// Cache provider type -type Cache struct { - *cache.Cache - stale time.Duration -} - -// CacheConnectionFactory function create new Cache instance -func CacheConnectionFactory(c t.AbstractConfigurationInterface) (*Cache, error) { - provider := cache.New(1*time.Second, 1*time.Second) - return &Cache{Cache: provider, stale: c.GetDefaultCache().GetStale()}, nil -} - -// ListKeys method returns the list of existing keys -func (provider *Cache) ListKeys() []string { - items := provider.Items() - keys := make([]string, 0, len(items)) - for k := range items { - keys = append(keys, k) - } - - return keys -} - -// Get method returns the populated response if exists, empty response then -func (provider *Cache) Get(key string) 
[]byte { - result, found := provider.Cache.Get(key) - - if !found { - return []byte{} - } - - return result.([]byte) -} - -// Prefix method returns the populated response if exists, empty response then -func (provider *Cache) Prefix(key string, req *http.Request) []byte { - var result []byte - - for k, v := range provider.Items() { - if k == key { - return v.Object.([]byte) - } - - if !strings.HasPrefix(k, key) { - continue - } - - if varyVoter(key, req, k) { - result = v.Object.([]byte) - } - } - - return result -} - -// Set method will store the response in Cache provider -func (provider *Cache) Set(key string, value []byte, url t.URL, duration time.Duration) error { - if duration == 0 { - duration = url.TTL.Duration - } - - provider.Cache.Set(key, value, duration) - provider.Cache.Set(StalePrefix+key, value, provider.stale+duration) - - return nil -} - -// Delete method will delete the response in Cache provider if exists corresponding to key param -func (provider *Cache) Delete(key string) { - provider.Cache.Delete(key) -} - -// DeleteMany method will delete the responses in Cache provider if exists corresponding to the regex key param -func (provider *Cache) DeleteMany(key string) { - re, e := regexp.Compile(key) - - if e != nil { - return - } - - for k := range provider.Items() { - if re.MatchString(k) { - provider.Delete(k) - } - } -} - -// Init method will -func (provider *Cache) Init() error { - return nil -} - -// Reset method will reset or close provider -func (provider *Cache) Reset() error { - provider.Cache.Flush() - - return nil -} diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/embeddedOlricProvider.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/embeddedOlricProvider.go new file mode 100644 index 000000000..5a12e3840 --- /dev/null +++ b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/embeddedOlricProvider.go @@ -0,0 +1,218 @@ +package providers + +import ( + "context" + "net/http" + "os" + "time" + + "github.com/buraksezer/olric" + "github.com/buraksezer/olric/config" + t "github.com/darkweak/souin/configurationtypes" + "github.com/google/uuid" + "go.uber.org/zap" + "gopkg.in/yaml.v3" +) + +// EmbeddedOlric provider type +type EmbeddedOlric struct { + dm olric.DMap + db *olric.Olric + stale time.Duration + logger *zap.Logger + ct context.Context +} + +func tryToLoadConfiguration(olricInstance *config.Config, olricConfiguration t.CacheProvider, logger *zap.Logger) (*config.Config, bool) { + var e error + isAlreadyLoaded := false + if olricConfiguration.Configuration == nil && olricConfiguration.Path != "" { + if olricInstance, e = config.Load(olricConfiguration.Path); e == nil { + isAlreadyLoaded = true + } + } else if olricConfiguration.Configuration != nil { + tmpFile := "/tmp/" + uuid.NewString() + ".yml" + yamlConfig, e := yaml.Marshal(olricConfiguration.Configuration) + defer func() { + if e = os.RemoveAll(tmpFile); e != nil { + logger.Error("Impossible to remove the temporary file") + } + }() + if e = os.WriteFile( + tmpFile, + yamlConfig, + 0600, + ); e != nil { + logger.Error("Impossible to create the embedded Olric config from the given one") + } + + if olricInstance, e = config.Load(tmpFile); e == nil { + isAlreadyLoaded = true + } else { + logger.Error("Impossible to create the embedded Olric config from the given one") + } + } + + return olricInstance, isAlreadyLoaded +} + +// EmbeddedOlricConnectionFactory function create new EmbeddedOlric instance +func 
EmbeddedOlricConnectionFactory(configuration t.AbstractConfigurationInterface) (*EmbeddedOlric, error) { + var olricInstance *config.Config + loaded := false + + if olricInstance, loaded = tryToLoadConfiguration(olricInstance, configuration.GetDefaultCache().GetOlric(), configuration.GetLogger()); !loaded { + olricInstance = config.New("local") + olricInstance.DMaps.MaxInuse = 512 << 20 + } + + started, cancel := context.WithCancel(context.Background()) + olricInstance.Started = func() { + configuration.GetLogger().Sugar().Error("Embedded Olric is ready") + defer cancel() + } + + db, err := olric.New(olricInstance) + if err != nil { + return nil, err + } + + ch := make(chan error, 1) + defer func() { + close(ch) + }() + + go func(cdb *olric.Olric) { + if err = cdb.Start(); err != nil { + ch <- err + } + }(db) + + select { + case err = <-ch: + case <-started.Done(): + } + dm, e := db.NewEmbeddedClient().NewDMap("souin-map") + + configuration.GetLogger().Sugar().Info("Embedded Olric is ready for this node.") + + return &EmbeddedOlric{ + dm: dm, + db: db, + stale: configuration.GetDefaultCache().GetStale(), + logger: configuration.GetLogger(), + ct: context.Background(), + }, e +} + +// ListKeys method returns the list of existing keys +func (provider *EmbeddedOlric) ListKeys() []string { + + records, err := provider.dm.Scan(provider.ct) + if err != nil { + provider.logger.Sugar().Errorf("An error occurred while trying to list keys in Olric: %s\n", err) + return []string{} + } + + keys := []string{} + for records.Next() { + keys = append(keys, records.Key()) + } + records.Close() + + return keys +} + +// Prefix method returns the populated response if exists, empty response then +func (provider *EmbeddedOlric) Prefix(key string, req *http.Request) []byte { + records, err := provider.dm.Scan(provider.ct, olric.Match("^"+key+"({|$)")) + if err != nil { + provider.logger.Sugar().Errorf("An error occurred while trying to retrieve data in Olric: %s\n", err) + return []byte{} + } + + for records.Next() { + if varyVoter(key, req, records.Key()) { + return provider.Get(records.Key()) + } + } + records.Close() + + return []byte{} +} + +// Get method returns the populated response if exists, empty response then +func (provider *EmbeddedOlric) Get(key string) []byte { + res, err := provider.dm.Get(provider.ct, key) + + if err != nil { + return []byte{} + } + + val, _ := res.Byte() + return val +} + +// Set method will store the response in EmbeddedOlric provider +func (provider *EmbeddedOlric) Set(key string, value []byte, url t.URL, duration time.Duration) error { + if duration == 0 { + duration = url.TTL.Duration + } + + if err := provider.dm.Put(provider.ct, key, value, olric.EX(duration)); err != nil { + provider.logger.Sugar().Errorf("Impossible to set value into EmbeddedOlric, %v", err) + return err + } + + if err := provider.dm.Put(provider.ct, stalePrefix+key, value, olric.EX(provider.stale+duration)); err != nil { + provider.logger.Sugar().Errorf("Impossible to set value into EmbeddedOlric, %v", err) + } + + return nil +} + +// Delete method will delete the response in EmbeddedOlric provider if exists corresponding to key param +func (provider *EmbeddedOlric) Delete(key string) { + _, err := provider.dm.Delete(provider.ct, key) + if err != nil { + provider.logger.Sugar().Errorf("Impossible to delete value into Olric, %v", err) + } +} + +// DeleteMany method will delete the responses in EmbeddedOlric provider if exists corresponding to the regex key param +func (provider *EmbeddedOlric) 
DeleteMany(key string) { + records, err := provider.dm.Scan(provider.ct, olric.Match(key)) + if err != nil { + provider.logger.Sugar().Errorf("Impossible to delete values into EmbeddedOlric, %v", err) + return + } + + keys := []string{} + for records.Next() { + keys = append(keys, records.Key()) + } + records.Close() + + _, _ = provider.dm.Delete(provider.ct, keys...) +} + +// Init method will initialize EmbeddedOlric provider if needed +func (provider *EmbeddedOlric) Init() error { + return nil +} + +// Reset method will reset or close provider +func (provider *EmbeddedOlric) Reset() error { + return provider.db.Shutdown(provider.ct) +} + +// Destruct method will reset or close provider +func (provider *EmbeddedOlric) Destruct() error { + provider.logger.Sugar().Debug("Destruct current embedded olric...") + return provider.Reset() +} + +// GetDM method returns the embbeded instance dm property +func (provider *EmbeddedOlric) GetDM() olric.DMap { + return provider.dm +} diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/etcdProvider.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/etcdProvider.go new file mode 100644 index 000000000..8db2e6726 --- /dev/null +++ b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/etcdProvider.go @@ -0,0 +1,217 @@ +package providers + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "regexp" + "time" + + "github.com/darkweak/souin/cache/types" + t "github.com/darkweak/souin/configurationtypes" + clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" + "google.golang.org/grpc/connectivity" +) + +// Etcd provider type +type Etcd struct { + *clientv3.Client + stale time.Duration + ctx context.Context + logger *zap.Logger + reconnecting bool + configuration clientv3.Config +} + +// EtcdConnectionFactory function create new Etcd instance +func EtcdConnectionFactory(c t.AbstractConfigurationInterface) (types.AbstractReconnectProvider, error) { + dc := c.GetDefaultCache() + bc, _ := json.Marshal(dc.GetEtcd().Configuration) + etcdConfiguration := clientv3.Config{ + DialTimeout: 5 * time.Second, + AutoSyncInterval: 1 * time.Second, + Logger: c.GetLogger(), + } + _ = json.Unmarshal(bc, &etcdConfiguration) + + cli, err := clientv3.New(etcdConfiguration) + + if err != nil { + c.GetLogger().Sugar().Error("Impossible to initialize the Etcd DB.", err) + return nil, err + } + + for { + if cli.ActiveConnection().GetState() == connectivity.Ready { + break + } + } + + return &Etcd{ + Client: cli, + ctx: context.Background(), + stale: dc.GetStale(), + logger: c.GetLogger(), + configuration: etcdConfiguration, + }, nil +} + +// ListKeys method returns the list of existing keys +func (provider *Etcd) ListKeys() []string { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to list the etcd keys while reconnecting.") + return []string{} + } + keys := []string{} + + r, e := provider.Client.Get(provider.ctx, "\x00", clientv3.WithFromKey()) + + if e != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + return []string{} + } + for _, k := range r.Kvs { + keys = append(keys, string(k.Key)) + } + + return keys +} + +// Get method returns the populated response if exists, empty response then +func (provider *Etcd) Get(key string) (item []byte) { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to get the etcd key while reconnecting.") + return []byte{} + } + r, e := provider.Client.Get(provider.ctx, key) + + if e != nil && !provider.reconnecting { + 
go provider.Reconnect() + return + } + + if e == nil && r != nil && len(r.Kvs) > 0 { + item = r.Kvs[0].Value + } + + return +} + +// Prefix method returns the populated response if exists, empty response then +func (provider *Etcd) Prefix(key string, req *http.Request) []byte { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to get the etcd keys by prefix while reconnecting.") + return []byte{} + } + r, e := provider.Client.Get(provider.ctx, key, clientv3.WithPrefix()) + + if e != nil && !provider.reconnecting { + go provider.Reconnect() + return []byte{} + } + + if e == nil && r != nil { + for _, v := range r.Kvs { + if varyVoter(key, req, string(v.Key)) { + return v.Value + } + } + } + + return []byte{} +} + +// Set method will store the response in Etcd provider +func (provider *Etcd) Set(key string, value []byte, url t.URL, duration time.Duration) error { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to set the etcd value while reconnecting.") + return fmt.Errorf("reconnecting error") + } + if provider.Client.ActiveConnection().GetState() != connectivity.Ready && provider.Client.ActiveConnection().GetState() != connectivity.Idle { + return fmt.Errorf("the connection is not ready: %v", provider.Client.ActiveConnection().GetState()) + } + if duration == 0 { + duration = url.TTL.Duration + } + + rs, err := provider.Client.Grant(context.TODO(), int64(duration.Seconds())) + if err == nil { + _, err = provider.Client.Put(provider.ctx, key, string(value), clientv3.WithLease(rs.ID)) + } + + if err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Errorf("Impossible to set value into Etcd, %v", err) + return err + } + + _, err = provider.Client.Put(provider.ctx, stalePrefix+key, string(value), clientv3.WithLease(rs.ID)) + + if err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Errorf("Impossible to set value into Etcd, %v", err) + } + + return err +} + +// Delete method will delete the response in Etcd provider if exists corresponding to key param +func (provider *Etcd) Delete(key string) { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to delete the etcd key while reconnecting.") + return + } + _, _ = provider.Client.Delete(provider.ctx, key) +} + +// DeleteMany method will delete the responses in Etcd provider if exists corresponding to the regex key param +func (provider *Etcd) DeleteMany(key string) { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to delete the etcd keys while reconnecting.") + return + } + re, e := regexp.Compile(key) + + if e != nil { + return + } + + if r, e := provider.Client.Get(provider.ctx, "\x00", clientv3.WithFromKey()); e == nil { + for _, k := range r.Kvs { + key := string(k.Key) + if re.MatchString(key) { + provider.Delete(key) + } + } + } +} + +// Init method will +func (provider *Etcd) Init() error { + return nil +} + +// Reset method will reset or close provider +func (provider *Etcd) Reset() error { + return provider.Client.Close() +} + +func (provider *Etcd) Reconnect() { + provider.reconnecting = true + + if c, err := clientv3.New(provider.configuration); err == nil && c != nil { + provider.Client = c + provider.reconnecting = false + } else { + time.Sleep(10 * time.Second) + provider.Reconnect() + } +} diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/nutsProvider.go 
b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/nutsProvider.go new file mode 100644 index 000000000..9be58193f --- /dev/null +++ b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/nutsProvider.go @@ -0,0 +1,219 @@ +package providers + +import ( + "encoding/json" + "net/http" + "strconv" + "time" + + "github.com/darkweak/souin/cache/types" + t "github.com/darkweak/souin/configurationtypes" + "github.com/imdario/mergo" + "github.com/xujiajun/nutsdb" + "go.uber.org/zap" +) + +// Nuts provider type +type Nuts struct { + *nutsdb.DB + stale time.Duration + logger *zap.Logger +} + +const ( + bucket = "souin-bucket" + nutsLimit = 1 << 16 +) + +func sanitizeProperties(m map[string]interface{}) map[string]interface{} { + iotas := []string{"RWMode", "StartFileLoadingMode"} + for _, i := range iotas { + if v := m[i]; v != nil { + currentMode := nutsdb.FileIO + switch v { + case 1: + currentMode = nutsdb.MMap + } + m[i] = currentMode + } + } + + for _, i := range []string{"SegmentSize", "NodeNum", "MaxFdNumsInCache"} { + if v := m[i]; v != nil { + m[i], _ = v.(int64) + } + } + + if v := m["EntryIdxMode"]; v != nil { + m["EntryIdxMode"] = nutsdb.HintKeyValAndRAMIdxMode + switch v { + case 1: + m["EntryIdxMode"] = nutsdb.HintKeyAndRAMIdxMode + case 2: + m["EntryIdxMode"] = nutsdb.HintBPTSparseIdxMode + } + } + + if v := m["SyncEnable"]; v != nil { + m["SyncEnable"] = true + if b, ok := v.(bool); ok { + m["SyncEnable"] = b + } else if s, ok := v.(string); ok { + m["SyncEnable"], _ = strconv.ParseBool(s) + } + } + + return m +} + +// NutsConnectionFactory function create new Nuts instance +func NutsConnectionFactory(c t.AbstractConfigurationInterface) (types.AbstractProviderInterface, error) { + dc := c.GetDefaultCache() + nutsConfiguration := dc.GetNuts() + nutsOptions := nutsdb.DefaultOptions + nutsOptions.Dir = "/tmp/souin-nuts" + if nutsConfiguration.Configuration != nil { + var parsedNuts nutsdb.Options + nutsConfiguration.Configuration = sanitizeProperties(nutsConfiguration.Configuration.(map[string]interface{})) + if b, e := json.Marshal(nutsConfiguration.Configuration); e == nil { + if e = json.Unmarshal(b, &parsedNuts); e != nil { + c.GetLogger().Sugar().Error("Impossible to parse the configuration for the Nuts provider", e) + } + } + + if err := mergo.Merge(&nutsOptions, parsedNuts, mergo.WithOverride); err != nil { + c.GetLogger().Sugar().Error("An error occurred during the nutsOptions merge from the default options with your configuration.") + } + } else { + nutsOptions.RWMode = nutsdb.MMap + if nutsConfiguration.Path != "" { + nutsOptions.Dir = nutsConfiguration.Path + } + } + + db, e := nutsdb.Open(nutsOptions) + + if e != nil { + c.GetLogger().Sugar().Error("Impossible to open the Nuts DB.", e) + } + + return &Nuts{ + DB: db, + stale: dc.GetStale(), + logger: c.GetLogger(), + }, nil +} + +// ListKeys method returns the list of existing keys +func (provider *Nuts) ListKeys() []string { + keys := []string{} + + e := provider.DB.View(func(tx *nutsdb.Tx) error { + e, _ := tx.GetAll(bucket) + for _, k := range e { + keys = append(keys, string(k.Key)) + } + return nil + }) + + if e != nil { + return []string{} + } + + return keys +} + +// Get method returns the populated response if exists, empty response then +func (provider *Nuts) Get(key string) (item []byte) { + _ = provider.DB.View(func(tx *nutsdb.Tx) error { + i, e := tx.Get(bucket, []byte(key)) + if i != nil { + item = i.Value + } + return e + }) + + return +} + +// Prefix method returns the populated 
response if exists, empty response then +func (provider *Nuts) Prefix(key string, req *http.Request) []byte { + var result []byte + + _ = provider.DB.View(func(tx *nutsdb.Tx) error { + prefix := []byte(key) + + if entries, _, err := tx.PrefixSearchScan(bucket, prefix, "^({|$)", 0, 50); err != nil { + return err + } else { + for _, entry := range entries { + if varyVoter(key, req, string(entry.Key)) { + result = entry.Value + return nil + } + } + } + return nil + }) + + return result +} + +// Set method will store the response in Nuts provider +func (provider *Nuts) Set(key string, value []byte, url t.URL, duration time.Duration) error { + if duration == 0 { + duration = url.TTL.Duration + } + + err := provider.DB.Update(func(tx *nutsdb.Tx) error { + return tx.Put(bucket, []byte(key), value, uint32(duration.Seconds())) + }) + + if err != nil { + provider.logger.Sugar().Errorf("Impossible to set value into Nuts, %v", err) + return err + } + + err = provider.DB.Update(func(tx *nutsdb.Tx) error { + return tx.Put(bucket, []byte(stalePrefix+key), value, uint32((provider.stale + duration).Seconds())) + }) + + if err != nil { + provider.logger.Sugar().Errorf("Impossible to set value into Nuts, %v", err) + } + + return nil +} + +// Delete method will delete the response in Nuts provider if exists corresponding to key param +func (provider *Nuts) Delete(key string) { + _ = provider.DB.Update(func(tx *nutsdb.Tx) error { + return tx.Delete(bucket, []byte(key)) + }) +} + +// DeleteMany method will delete the responses in Nuts provider if exists corresponding to the regex key param +func (provider *Nuts) DeleteMany(key string) { + _ = provider.DB.Update(func(tx *nutsdb.Tx) error { + if entries, _, err := tx.PrefixSearchScan(bucket, []byte(""), key, 0, nutsLimit); err != nil { + return err + } else { + for _, entry := range entries { + _ = tx.Delete(bucket, entry.Key) + } + } + return nil + }) +} + +// Init method will +func (provider *Nuts) Init() error { + return nil +} + +// Reset method will reset or close provider +func (provider *Nuts) Reset() error { + return provider.DB.Update(func(tx *nutsdb.Tx) error { + return tx.DeleteBucket(1, bucket) + }) +} diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/olricProvider.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/olricProvider.go new file mode 100644 index 000000000..7b5dedb37 --- /dev/null +++ b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/olricProvider.go @@ -0,0 +1,223 @@ +package providers + +import ( + "context" + "errors" + "fmt" + "net/http" + "sync" + "time" + + "github.com/buraksezer/olric" + "github.com/buraksezer/olric/config" + "github.com/darkweak/souin/cache/types" + t "github.com/darkweak/souin/configurationtypes" + "go.uber.org/zap" +) + +// Olric provider type +type Olric struct { + *olric.ClusterClient + dm *sync.Pool + stale time.Duration + logger *zap.Logger + addresses []string + reconnecting bool + configuration config.Client +} + +// OlricConnectionFactory function create new Olric instance +func OlricConnectionFactory(configuration t.AbstractConfigurationInterface) (types.AbstractReconnectProvider, error) { + c, err := olric.NewClusterClient([]string{configuration.GetDefaultCache().GetOlric().URL}) + if err != nil { + configuration.GetLogger().Sugar().Errorf("Impossible to connect to Olric, %v", err) + } + + return &Olric{ + ClusterClient: c, + dm: nil, + stale: configuration.GetDefaultCache().GetStale(), + logger: configuration.GetLogger(), + configuration: 
config.Client{}, + addresses: []string{configuration.GetDefaultCache().GetOlric().URL}, + }, nil +} + +// ListKeys method returns the list of existing keys +func (provider *Olric) ListKeys() []string { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to list the olric keys while reconnecting.") + return []string{} + } + dm := provider.dm.Get().(olric.DMap) + defer provider.dm.Put(dm) + + records, err := dm.Scan(context.Background()) + if err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Error("An error occurred while trying to list keys in Olric: %s\n", err) + return []string{} + } + + keys := []string{} + for records.Next() { + keys = append(keys, records.Key()) + } + records.Close() + + return keys +} + +// Prefix method returns the populated response if exists, empty response then +func (provider *Olric) Prefix(key string, req *http.Request) []byte { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to get the olric keys by prefix while reconnecting.") + return []byte{} + } + dm := provider.dm.Get().(olric.DMap) + defer provider.dm.Put(dm) + + records, err := dm.Scan(context.Background(), olric.Match("^"+key+"({|$)")) + if err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Errorf("An error occurred while trying to retrieve data in Olric: %s\n", err) + return []byte{} + } + + for records.Next() { + if varyVoter(key, req, records.Key()) { + return provider.Get(records.Key()) + } + } + records.Close() + + return []byte{} +} + +// Get method returns the populated response if exists, empty response then +func (provider *Olric) Get(key string) []byte { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to get the olric key while reconnecting.") + return []byte{} + } + dm := provider.dm.Get().(olric.DMap) + defer provider.dm.Put(dm) + res, err := dm.Get(context.Background(), key) + + if err != nil { + if !errors.Is(err, olric.ErrKeyNotFound) && !errors.Is(err, olric.ErrKeyTooLarge) && !provider.reconnecting { + go provider.Reconnect() + } + return []byte{} + } + + val, _ := res.Byte() + return val +} + +// Set method will store the response in Olric provider +func (provider *Olric) Set(key string, value []byte, url t.URL, duration time.Duration) error { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to set the olric value while reconnecting.") + return fmt.Errorf("reconnecting error") + } + if duration == 0 { + duration = url.TTL.Duration + } + + dm := provider.dm.Get().(olric.DMap) + defer provider.dm.Put(dm) + if err := dm.Put(context.Background(), key, value, olric.EX(duration)); err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Errorf("Impossible to set value into Olric, %v", err) + return err + } + + if err := dm.Put(context.Background(), stalePrefix+key, value, olric.EX(provider.stale+duration)); err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Errorf("Impossible to set value into Olric, %v", err) + } + + return nil +} + +// Delete method will delete the response in Olric provider if exists corresponding to key param +func (provider *Olric) Delete(key string) { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to delete the olric key while reconnecting.") + return + } + dm := provider.dm.Get().(olric.DMap) + defer provider.dm.Put(dm) + _, err := dm.Delete(context.Background(), 
key) + if err != nil { + provider.logger.Sugar().Errorf("Impossible to delete value into Olric, %v", err) + } +} + +// DeleteMany method will delete the responses in Olric provider if exists corresponding to the regex key param +func (provider *Olric) DeleteMany(key string) { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to delete the olric keys while reconnecting.") + return + } + + dm := provider.dm.Get().(olric.DMap) + defer provider.dm.Put(dm) + records, err := dm.Scan(context.Background(), olric.Match(key)) + if err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Error("An error occurred while trying to list keys in Olric: %s\n", err) + return + } + + keys := []string{} + for records.Next() { + keys = append(keys, records.Key()) + } + records.Close() + + _, _ = dm.Delete(context.Background(), keys...) +} + +// Init method will initialize Olric provider if needed +func (provider *Olric) Init() error { + dm := sync.Pool{ + New: func() interface{} { + dmap, _ := provider.ClusterClient.NewDMap("souin-map") + return dmap + }, + } + + provider.dm = &dm + return nil +} + +// Reset method will reset or close provider +func (provider *Olric) Reset() error { + provider.ClusterClient.Close(context.Background()) + + return nil +} + +func (provider *Olric) Reconnect() { + provider.reconnecting = true + + if c, err := olric.NewClusterClient(provider.addresses, olric.WithConfig(&provider.configuration)); err == nil && c != nil { + provider.ClusterClient = c + provider.reconnecting = false + } else { + time.Sleep(10 * time.Second) + provider.Reconnect() + } +} diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/redisProvider.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/redisProvider.go new file mode 100644 index 000000000..a78f80d28 --- /dev/null +++ b/plugins/traefik/vendor/github.com/darkweak/souin/cache/providers/redisProvider.go @@ -0,0 +1,227 @@ +package providers + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "regexp" + "time" + + "github.com/darkweak/souin/cache/types" + t "github.com/darkweak/souin/configurationtypes" + "github.com/redis/go-redis/v9" + "go.uber.org/zap" +) + +// Redis provider type +type Redis struct { + *redis.Client + stale time.Duration + ctx context.Context + logger *zap.Logger + reconnecting bool + configuration redis.Options +} + +// RedisConnectionFactory function create new Nuts instance +func RedisConnectionFactory(c t.AbstractConfigurationInterface) (types.AbstractReconnectProvider, error) { + dc := c.GetDefaultCache() + bc, _ := json.Marshal(dc.GetRedis().Configuration) + + var options redis.Options + if dc.GetRedis().Configuration != nil { + if err := json.Unmarshal(bc, &options); err != nil { + c.GetLogger().Sugar().Infof("Cannot parse your redis configuration: %+v", err) + } + } else { + options = redis.Options{ + Addr: dc.GetRedis().URL, + Password: "", + DB: 0, + PoolSize: 1000, + PoolTimeout: dc.GetTimeout().Cache.Duration, + } + } + + cli := redis.NewClient(&options) + + return &Redis{ + Client: cli, + ctx: context.Background(), + stale: dc.GetStale(), + configuration: options, + logger: c.GetLogger(), + }, nil +} + +// ListKeys method returns the list of existing keys +func (provider *Redis) ListKeys() []string { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to list the redis keys while reconnecting.") + return []string{} + } + keys := []string{} + + iter := 
provider.Client.Scan(provider.ctx, 0, "*", 0).Iterator() + for iter.Next(provider.ctx) { + keys = append(keys, string(iter.Val())) + } + if err := iter.Err(); err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Error(err) + return []string{} + } + + return keys +} + +// Get method returns the populated response if exists, empty response then +func (provider *Redis) Get(key string) (item []byte) { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to get the redis key while reconnecting.") + return + } + r, e := provider.Client.Get(provider.ctx, key).Result() + if e != nil { + if e != redis.Nil && !provider.reconnecting { + go provider.Reconnect() + } + return + } + + item = []byte(r) + + return +} + +// Prefix method returns the populated response if exists, empty response then +func (provider *Redis) Prefix(key string, req *http.Request) []byte { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to get the redis keys by prefix while reconnecting.") + return []byte{} + } + in := make(chan []byte) + out := make(chan bool) + + iter := provider.Client.Scan(provider.ctx, 0, key+"*", 0).Iterator() + go func(iterator *redis.ScanIterator) { + for iterator.Next(provider.ctx) { + select { + case <-out: + return + case <-time.After(1 * time.Nanosecond): + if varyVoter(key, req, iter.Val()) { + v, e := provider.Client.Get(provider.ctx, iter.Val()).Result() + if e != nil && e != redis.Nil && !provider.reconnecting { + go provider.Reconnect() + in <- []byte{} + return + } + in <- []byte(v) + return + } + } + } + }(iter) + + select { + case <-time.After(provider.Client.Options().PoolTimeout): + out <- true + return []byte{} + case v := <-in: + return v + } +} + +// Set method will store the response in Etcd provider +func (provider *Redis) Set(key string, value []byte, url t.URL, duration time.Duration) error { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to set the redis value while reconnecting.") + return fmt.Errorf("reconnecting error") + } + if duration == 0 { + duration = url.TTL.Duration + } + + if err := provider.Client.Set(provider.ctx, key, value, duration).Err(); err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Errorf("Impossible to set value into Redis, %v", err) + return err + } + + if err := provider.Client.Set(provider.ctx, stalePrefix+key, value, duration+provider.stale).Err(); err != nil { + if !provider.reconnecting { + go provider.Reconnect() + } + provider.logger.Sugar().Errorf("Impossible to set value into Redis, %v", err) + } + + return nil +} + +// Delete method will delete the response in Etcd provider if exists corresponding to key param +func (provider *Redis) Delete(key string) { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to delete the redis key while reconnecting.") + return + } + _ = provider.Client.Del(provider.ctx, key) +} + +// DeleteMany method will delete the responses in Nuts provider if exists corresponding to the regex key param +func (provider *Redis) DeleteMany(key string) { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to delete the redis keys while reconnecting.") + return + } + re, e := regexp.Compile(key) + + if e != nil { + return + } + + keys := []string{} + iter := provider.Client.Scan(provider.ctx, 0, "*", 0).Iterator() + for iter.Next(provider.ctx) { + if re.MatchString(iter.Val()) { + keys = append(keys, iter.Val()) + } + } + + if 
iter.Err() != nil && !provider.reconnecting { + go provider.Reconnect() + return + } + + provider.Client.Del(provider.ctx, keys...) +} + +// Init method will +func (provider *Redis) Init() error { + return nil +} + +// Reset method will reset or close provider +func (provider *Redis) Reset() error { + if provider.reconnecting { + provider.logger.Sugar().Error("Impossible to reset the redis instance while reconnecting.") + return nil + } + return provider.Client.Close() +} + +func (provider *Redis) Reconnect() { + provider.reconnecting = true + + if provider.Client = redis.NewClient(&provider.configuration); provider.Client != nil { + provider.reconnecting = false + } else { + time.Sleep(10 * time.Second) + provider.Reconnect() + } +} diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/cache/ykeys/ykey.go b/plugins/traefik/vendor/github.com/darkweak/souin/cache/ykeys/ykey.go index 3b5b7d05f..b710369c0 100644 --- a/plugins/traefik/vendor/github.com/darkweak/souin/cache/ykeys/ykey.go +++ b/plugins/traefik/vendor/github.com/darkweak/souin/cache/ykeys/ykey.go @@ -2,12 +2,13 @@ package ykeys import ( "fmt" - "github.com/patrickmn/go-cache" "net/http" "regexp" "strings" "time" + "github.com/akyoto/cache" + "github.com/darkweak/souin/configurationtypes" ) @@ -47,7 +48,7 @@ func InitializeYKeys(keys map[string]configurationtypes.SurrogateKeys) *YKeyStor return nil } - c := cache.New(1*time.Second, 2*time.Second) + c := cache.New(1 * time.Second) for key := range keys { c.Set(key, "", 1) diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/pkg/middleware/middleware.go b/plugins/traefik/vendor/github.com/darkweak/souin/pkg/middleware/middleware.go index dbea70345..656cfca85 100644 --- a/plugins/traefik/vendor/github.com/darkweak/souin/pkg/middleware/middleware.go +++ b/plugins/traefik/vendor/github.com/darkweak/souin/pkg/middleware/middleware.go @@ -179,8 +179,6 @@ type handlerFunc = func(http.ResponseWriter, *http.Request) error func (s *SouinBaseHandler) ServeHTTP(rw http.ResponseWriter, rq *http.Request, next handlerFunc) error { b, handler := s.HandleInternally(rq) - fmt.Println("AFTER") - fmt.Println("AFTER b", b) if b { handler(rw, rq) return nil @@ -193,13 +191,11 @@ func (s *SouinBaseHandler) ServeHTTP(rw http.ResponseWriter, rq *http.Request, n return next(rw, rq) } - fmt.Println("RESPONSE 1") if !rq.Context().Value(context.SupportedMethod).(bool) { rw.Header().Set("Cache-Status", cacheName+"; fwd=uri-miss; detail=UNSUPPORTED-METHOD") return next(rw, rq) } - fmt.Println("RESPONSE 2") requestCc, coErr := cacheobject.ParseRequestCacheControl(rq.Header.Get("Cache-Control")) diff --git a/plugins/traefik/vendor/github.com/darkweak/souin/pkg/storage/cacheProvider.go b/plugins/traefik/vendor/github.com/darkweak/souin/pkg/storage/cacheProvider.go index b11a0b44c..c590f355a 100644 --- a/plugins/traefik/vendor/github.com/darkweak/souin/pkg/storage/cacheProvider.go +++ b/plugins/traefik/vendor/github.com/darkweak/souin/pkg/storage/cacheProvider.go @@ -47,6 +47,7 @@ func (provider *Cache) Get(key string) []byte { // Prefix method returns the populated response if exists, empty response then func (provider *Cache) Prefix(key string, req *http.Request) []byte { var result []byte + provider.Cache.Range(func(k, v interface{}) bool { if k == key { result = v.([]byte) diff --git a/plugins/traefik/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS b/plugins/traefik/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS deleted file mode 100644 index 2b16e9974..000000000 --- 
a/plugins/traefik/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -This is a list of people who have contributed code to go-cache. They, or their -employers, are the copyright holders of the contributed code. Contributed code -is subject to the license restrictions listed in LICENSE (as they were when the -code was contributed.) - -Dustin Sallings -Jason Mooberry -Sergey Shepelev -Alex Edwards diff --git a/plugins/traefik/vendor/github.com/patrickmn/go-cache/LICENSE b/plugins/traefik/vendor/github.com/patrickmn/go-cache/LICENSE deleted file mode 100644 index db9903c75..000000000 --- a/plugins/traefik/vendor/github.com/patrickmn/go-cache/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/plugins/traefik/vendor/github.com/patrickmn/go-cache/README.md b/plugins/traefik/vendor/github.com/patrickmn/go-cache/README.md deleted file mode 100644 index c5789cc66..000000000 --- a/plugins/traefik/vendor/github.com/patrickmn/go-cache/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# go-cache - -go-cache is an in-memory key:value store/cache similar to memcached that is -suitable for applications running on a single machine. Its major advantage is -that, being essentially a thread-safe `map[string]interface{}` with expiration -times, it doesn't need to serialize or transmit its contents over the network. - -Any object can be stored, for a given duration or forever, and the cache can be -safely used by multiple goroutines. - -Although go-cache isn't meant to be used as a persistent datastore, the entire -cache can be saved to and loaded from a file (using `c.Items()` to retrieve the -items map to serialize, and `NewFrom()` to create a cache from a deserialized -one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.) 
- -### Installation - -`go get github.com/patrickmn/go-cache` - -### Usage - -```go -import ( - "fmt" - "github.com/patrickmn/go-cache" - "time" -) - -func main() { - // Create a cache with a default expiration time of 5 minutes, and which - // purges expired items every 10 minutes - c := cache.New(5*time.Minute, 10*time.Minute) - - // Set the value of the key "foo" to "bar", with the default expiration time - c.Set("foo", "bar", cache.DefaultExpiration) - - // Set the value of the key "baz" to 42, with no expiration time - // (the item won't be removed until it is re-set, or removed using - // c.Delete("baz") - c.Set("baz", 42, cache.NoExpiration) - - // Get the string associated with the key "foo" from the cache - foo, found := c.Get("foo") - if found { - fmt.Println(foo) - } - - // Since Go is statically typed, and cache values can be anything, type - // assertion is needed when values are being passed to functions that don't - // take arbitrary types, (i.e. interface{}). The simplest way to do this for - // values which will only be used once--e.g. for passing to another - // function--is: - foo, found := c.Get("foo") - if found { - MyFunction(foo.(string)) - } - - // This gets tedious if the value is used several times in the same function. - // You might do either of the following instead: - if x, found := c.Get("foo"); found { - foo := x.(string) - // ... - } - // or - var foo string - if x, found := c.Get("foo"); found { - foo = x.(string) - } - // ... - // foo can then be passed around freely as a string - - // Want performance? Store pointers! - c.Set("foo", &MyStruct, cache.DefaultExpiration) - if x, found := c.Get("foo"); found { - foo := x.(*MyStruct) - // ... - } -} -``` - -### Reference - -`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache) diff --git a/plugins/traefik/vendor/github.com/patrickmn/go-cache/cache.go b/plugins/traefik/vendor/github.com/patrickmn/go-cache/cache.go deleted file mode 100644 index db88d2f2c..000000000 --- a/plugins/traefik/vendor/github.com/patrickmn/go-cache/cache.go +++ /dev/null @@ -1,1161 +0,0 @@ -package cache - -import ( - "encoding/gob" - "fmt" - "io" - "os" - "runtime" - "sync" - "time" -) - -type Item struct { - Object interface{} - Expiration int64 -} - -// Returns true if the item has expired. -func (item Item) Expired() bool { - if item.Expiration == 0 { - return false - } - return time.Now().UnixNano() > item.Expiration -} - -const ( - // For use with functions that take an expiration time. - NoExpiration time.Duration = -1 - // For use with functions that take an expiration time. Equivalent to - // passing in the same expiration duration as was given to New() or - // NewFrom() when the cache was created (e.g. 5 minutes.) - DefaultExpiration time.Duration = 0 -) - -type Cache struct { - *cache - // If this is confusing, see the comment at the bottom of New() -} - -type cache struct { - defaultExpiration time.Duration - items map[string]Item - mu sync.RWMutex - onEvicted func(string, interface{}) - janitor *janitor -} - -// Add an item to the cache, replacing any existing item. If the duration is 0 -// (DefaultExpiration), the cache's default expiration time is used. If it is -1 -// (NoExpiration), the item never expires. 
-func (c *cache) Set(k string, x interface{}, d time.Duration) { - // "Inlining" of set - var e int64 - if d == DefaultExpiration { - d = c.defaultExpiration - } - if d > 0 { - e = time.Now().Add(d).UnixNano() - } - c.mu.Lock() - c.items[k] = Item{ - Object: x, - Expiration: e, - } - // TODO: Calls to mu.Unlock are currently not deferred because defer - // adds ~200 ns (as of go1.) - c.mu.Unlock() -} - -func (c *cache) set(k string, x interface{}, d time.Duration) { - var e int64 - if d == DefaultExpiration { - d = c.defaultExpiration - } - if d > 0 { - e = time.Now().Add(d).UnixNano() - } - c.items[k] = Item{ - Object: x, - Expiration: e, - } -} - -// Add an item to the cache, replacing any existing item, using the default -// expiration. -func (c *cache) SetDefault(k string, x interface{}) { - c.Set(k, x, DefaultExpiration) -} - -// Add an item to the cache only if an item doesn't already exist for the given -// key, or if the existing item has expired. Returns an error otherwise. -func (c *cache) Add(k string, x interface{}, d time.Duration) error { - c.mu.Lock() - _, found := c.get(k) - if found { - c.mu.Unlock() - return fmt.Errorf("Item %s already exists", k) - } - c.set(k, x, d) - c.mu.Unlock() - return nil -} - -// Set a new value for the cache key only if it already exists, and the existing -// item hasn't expired. Returns an error otherwise. -func (c *cache) Replace(k string, x interface{}, d time.Duration) error { - c.mu.Lock() - _, found := c.get(k) - if !found { - c.mu.Unlock() - return fmt.Errorf("Item %s doesn't exist", k) - } - c.set(k, x, d) - c.mu.Unlock() - return nil -} - -// Get an item from the cache. Returns the item or nil, and a bool indicating -// whether the key was found. -func (c *cache) Get(k string) (interface{}, bool) { - c.mu.RLock() - // "Inlining" of get and Expired - item, found := c.items[k] - if !found { - c.mu.RUnlock() - return nil, false - } - if item.Expiration > 0 { - if time.Now().UnixNano() > item.Expiration { - c.mu.RUnlock() - return nil, false - } - } - c.mu.RUnlock() - return item.Object, true -} - -// GetWithExpiration returns an item and its expiration time from the cache. -// It returns the item or nil, the expiration time if one is set (if the item -// never expires a zero value for time.Time is returned), and a bool indicating -// whether the key was found. -func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) { - c.mu.RLock() - // "Inlining" of get and Expired - item, found := c.items[k] - if !found { - c.mu.RUnlock() - return nil, time.Time{}, false - } - - if item.Expiration > 0 { - if time.Now().UnixNano() > item.Expiration { - c.mu.RUnlock() - return nil, time.Time{}, false - } - - // Return the item and the expiration time - c.mu.RUnlock() - return item.Object, time.Unix(0, item.Expiration), true - } - - // If expiration <= 0 (i.e. no expiration time set) then return the item - // and a zeroed time.Time - c.mu.RUnlock() - return item.Object, time.Time{}, true -} - -func (c *cache) get(k string) (interface{}, bool) { - item, found := c.items[k] - if !found { - return nil, false - } - // "Inlining" of Expired - if item.Expiration > 0 { - if time.Now().UnixNano() > item.Expiration { - return nil, false - } - } - return item.Object, true -} - -// Increment an item of type int, int8, int16, int32, int64, uintptr, uint, -// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the -// item's value is not an integer, if it was not found, or if it is not -// possible to increment it by n. 
To retrieve the incremented value, use one -// of the specialized methods, e.g. IncrementInt64. -func (c *cache) Increment(k string, n int64) error { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return fmt.Errorf("Item %s not found", k) - } - switch v.Object.(type) { - case int: - v.Object = v.Object.(int) + int(n) - case int8: - v.Object = v.Object.(int8) + int8(n) - case int16: - v.Object = v.Object.(int16) + int16(n) - case int32: - v.Object = v.Object.(int32) + int32(n) - case int64: - v.Object = v.Object.(int64) + n - case uint: - v.Object = v.Object.(uint) + uint(n) - case uintptr: - v.Object = v.Object.(uintptr) + uintptr(n) - case uint8: - v.Object = v.Object.(uint8) + uint8(n) - case uint16: - v.Object = v.Object.(uint16) + uint16(n) - case uint32: - v.Object = v.Object.(uint32) + uint32(n) - case uint64: - v.Object = v.Object.(uint64) + uint64(n) - case float32: - v.Object = v.Object.(float32) + float32(n) - case float64: - v.Object = v.Object.(float64) + float64(n) - default: - c.mu.Unlock() - return fmt.Errorf("The value for %s is not an integer", k) - } - c.items[k] = v - c.mu.Unlock() - return nil -} - -// Increment an item of type float32 or float64 by n. Returns an error if the -// item's value is not floating point, if it was not found, or if it is not -// possible to increment it by n. Pass a negative number to decrement the -// value. To retrieve the incremented value, use one of the specialized methods, -// e.g. IncrementFloat64. -func (c *cache) IncrementFloat(k string, n float64) error { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return fmt.Errorf("Item %s not found", k) - } - switch v.Object.(type) { - case float32: - v.Object = v.Object.(float32) + float32(n) - case float64: - v.Object = v.Object.(float64) + n - default: - c.mu.Unlock() - return fmt.Errorf("The value for %s does not have type float32 or float64", k) - } - c.items[k] = v - c.mu.Unlock() - return nil -} - -// Increment an item of type int by n. Returns an error if the item's value is -// not an int, or if it was not found. If there is no error, the incremented -// value is returned. -func (c *cache) IncrementInt(k string, n int) (int, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type int8 by n. Returns an error if the item's value is -// not an int8, or if it was not found. If there is no error, the incremented -// value is returned. -func (c *cache) IncrementInt8(k string, n int8) (int8, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int8) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int8", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type int16 by n. Returns an error if the item's value is -// not an int16, or if it was not found. If there is no error, the incremented -// value is returned. 
-func (c *cache) IncrementInt16(k string, n int16) (int16, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int16) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int16", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type int32 by n. Returns an error if the item's value is -// not an int32, or if it was not found. If there is no error, the incremented -// value is returned. -func (c *cache) IncrementInt32(k string, n int32) (int32, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int32) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int32", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type int64 by n. Returns an error if the item's value is -// not an int64, or if it was not found. If there is no error, the incremented -// value is returned. -func (c *cache) IncrementInt64(k string, n int64) (int64, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int64) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int64", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type uint by n. Returns an error if the item's value is -// not an uint, or if it was not found. If there is no error, the incremented -// value is returned. -func (c *cache) IncrementUint(k string, n uint) (uint, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type uintptr by n. Returns an error if the item's value -// is not an uintptr, or if it was not found. If there is no error, the -// incremented value is returned. -func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uintptr) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uintptr", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type uint8 by n. Returns an error if the item's value -// is not an uint8, or if it was not found. If there is no error, the -// incremented value is returned. -func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint8) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint8", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type uint16 by n. Returns an error if the item's value -// is not an uint16, or if it was not found. 
If there is no error, the -// incremented value is returned. -func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint16) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint16", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type uint32 by n. Returns an error if the item's value -// is not an uint32, or if it was not found. If there is no error, the -// incremented value is returned. -func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint32) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint32", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type uint64 by n. Returns an error if the item's value -// is not an uint64, or if it was not found. If there is no error, the -// incremented value is returned. -func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint64) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint64", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type float32 by n. Returns an error if the item's value -// is not an float32, or if it was not found. If there is no error, the -// incremented value is returned. -func (c *cache) IncrementFloat32(k string, n float32) (float32, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(float32) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an float32", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Increment an item of type float64 by n. Returns an error if the item's value -// is not an float64, or if it was not found. If there is no error, the -// incremented value is returned. -func (c *cache) IncrementFloat64(k string, n float64) (float64, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(float64) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an float64", k) - } - nv := rv + n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint, -// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the -// item's value is not an integer, if it was not found, or if it is not -// possible to decrement it by n. To retrieve the decremented value, use one -// of the specialized methods, e.g. DecrementInt64. -func (c *cache) Decrement(k string, n int64) error { - // TODO: Implement Increment and Decrement more cleanly. - // (Cannot do Increment(k, n*-1) for uints.) 
- c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return fmt.Errorf("Item not found") - } - switch v.Object.(type) { - case int: - v.Object = v.Object.(int) - int(n) - case int8: - v.Object = v.Object.(int8) - int8(n) - case int16: - v.Object = v.Object.(int16) - int16(n) - case int32: - v.Object = v.Object.(int32) - int32(n) - case int64: - v.Object = v.Object.(int64) - n - case uint: - v.Object = v.Object.(uint) - uint(n) - case uintptr: - v.Object = v.Object.(uintptr) - uintptr(n) - case uint8: - v.Object = v.Object.(uint8) - uint8(n) - case uint16: - v.Object = v.Object.(uint16) - uint16(n) - case uint32: - v.Object = v.Object.(uint32) - uint32(n) - case uint64: - v.Object = v.Object.(uint64) - uint64(n) - case float32: - v.Object = v.Object.(float32) - float32(n) - case float64: - v.Object = v.Object.(float64) - float64(n) - default: - c.mu.Unlock() - return fmt.Errorf("The value for %s is not an integer", k) - } - c.items[k] = v - c.mu.Unlock() - return nil -} - -// Decrement an item of type float32 or float64 by n. Returns an error if the -// item's value is not floating point, if it was not found, or if it is not -// possible to decrement it by n. Pass a negative number to decrement the -// value. To retrieve the decremented value, use one of the specialized methods, -// e.g. DecrementFloat64. -func (c *cache) DecrementFloat(k string, n float64) error { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return fmt.Errorf("Item %s not found", k) - } - switch v.Object.(type) { - case float32: - v.Object = v.Object.(float32) - float32(n) - case float64: - v.Object = v.Object.(float64) - n - default: - c.mu.Unlock() - return fmt.Errorf("The value for %s does not have type float32 or float64", k) - } - c.items[k] = v - c.mu.Unlock() - return nil -} - -// Decrement an item of type int by n. Returns an error if the item's value is -// not an int, or if it was not found. If there is no error, the decremented -// value is returned. -func (c *cache) DecrementInt(k string, n int) (int, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type int8 by n. Returns an error if the item's value is -// not an int8, or if it was not found. If there is no error, the decremented -// value is returned. -func (c *cache) DecrementInt8(k string, n int8) (int8, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int8) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int8", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type int16 by n. Returns an error if the item's value is -// not an int16, or if it was not found. If there is no error, the decremented -// value is returned. 
-func (c *cache) DecrementInt16(k string, n int16) (int16, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int16) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int16", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type int32 by n. Returns an error if the item's value is -// not an int32, or if it was not found. If there is no error, the decremented -// value is returned. -func (c *cache) DecrementInt32(k string, n int32) (int32, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int32) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int32", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type int64 by n. Returns an error if the item's value is -// not an int64, or if it was not found. If there is no error, the decremented -// value is returned. -func (c *cache) DecrementInt64(k string, n int64) (int64, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(int64) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an int64", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type uint by n. Returns an error if the item's value is -// not an uint, or if it was not found. If there is no error, the decremented -// value is returned. -func (c *cache) DecrementUint(k string, n uint) (uint, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type uintptr by n. Returns an error if the item's value -// is not an uintptr, or if it was not found. If there is no error, the -// decremented value is returned. -func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uintptr) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uintptr", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type uint8 by n. Returns an error if the item's value is -// not an uint8, or if it was not found. If there is no error, the decremented -// value is returned. -func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint8) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint8", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type uint16 by n. Returns an error if the item's value -// is not an uint16, or if it was not found. 
If there is no error, the -// decremented value is returned. -func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint16) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint16", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type uint32 by n. Returns an error if the item's value -// is not an uint32, or if it was not found. If there is no error, the -// decremented value is returned. -func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint32) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint32", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type uint64 by n. Returns an error if the item's value -// is not an uint64, or if it was not found. If there is no error, the -// decremented value is returned. -func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(uint64) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an uint64", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type float32 by n. Returns an error if the item's value -// is not an float32, or if it was not found. If there is no error, the -// decremented value is returned. -func (c *cache) DecrementFloat32(k string, n float32) (float32, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(float32) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an float32", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Decrement an item of type float64 by n. Returns an error if the item's value -// is not an float64, or if it was not found. If there is no error, the -// decremented value is returned. -func (c *cache) DecrementFloat64(k string, n float64) (float64, error) { - c.mu.Lock() - v, found := c.items[k] - if !found || v.Expired() { - c.mu.Unlock() - return 0, fmt.Errorf("Item %s not found", k) - } - rv, ok := v.Object.(float64) - if !ok { - c.mu.Unlock() - return 0, fmt.Errorf("The value for %s is not an float64", k) - } - nv := rv - n - v.Object = nv - c.items[k] = v - c.mu.Unlock() - return nv, nil -} - -// Delete an item from the cache. Does nothing if the key is not in the cache. -func (c *cache) Delete(k string) { - c.mu.Lock() - v, evicted := c.delete(k) - c.mu.Unlock() - if evicted { - c.onEvicted(k, v) - } -} - -func (c *cache) delete(k string) (interface{}, bool) { - if c.onEvicted != nil { - if v, found := c.items[k]; found { - delete(c.items, k) - return v.Object, true - } - } - delete(c.items, k) - return nil, false -} - -type keyAndValue struct { - key string - value interface{} -} - -// Delete all expired items from the cache. 
-func (c *cache) DeleteExpired() { - var evictedItems []keyAndValue - now := time.Now().UnixNano() - c.mu.Lock() - for k, v := range c.items { - // "Inlining" of expired - if v.Expiration > 0 && now > v.Expiration { - ov, evicted := c.delete(k) - if evicted { - evictedItems = append(evictedItems, keyAndValue{k, ov}) - } - } - } - c.mu.Unlock() - for _, v := range evictedItems { - c.onEvicted(v.key, v.value) - } -} - -// Sets an (optional) function that is called with the key and value when an -// item is evicted from the cache. (Including when it is deleted manually, but -// not when it is overwritten.) Set to nil to disable. -func (c *cache) OnEvicted(f func(string, interface{})) { - c.mu.Lock() - c.onEvicted = f - c.mu.Unlock() -} - -// Write the cache's items (using Gob) to an io.Writer. -// -// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the -// documentation for NewFrom().) -func (c *cache) Save(w io.Writer) (err error) { - enc := gob.NewEncoder(w) - defer func() { - if x := recover(); x != nil { - err = fmt.Errorf("Error registering item types with Gob library") - } - }() - c.mu.RLock() - defer c.mu.RUnlock() - for _, v := range c.items { - gob.Register(v.Object) - } - err = enc.Encode(&c.items) - return -} - -// Save the cache's items to the given filename, creating the file if it -// doesn't exist, and overwriting it if it does. -// -// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the -// documentation for NewFrom().) -func (c *cache) SaveFile(fname string) error { - fp, err := os.Create(fname) - if err != nil { - return err - } - err = c.Save(fp) - if err != nil { - fp.Close() - return err - } - return fp.Close() -} - -// Add (Gob-serialized) cache items from an io.Reader, excluding any items with -// keys that already exist (and haven't expired) in the current cache. -// -// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the -// documentation for NewFrom().) -func (c *cache) Load(r io.Reader) error { - dec := gob.NewDecoder(r) - items := map[string]Item{} - err := dec.Decode(&items) - if err == nil { - c.mu.Lock() - defer c.mu.Unlock() - for k, v := range items { - ov, found := c.items[k] - if !found || ov.Expired() { - c.items[k] = v - } - } - } - return err -} - -// Load and add cache items from the given filename, excluding any items with -// keys that already exist in the current cache. -// -// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the -// documentation for NewFrom().) -func (c *cache) LoadFile(fname string) error { - fp, err := os.Open(fname) - if err != nil { - return err - } - err = c.Load(fp) - if err != nil { - fp.Close() - return err - } - return fp.Close() -} - -// Copies all unexpired items in the cache into a new map and returns it. -func (c *cache) Items() map[string]Item { - c.mu.RLock() - defer c.mu.RUnlock() - m := make(map[string]Item, len(c.items)) - now := time.Now().UnixNano() - for k, v := range c.items { - // "Inlining" of Expired - if v.Expiration > 0 { - if now > v.Expiration { - continue - } - } - m[k] = v - } - return m -} - -// Returns the number of items in the cache. This may include items that have -// expired, but have not yet been cleaned up. -func (c *cache) ItemCount() int { - c.mu.RLock() - n := len(c.items) - c.mu.RUnlock() - return n -} - -// Delete all items from the cache. 
-func (c *cache) Flush() { - c.mu.Lock() - c.items = map[string]Item{} - c.mu.Unlock() -} - -type janitor struct { - Interval time.Duration - stop chan bool -} - -func (j *janitor) Run(c *cache) { - ticker := time.NewTicker(j.Interval) - for { - select { - case <-ticker.C: - c.DeleteExpired() - case <-j.stop: - ticker.Stop() - return - } - } -} - -func stopJanitor(c *Cache) { - c.janitor.stop <- true -} - -func runJanitor(c *cache, ci time.Duration) { - j := &janitor{ - Interval: ci, - stop: make(chan bool), - } - c.janitor = j - go j.Run(c) -} - -func newCache(de time.Duration, m map[string]Item) *cache { - if de == 0 { - de = -1 - } - c := &cache{ - defaultExpiration: de, - items: m, - } - return c -} - -func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache { - c := newCache(de, m) - // This trick ensures that the janitor goroutine (which--granted it - // was enabled--is running DeleteExpired on c forever) does not keep - // the returned C object from being garbage collected. When it is - // garbage collected, the finalizer stops the janitor goroutine, after - // which c can be collected. - C := &Cache{c} - if ci > 0 { - runJanitor(c, ci) - runtime.SetFinalizer(C, stopJanitor) - } - return C -} - -// Return a new cache with a given default expiration duration and cleanup -// interval. If the expiration duration is less than one (or NoExpiration), -// the items in the cache never expire (by default), and must be deleted -// manually. If the cleanup interval is less than one, expired items are not -// deleted from the cache before calling c.DeleteExpired(). -func New(defaultExpiration, cleanupInterval time.Duration) *Cache { - items := make(map[string]Item) - return newCacheWithJanitor(defaultExpiration, cleanupInterval, items) -} - -// Return a new cache with a given default expiration duration and cleanup -// interval. If the expiration duration is less than one (or NoExpiration), -// the items in the cache never expire (by default), and must be deleted -// manually. If the cleanup interval is less than one, expired items are not -// deleted from the cache before calling c.DeleteExpired(). -// -// NewFrom() also accepts an items map which will serve as the underlying map -// for the cache. This is useful for starting from a deserialized cache -// (serialized using e.g. gob.Encode() on c.Items()), or passing in e.g. -// make(map[string]Item, 500) to improve startup performance when the cache -// is expected to reach a certain minimum size. -// -// Only the cache's methods synchronize access to this map, so it is not -// recommended to keep any references to the map around after creating a cache. -// If need be, the map can be accessed at a later point using c.Items() (subject -// to the same caveat.) -// -// Note regarding serialization: When using e.g. gob, make sure to -// gob.Register() the individual types stored in the cache before encoding a -// map retrieved with c.Items(), and to register those same types before -// decoding a blob containing an items map. 
-func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache { - return newCacheWithJanitor(defaultExpiration, cleanupInterval, items) -} diff --git a/plugins/traefik/vendor/github.com/patrickmn/go-cache/sharded.go b/plugins/traefik/vendor/github.com/patrickmn/go-cache/sharded.go deleted file mode 100644 index bcc0538bc..000000000 --- a/plugins/traefik/vendor/github.com/patrickmn/go-cache/sharded.go +++ /dev/null @@ -1,192 +0,0 @@ -package cache - -import ( - "crypto/rand" - "math" - "math/big" - insecurerand "math/rand" - "os" - "runtime" - "time" -) - -// This is an experimental and unexported (for now) attempt at making a cache -// with better algorithmic complexity than the standard one, namely by -// preventing write locks of the entire cache when an item is added. As of the -// time of writing, the overhead of selecting buckets results in cache -// operations being about twice as slow as for the standard cache with small -// total cache sizes, and faster for larger ones. -// -// See cache_test.go for a few benchmarks. - -type unexportedShardedCache struct { - *shardedCache -} - -type shardedCache struct { - seed uint32 - m uint32 - cs []*cache - janitor *shardedJanitor -} - -// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead. -func djb33(seed uint32, k string) uint32 { - var ( - l = uint32(len(k)) - d = 5381 + seed + l - i = uint32(0) - ) - // Why is all this 5x faster than a for loop? - if l >= 4 { - for i < l-4 { - d = (d * 33) ^ uint32(k[i]) - d = (d * 33) ^ uint32(k[i+1]) - d = (d * 33) ^ uint32(k[i+2]) - d = (d * 33) ^ uint32(k[i+3]) - i += 4 - } - } - switch l - i { - case 1: - case 2: - d = (d * 33) ^ uint32(k[i]) - case 3: - d = (d * 33) ^ uint32(k[i]) - d = (d * 33) ^ uint32(k[i+1]) - case 4: - d = (d * 33) ^ uint32(k[i]) - d = (d * 33) ^ uint32(k[i+1]) - d = (d * 33) ^ uint32(k[i+2]) - } - return d ^ (d >> 16) -} - -func (sc *shardedCache) bucket(k string) *cache { - return sc.cs[djb33(sc.seed, k)%sc.m] -} - -func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) { - sc.bucket(k).Set(k, x, d) -} - -func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error { - return sc.bucket(k).Add(k, x, d) -} - -func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error { - return sc.bucket(k).Replace(k, x, d) -} - -func (sc *shardedCache) Get(k string) (interface{}, bool) { - return sc.bucket(k).Get(k) -} - -func (sc *shardedCache) Increment(k string, n int64) error { - return sc.bucket(k).Increment(k, n) -} - -func (sc *shardedCache) IncrementFloat(k string, n float64) error { - return sc.bucket(k).IncrementFloat(k, n) -} - -func (sc *shardedCache) Decrement(k string, n int64) error { - return sc.bucket(k).Decrement(k, n) -} - -func (sc *shardedCache) Delete(k string) { - sc.bucket(k).Delete(k) -} - -func (sc *shardedCache) DeleteExpired() { - for _, v := range sc.cs { - v.DeleteExpired() - } -} - -// Returns the items in the cache. This may include items that have expired, -// but have not yet been cleaned up. If this is significant, the Expiration -// fields of the items should be checked. Note that explicit synchronization -// is needed to use a cache and its corresponding Items() return values at -// the same time, as the maps are shared. 
-func (sc *shardedCache) Items() []map[string]Item { - res := make([]map[string]Item, len(sc.cs)) - for i, v := range sc.cs { - res[i] = v.Items() - } - return res -} - -func (sc *shardedCache) Flush() { - for _, v := range sc.cs { - v.Flush() - } -} - -type shardedJanitor struct { - Interval time.Duration - stop chan bool -} - -func (j *shardedJanitor) Run(sc *shardedCache) { - j.stop = make(chan bool) - tick := time.Tick(j.Interval) - for { - select { - case <-tick: - sc.DeleteExpired() - case <-j.stop: - return - } - } -} - -func stopShardedJanitor(sc *unexportedShardedCache) { - sc.janitor.stop <- true -} - -func runShardedJanitor(sc *shardedCache, ci time.Duration) { - j := &shardedJanitor{ - Interval: ci, - } - sc.janitor = j - go j.Run(sc) -} - -func newShardedCache(n int, de time.Duration) *shardedCache { - max := big.NewInt(0).SetUint64(uint64(math.MaxUint32)) - rnd, err := rand.Int(rand.Reader, max) - var seed uint32 - if err != nil { - os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n")) - seed = insecurerand.Uint32() - } else { - seed = uint32(rnd.Uint64()) - } - sc := &shardedCache{ - seed: seed, - m: uint32(n), - cs: make([]*cache, n), - } - for i := 0; i < n; i++ { - c := &cache{ - defaultExpiration: de, - items: map[string]Item{}, - } - sc.cs[i] = c - } - return sc -} - -func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache { - if defaultExpiration == 0 { - defaultExpiration = -1 - } - sc := newShardedCache(shards, defaultExpiration) - SC := &unexportedShardedCache{sc} - if cleanupInterval > 0 { - runShardedJanitor(sc, cleanupInterval) - runtime.SetFinalizer(SC, stopShardedJanitor) - } - return SC -} diff --git a/plugins/traefik/vendor/modules.txt b/plugins/traefik/vendor/modules.txt index 260a64b57..baf26285a 100644 --- a/plugins/traefik/vendor/modules.txt +++ b/plugins/traefik/vendor/modules.txt @@ -203,9 +203,6 @@ github.com/modern-go/reflect2 # github.com/mschoch/smat v0.2.0 ## explicit; go 1.13 github.com/mschoch/smat -# github.com/patrickmn/go-cache v2.1.0+incompatible -## explicit -github.com/patrickmn/go-cache # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors
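
Note (editor's addition, not part of the patch): taken together, the hunks above swap the vendored github.com/patrickmn/go-cache dependency for github.com/akyoto/cache in the Traefik plugin — ykey.go now builds its store with the single-argument constructor, the go-cache sources are dropped from vendor/, and modules.txt no longer lists the module. The sketch below is a minimal, hedged illustration of the API difference the ykey.go hunk relies on; the akyoto/cache signatures are assumed from the calls visible in the diff (New(cleanupInterval), Set(key, value, ttl), Get(key)), and the key name used here is purely hypothetical.

```go
package main

import (
	"fmt"
	"time"

	"github.com/akyoto/cache"
)

func main() {
	// patrickmn/go-cache needed both a default expiration and a cleanup interval:
	//   c := gocache.New(5*time.Minute, 10*time.Minute)
	// akyoto/cache takes only the cleanup interval; each Set carries its own TTL,
	// which is why the ykey.go hunk changes cache.New(1*time.Second, 2*time.Second)
	// to cache.New(1 * time.Second).
	c := cache.New(1 * time.Second)

	// Mirrors the pattern in InitializeYKeys: seed a surrogate key with an empty
	// value and an explicit per-entry TTL (a readable duration is used here
	// instead of the bare literal from the hunk).
	c.Set("GROUP_KEY_ONE", "", time.Minute)

	// Lookups keep the familiar (value, found) shape.
	if v, found := c.Get("GROUP_KEY_ONE"); found {
		fmt.Printf("found %q\n", v)
	}
}
```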