-
Notifications
You must be signed in to change notification settings - Fork 49
/
concurrent_buffer_test.go
131 lines (120 loc) · 3.94 KB
/
concurrent_buffer_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
package limiters_test
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/google/uuid"
l "github.com/mennanov/limiters"
)
// concurrentBuffers builds a ConcurrentBuffer for every combination of a
// distributed locker and a buffer backend. The returned map is keyed by
// "lockerName:backendName" so sub-tests get descriptive names.
func (s *LimitersTestSuite) concurrentBuffers(capacity int64, ttl time.Duration, clock l.Clock) map[string]*l.ConcurrentBuffer {
	out := map[string]*l.ConcurrentBuffer{}
	for lockName, lock := range s.lockers(true) {
		for backendName, backend := range s.concurrentBufferBackends(ttl, clock) {
			key := lockName + ":" + backendName
			out[key] = l.NewConcurrentBuffer(lock, backend, capacity, s.logger)
		}
	}
	return out
}
// concurrentBufferBackends returns every supported ConcurrentBufferBackend
// implementation, keyed by a human-readable name for use in sub-test labels.
func (s *LimitersTestSuite) concurrentBufferBackends(ttl time.Duration, clock l.Clock) map[string]l.ConcurrentBufferBackend {
	backends := make(map[string]l.ConcurrentBufferBackend, 4)
	backends["ConcurrentBufferInMemory"] = l.NewConcurrentBufferInMemory(l.NewRegistry(), ttl, clock)
	backends["ConcurrentBufferRedis"] = l.NewConcurrentBufferRedis(s.redisClient, uuid.New().String(), ttl, clock)
	backends["ConcurrentBufferRedisCluster"] = l.NewConcurrentBufferRedis(s.redisClusterClient, uuid.New().String(), ttl, clock)
	backends["ConcurrentBufferMemcached"] = l.NewConcurrentBufferMemcached(s.memcacheClient, uuid.New().String(), ttl, clock)
	return backends
}
// TestConcurrentBufferNoOverflow fills a buffer to capacity with concurrent
// requests that each complete (Limit + Done), then verifies one more request
// is still admitted afterwards.
func (s *LimitersTestSuite) TestConcurrentBufferNoOverflow() {
	clock := newFakeClock()
	capacity := int64(10)
	ttl := time.Second
	for name, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
		s.Run(name, func() {
			var wg sync.WaitGroup
			for i := int64(0); i < capacity; i++ {
				wg.Add(1)
				go func(n int64, buf *l.ConcurrentBuffer) {
					defer wg.Done()
					k := fmt.Sprintf("key%d", n)
					s.NoError(buf.Limit(context.TODO(), k))
					s.NoError(buf.Done(context.TODO(), k))
				}(i, buffer)
			}
			wg.Wait()
			// Every in-flight request finished, so the buffer must accept one more.
			s.NoError(buffer.Limit(context.TODO(), "last"))
			s.NoError(buffer.Done(context.TODO(), "last"))
		})
	}
}
// TestConcurrentBufferOverflow issues capacity+1 concurrent requests without
// completing any of them and verifies that exactly one request is rejected
// with ErrLimitExhausted.
func (s *LimitersTestSuite) TestConcurrentBufferOverflow() {
	clock := newFakeClock()
	capacity := int64(3)
	ttl := time.Second
	for name, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
		s.Run(name, func() {
			var (
				mu   sync.Mutex
				errs []error
				wg   sync.WaitGroup
			)
			for i := int64(0); i <= capacity; i++ {
				wg.Add(1)
				go func(n int64, buf *l.ConcurrentBuffer) {
					defer wg.Done()
					err := buf.Limit(context.TODO(), fmt.Sprintf("key%d", n))
					if err == nil {
						return
					}
					mu.Lock()
					errs = append(errs, err)
					mu.Unlock()
				}(i, buffer)
			}
			wg.Wait()
			// One request over capacity => exactly one rejection.
			s.Equal([]error{l.ErrLimitExhausted}, errs)
		})
	}
}
// TestConcurrentBufferExpiredKeys verifies that keys older than the TTL no
// longer count towards the buffer's capacity.
func (s *LimitersTestSuite) TestConcurrentBufferExpiredKeys() {
	clock := newFakeClock()
	capacity := int64(2)
	ttl := time.Second
	halfTTL := ttl / 2
	for name, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
		s.Run(name, func() {
			s.Require().NoError(buffer.Limit(context.TODO(), "key1"))
			clock.Sleep(halfTTL)
			s.Require().NoError(buffer.Limit(context.TODO(), "key2"))
			clock.Sleep(halfTTL)
			// By now "key1" has been in the buffer for a full TTL and has
			// expired, so this request is admitted even though it would
			// otherwise exceed the capacity of 2.
			s.NoError(buffer.Limit(context.TODO(), "key3"))
		})
	}
}
// TestConcurrentBufferDuplicateKeys verifies that repeating a key does not
// consume extra capacity: the existing entry's timestamp is refreshed instead.
func (s *LimitersTestSuite) TestConcurrentBufferDuplicateKeys() {
	clock := newFakeClock()
	capacity := int64(2)
	ttl := time.Second
	for name, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
		s.Run(name, func() {
			s.Require().NoError(buffer.Limit(context.TODO(), "key1"))
			s.Require().NoError(buffer.Limit(context.TODO(), "key2"))
			// Re-adding "key1" refreshes it rather than occupying a new slot,
			// so no overflow error is expected.
			s.NoError(buffer.Limit(context.TODO(), "key1"))
		})
	}
}
// BenchmarkConcurrentBuffers measures the cost of a single Limit call for each
// locker/backend combination produced by concurrentBuffers.
func BenchmarkConcurrentBuffers(b *testing.B) {
	s := new(LimitersTestSuite)
	// The suite is driven manually here (outside `go test`'s suite runner), so
	// it gets a throwaway *testing.T just to satisfy SetupSuite.
	s.SetT(&testing.T{})
	s.SetupSuite()
	// Defer teardown so external clients (Redis, Memcached) are released even
	// if a sub-benchmark fails or panics; the original trailing call would be
	// skipped in that case.
	defer s.TearDownSuite()
	capacity := int64(1)
	ttl := time.Second
	clock := newFakeClock()
	buffers := s.concurrentBuffers(capacity, ttl, clock)
	for name, buffer := range buffers {
		b.Run(name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				// Report errors on the benchmark's own *testing.B. The suite's
				// assertions are bound to the dummy *testing.T above and would
				// neither fail nor stop the benchmark.
				if err := buffer.Limit(context.TODO(), "key1"); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}